import sys
from django.utils.encoding import force_bytes
from email.Utils import formatdate
from time import time
from urlparse import parse_qsl
from services.utils import mypool, settings
# This has to be imported after the settings so statsd knows where to log to.
from django_statsd.clients import statsd
try:
from compare import version_int
except ImportError:
from olympia.versions.compare import version_int
from olympia.constants import applications, base
import olympia.core.logger
from utils import (
APP_GUIDS, get_cdn_url, log_configure, PLATFORMS)
# Go configure the log.
log_configure()
good_rdf = """<?xml version="1.0"?>
<RDF:RDF xmlns:RDF="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:em="http://www.mozilla.org/2004/em-rdf#">
<RDF:Description about="urn:mozilla:%(type)s:%(guid)s">
<em:updates>
<RDF:Seq>
<RDF:li resource="urn:mozilla:%(type)s:%(guid)s:%(version)s"/>
</RDF:Seq>
</em:updates>
</RDF:Description>
<RDF:Description about="urn:mozilla:%(type)s:%(guid)s:%(version)s">
<em:version>%(version)s</em:version>
<em:targetApplication>
<RDF:Description>
<em:id>%(appguid)s</em:id>
<em:minVersion>%(min)s</em:minVersion>
<em:maxVersion>%(max)s</em:maxVersion>
<em:updateLink>%(url)s</em:updateLink>
%(if_update)s
%(if_hash)s
</RDF:Description>
</em:targetApplication>
</RDF:Description>
</RDF:RDF>"""
bad_rdf = """<?xml version="1.0"?>
<RDF:RDF xmlns:RDF="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:em="http://www.mozilla.org/2004/em-rdf#">
</RDF:RDF>"""
no_updates_rdf = """<?xml version="1.0"?>
<RDF:RDF xmlns:RDF="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:em="http://www.mozilla.org/2004/em-rdf#">
<RDF:Description about="urn:mozilla:%(type)s:%(guid)s">
<em:updates>
<RDF:Seq>
</RDF:Seq>
</em:updates>
</RDF:Description>
</RDF:RDF>"""
error_log = olympia.core.logger.getLogger('z.services')
class Update(object):
def __init__(self, data, compat_mode='strict'):
self.conn, self.cursor = None, None
self.data = data.copy()
self.data['row'] = {}
self.version_int = 0
self.compat_mode = compat_mode
def is_valid(self):
# If you are accessing this from unit tests, you can assign your own
# cursor before calling is_valid.
if not self.cursor:
self.conn = mypool.connect()
self.cursor = self.conn.cursor()
data = self.data
# Version can be blank.
data['version'] = data.get('version', '')
for field in ['reqVersion', 'id', 'appID', 'appVersion']:
if field not in data:
return False
data['app_id'] = APP_GUIDS.get(data['appID'])
if not data['app_id']:
return False
sql = """SELECT id, status, addontype_id, guid FROM addons
WHERE guid = %(guid)s AND
inactive = 0 AND
status NOT IN (%(STATUS_DELETED)s, %(STATUS_DISABLED)s)
LIMIT 1;"""
self.cursor.execute(sql, {
'guid': self.data['id'],
'STATUS_DELETED': base.STATUS_DELETED,
'STATUS_DISABLED': base.STATUS_DISABLED,
})
result = self.cursor.fetchone()
if result is None:
return False
data['id'], data['addon_status'], data['type'], data['guid'] = result
data['version_int'] = version_int(data['appVersion'])
if 'appOS' in data:
for k, v in PLATFORMS.items():
if k in data['appOS']:
data['appOS'] = v
break
else:
data['appOS'] = None
return True
def get_update(self):
data = self.data
data['STATUS_PUBLIC'] = base.STATUS_PUBLIC
data['STATUS_BETA'] = base.STATUS_BETA
data['RELEASE_CHANNEL_LISTED'] = base.RELEASE_CHANNEL_LISTED
sql = ["""
SELECT
addons.guid as guid, addons.addontype_id as type,
addons.inactive as disabled_by_user, appmin.version as min,
appmax.version as max, files.id as file_id,
files.status as file_status, files.hash,
files.filename, versions.id as version_id,
files.datestatuschanged as datestatuschanged,
files.strict_compatibility as strict_compat,
versions.releasenotes, versions.version as version
FROM versions
INNER JOIN addons
ON addons.id = versions.addon_id AND addons.id = %(id)s
INNER JOIN applications_versions
ON applications_versions.version_id = versions.id
INNER JOIN appversions appmin
ON appmin.id = applications_versions.min
AND appmin.application_id = %(app_id)s
INNER JOIN appversions appmax
ON appmax.id = applications_versions.max
AND appmax.application_id = %(app_id)s
INNER JOIN files
ON files.version_id = versions.id AND (files.platform_id = 1
"""]
if data.get('appOS'):
sql.append(' OR files.platform_id = %(appOS)s')
sql.append("""
)
-- Find a reference to the user's current version, if it exists.
-- These should never be inner joins. We need results even if we
-- can't find the current version.
LEFT JOIN versions curver
ON curver.addon_id = addons.id AND curver.version = %(version)s
LEFT JOIN files curfile
ON curfile.version_id = curver.id
WHERE
versions.deleted = 0 AND
versions.channel = %(RELEASE_CHANNEL_LISTED)s AND
-- Note that the WHEN clauses here will evaluate to the same
-- thing for each row we examine. The JOINs above narrow the
-- rows matched by the WHERE clause to versions of a specific
-- add-on, and the ORDER BY and LIMIT 1 clauses below make it
-- unlikely that we'll be examining a large number of rows,
-- so this is fairly cheap.
CASE
WHEN curfile.status = %(STATUS_BETA)s
THEN
-- User's current version is a known beta version.
--
-- Serve only beta updates. Serving a full version here
-- will forever kick users out of the beta update channel.
--
-- If the add-on does not have full review, serve no
-- updates.
addons.status = %(STATUS_PUBLIC)s AND
files.status = %(STATUS_BETA)s
ELSE
-- Anything else, including:
--
-- * Add-on has full review
-- * User's current version has full review, regardless
-- of add-on status
--
-- Serve only full-reviewed updates.
files.status = %(STATUS_PUBLIC)s
END
""")
sql.append('AND appmin.version_int <= %(version_int)s ')
if self.compat_mode == 'ignore':
pass # no further SQL modification required.
elif self.compat_mode == 'normal':
# When a file has strict_compatibility enabled, or has binary
# components, default-to-compatible is disabled.
sql.append("""AND
CASE WHEN files.strict_compatibility = 1 OR
files.binary_components = 1
THEN appmax.version_int >= %(version_int)s ELSE 1 END
""")
# Filter out versions that don't have the minimum maxVersion
# requirement to qualify for default-to-compatible.
d2c_min = applications.D2C_MIN_VERSIONS.get(data['app_id'])
if d2c_min:
data['d2c_min_version'] = version_int(d2c_min)
sql.append("AND appmax.version_int >= %(d2c_min_version)s ")
# Filter out versions found in compat overrides
sql.append("""AND
NOT versions.id IN (
SELECT version_id FROM incompatible_versions
WHERE app_id=%(app_id)s AND
(min_app_version='0' AND
max_app_version_int >= %(version_int)s) OR
(min_app_version_int <= %(version_int)s AND
max_app_version='*') OR
(min_app_version_int <= %(version_int)s AND
max_app_version_int >= %(version_int)s)) """)
else: # Not defined or 'strict'.
sql.append('AND appmax.version_int >= %(version_int)s ')
# Special case for bug 1031516.
if data['guid'] == 'firefox-hotfix@mozilla.org':
app_version = data['version_int']
hotfix_version = data['version']
if version_int('10') <= app_version <= version_int('16.0.1'):
if hotfix_version < '20121019.01':
sql.append("AND versions.version = '20121019.01' ")
elif hotfix_version < '20130826.01':
sql.append("AND versions.version = '20130826.01' ")
elif version_int('16.0.2') <= app_version <= version_int('24.*'):
if hotfix_version < '20130826.01':
sql.append("AND versions.version = '20130826.01' ")
sql.append('ORDER BY versions.id DESC LIMIT 1;')
self.cursor.execute(''.join(sql), data)
result = self.cursor.fetchone()
if result:
row = dict(zip([
'guid', 'type', 'disabled_by_user', 'min', 'max',
'file_id', 'file_status', 'hash', 'filename', 'version_id',
'datestatuschanged', 'strict_compat', 'releasenotes',
'version'],
list(result)))
row['type'] = base.ADDON_SLUGS_UPDATE[row['type']]
row['url'] = get_cdn_url(data['id'], row)
row['appguid'] = applications.APPS_ALL[data['app_id']].guid
data['row'] = row
return True
return False
def get_bad_rdf(self):
return bad_rdf
def get_rdf(self):
if self.is_valid():
if self.get_update():
rdf = self.get_good_rdf()
else:
rdf = self.get_no_updates_rdf()
else:
rdf = self.get_bad_rdf()
self.cursor.close()
if self.conn:
self.conn.close()
return rdf
def get_no_updates_rdf(self):
name = base.ADDON_SLUGS_UPDATE[self.data['type']]
return no_updates_rdf % ({'guid': self.data['guid'], 'type': name})
def get_good_rdf(self):
data = self.data['row']
data['if_hash'] = ''
if data['hash']:
data['if_hash'] = ('<em:updateHash>%s</em:updateHash>' %
data['hash'])
data['if_update'] = ''
if data['releasenotes']:
data['if_update'] = ('<em:updateInfoURL>%s%s%s/%%APP_LOCALE%%/'
'</em:updateInfoURL>' %
(settings.SITE_URL, '/versions/updateInfo/',
data['version_id']))
return good_rdf % data
def format_date(self, secs):
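# formatdate() returns an RFC 2822 date string such as
# 'Fri, 09 Nov 2001 01:08:47 -0000' (illustrative value); the [:25] slice keeps
# everything up to the seconds, and the numeric UTC offset is replaced with a
# literal 'GMT'.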
return '%s GMT' % formatdate(time() + secs)[:25]
def get_headers(self, length):
return [('Content-Type', 'text/xml'),
('Cache-Control', 'public, max-age=3600'),
('Last-Modified', self.format_date(0)),
('Expires', self.format_date(3600)),
('Content-Length', str(length))]
def log_exception(data):
(typ, value, traceback) = sys.exc_info()
error_log.error(u'Type: %s, %s. Query: %s' % (typ, value, data))
def application(environ, start_response):
status = '200 OK'
with statsd.timer('services.update'):
data = dict(parse_qsl(environ['QUERY_STRING']))
compat_mode = data.pop('compatMode', 'strict')
try:
update = Update(data, compat_mode)
output = force_bytes(update.get_rdf())
start_response(status, update.get_headers(len(output)))
except Exception:
log_exception(data)
raise
return [output]
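# A minimal sketch of how this WSGI endpoint is typically queried (path and
# GUID values below are only illustrative):
#   GET /update/?reqVersion=2&id=addon@example.com
#       &appID={ec8030f7-c20a-464f-9b0e-13a3a9e97384}&appVersion=45.0
#       &compatMode=normal
# is_valid() requires reqVersion, id, appID and appVersion; compatMode is
# optional and defaults to 'strict'.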
# ----------------------------------------------------------------------
from __future__ import absolute_import, unicode_literals
from django.db.backends import BaseDatabaseIntrospection
from . import ado_consts
try:
# Added with Django 1.7
from django.db.backends import FieldInfo
except ImportError:
from collections import namedtuple
# Structure returned by the DB-API cursor.description interface (PEP 249)
FieldInfo = namedtuple('FieldInfo',
'name type_code display_size internal_size precision scale null_ok')
AUTO_FIELD_MARKER = -1000
BIG_AUTO_FIELD_MARKER = -1001
MONEY_FIELD_MARKER = -1002
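# These marker values stand in for real ADO type codes so that
# data_types_reverse (defined below) can map identity columns to an
# AutoField/BigAutoField field type and money columns to 'DecimalField'.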
class DatabaseIntrospection(BaseDatabaseIntrospection):
def get_field_type(self, data_type, description):
field_type = self.data_types_reverse[data_type]
if (field_type == 'CharField'
and description.internal_size is not None
and description.internal_size > 8000):
field_type = 'TextField'
return field_type
def get_table_list(self, cursor):
"Return a list of table and view names in the current database."
cursor.execute("""\
SELECT TABLE_NAME
FROM INFORMATION_SCHEMA.TABLES
WHERE TABLE_TYPE = 'BASE TABLE'
UNION
SELECT TABLE_NAME
FROM INFORMATION_SCHEMA.VIEWS
""")
return [row[0] for row in cursor.fetchall()]
def _is_auto_field(self, cursor, table_name, column_name):
"""Check if a column is an identity column.
See: http://msdn2.microsoft.com/en-us/library/ms174968.aspx
"""
sql = "SELECT COLUMNPROPERTY(OBJECT_ID(N'%s'), N'%s', 'IsIdentity')" % \
(table_name, column_name)
cursor.execute(sql)
return cursor.fetchone()[0]
def _get_table_field_type_map(self, cursor, table_name):
"""
Return a dict mapping field name to data type. DB-API cursor description
interprets the date columns as chars.
"""
cursor.execute('''
SELECT [COLUMN_NAME], [DATA_TYPE], [CHARACTER_MAXIMUM_LENGTH]
FROM INFORMATION_SCHEMA.COLUMNS
WHERE [TABLE_NAME] LIKE \'%s\'
''' % table_name)
results = dict([(c[0], (c[1], c[2])) for c in cursor.fetchall()])
return results
def _datatype_to_ado_type(self, datatype):
"""
Map datatype name to ado type.
"""
return {
'bigint': ado_consts.adBigInt,
'binary': ado_consts.adBinary,
'bit': ado_consts.adBoolean,
'char': ado_consts.adChar,
'date': ado_consts.adDBDate,
'datetime': ado_consts.adDBTimeStamp,
'datetime2': ado_consts.adDBTimeStamp,
'datetimeoffset': ado_consts.adDBTimeStamp,
'decimal': ado_consts.adDecimal,
'float': ado_consts.adDouble,
'image': ado_consts.adVarBinary,
'int': ado_consts.adInteger,
'money': MONEY_FIELD_MARKER,
'numeric': ado_consts.adNumeric,
'nchar': ado_consts.adWChar,
'ntext': ado_consts.adLongVarWChar,
'nvarchar': ado_consts.adVarWChar,
'smalldatetime': ado_consts.adDBTimeStamp,
'smallint': ado_consts.adSmallInt,
'smallmoney': MONEY_FIELD_MARKER,
'text': ado_consts.adLongVarChar,
'time': ado_consts.adDBTime,
'tinyint': ado_consts.adTinyInt,
'varbinary': ado_consts.adVarBinary,
'varchar': ado_consts.adVarChar,
}.get(datatype.lower(), None)
def get_table_description(self, cursor, table_name, identity_check=True):
"""Return a description of the table, with DB-API cursor.description interface.
The 'auto_check' parameter has been added to the function argspec.
If set to True, the function will check each of the table's fields for the
IDENTITY property (the IDENTITY property is the MSSQL equivalent to an AutoField).
When a field is found with an IDENTITY property, it is given a custom field number
of SQL_AUTOFIELD, which maps to the 'AutoField' value in the DATA_TYPES_REVERSE dict.
"""
table_field_type_map = self._get_table_field_type_map(cursor, table_name)
cursor.execute("SELECT * FROM [%s] where 1=0" % (table_name))
columns = cursor.description
items = list()
for column in columns:
column = list(column) # Convert tuple to list
# fix data type
data_type, char_length = table_field_type_map.get(column[0])
column[1] = self._datatype_to_ado_type(data_type)
if identity_check and self._is_auto_field(cursor, table_name, column[0]):
if column[1] == ado_consts.adBigInt:
column[1] = BIG_AUTO_FIELD_MARKER
else:
column[1] = AUTO_FIELD_MARKER
if column[1] == MONEY_FIELD_MARKER:
# force decimal_places=4 to match data type. Cursor description thinks this column is a string
column[5] = 4
elif column[1] == ado_consts.adVarWChar and char_length == -1:
# treat varchar(max) as text
column[1] = self._datatype_to_ado_type('text')
items.append(FieldInfo(*column))
return items
def _name_to_index(self, cursor, table_name):
"""Return a dictionary of {field_name: field_index} for the given table.
Indexes are 0-based.
"""
return dict([(d[0], i) for i, d in enumerate(self.get_table_description(cursor, table_name, False))])
def get_relations(self, cursor, table_name):
source_field_dict = self._name_to_index(cursor, table_name)
sql = """
select
COLUMN_NAME = fk_cols.COLUMN_NAME,
REFERENCED_TABLE_NAME = pk.TABLE_NAME,
REFERENCED_COLUMN_NAME = pk_cols.COLUMN_NAME
from INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS ref_const
join INFORMATION_SCHEMA.TABLE_CONSTRAINTS fk
on ref_const.CONSTRAINT_CATALOG = fk.CONSTRAINT_CATALOG
and ref_const.CONSTRAINT_SCHEMA = fk.CONSTRAINT_SCHEMA
and ref_const.CONSTRAINT_NAME = fk.CONSTRAINT_NAME
and fk.CONSTRAINT_TYPE = 'FOREIGN KEY'
join INFORMATION_SCHEMA.TABLE_CONSTRAINTS pk
on ref_const.UNIQUE_CONSTRAINT_CATALOG = pk.CONSTRAINT_CATALOG
and ref_const.UNIQUE_CONSTRAINT_SCHEMA = pk.CONSTRAINT_SCHEMA
and ref_const.UNIQUE_CONSTRAINT_NAME = pk.CONSTRAINT_NAME
And pk.CONSTRAINT_TYPE = 'PRIMARY KEY'
join INFORMATION_SCHEMA.KEY_COLUMN_USAGE fk_cols
on ref_const.CONSTRAINT_NAME = fk_cols.CONSTRAINT_NAME
join INFORMATION_SCHEMA.KEY_COLUMN_USAGE pk_cols
on pk.CONSTRAINT_NAME = pk_cols.CONSTRAINT_NAME
where
fk.TABLE_NAME = %s"""
cursor.execute(sql, [table_name])
relations = cursor.fetchall()
relation_map = dict()
for source_column, target_table, target_column in relations:
target_field_dict = self._name_to_index(cursor, target_table)
target_index = target_field_dict[target_column]
source_index = source_field_dict[source_column]
relation_map[source_index] = (target_index, target_table)
return relation_map
def get_key_columns(self, cursor, table_name):
"""
Backends can override this to return a list of (column_name, referenced_table_name,
referenced_column_name) for all key columns in given table.
"""
source_field_dict = self._name_to_index(cursor, table_name)
sql = """
select
COLUMN_NAME = fk_cols.COLUMN_NAME,
REFERENCED_TABLE_NAME = pk.TABLE_NAME,
REFERENCED_COLUMN_NAME = pk_cols.COLUMN_NAME
from INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS ref_const
join INFORMATION_SCHEMA.TABLE_CONSTRAINTS fk
on ref_const.CONSTRAINT_CATALOG = fk.CONSTRAINT_CATALOG
and ref_const.CONSTRAINT_SCHEMA = fk.CONSTRAINT_SCHEMA
and ref_const.CONSTRAINT_NAME = fk.CONSTRAINT_NAME
and fk.CONSTRAINT_TYPE = 'FOREIGN KEY'
join INFORMATION_SCHEMA.TABLE_CONSTRAINTS pk
on ref_const.UNIQUE_CONSTRAINT_CATALOG = pk.CONSTRAINT_CATALOG
and ref_const.UNIQUE_CONSTRAINT_SCHEMA = pk.CONSTRAINT_SCHEMA
and ref_const.UNIQUE_CONSTRAINT_NAME = pk.CONSTRAINT_NAME
And pk.CONSTRAINT_TYPE = 'PRIMARY KEY'
join INFORMATION_SCHEMA.KEY_COLUMN_USAGE fk_cols
on ref_const.CONSTRAINT_NAME = fk_cols.CONSTRAINT_NAME
join INFORMATION_SCHEMA.KEY_COLUMN_USAGE pk_cols
on pk.CONSTRAINT_NAME = pk_cols.CONSTRAINT_NAME
where
fk.TABLE_NAME = %s"""
cursor.execute(sql, [table_name])
relations = cursor.fetchall()
key_columns = []
key_columns.extend([(source_column, target_table, target_column)
for source_column, target_table, target_column in relations])
return key_columns
def get_indexes(self, cursor, table_name):
# Returns a dictionary of fieldname -> infodict for the given table,
# where each infodict is in the format:
# {'primary_key': boolean representing whether it's the primary key,
# 'unique': boolean representing whether it's a unique index}
sql = """
select
C.name as [column_name],
IX.is_unique as [unique],
IX.is_primary_key as [primary_key]
from
sys.tables T
join sys.index_columns IC on IC.object_id = T.object_id
join sys.columns C on C.object_id = T.object_id and C.column_id = IC.column_id
join sys.indexes IX on IX.object_id = T.object_id and IX.index_id = IC.index_id
where
T.name = %s
-- Omit multi-column keys
and not exists (
select *
from sys.index_columns cols
where
cols.object_id = T.object_id
and cols.index_id = IC.index_id
and cols.key_ordinal > 1
)
"""
cursor.execute(sql, [table_name])
constraints = cursor.fetchall()
indexes = dict()
for column_name, unique, primary_key in constraints:
indexes[column_name.lower()] = {"primary_key": primary_key, "unique": unique}
return indexes
data_types_reverse = {
AUTO_FIELD_MARKER: 'AutoField',
BIG_AUTO_FIELD_MARKER: 'sqlserver_ado.fields.BigAutoField',
MONEY_FIELD_MARKER: 'DecimalField',
ado_consts.adBoolean: 'BooleanField',
ado_consts.adChar: 'CharField',
ado_consts.adWChar: 'CharField',
ado_consts.adDecimal: 'DecimalField',
ado_consts.adNumeric: 'DecimalField',
ado_consts.adDate: 'DateField',
ado_consts.adDBDate: 'DateField',
ado_consts.adDBTime: 'TimeField',
ado_consts.adDBTimeStamp: 'DateTimeField',
ado_consts.adDouble: 'FloatField',
ado_consts.adSingle: 'FloatField',
ado_consts.adInteger: 'IntegerField',
ado_consts.adBigInt: 'BigIntegerField',
ado_consts.adSmallInt: 'SmallIntegerField',
ado_consts.adTinyInt: 'SmallIntegerField',
ado_consts.adVarChar: 'CharField',
ado_consts.adVarWChar: 'CharField',
ado_consts.adLongVarWChar: 'TextField',
ado_consts.adLongVarChar: 'TextField',
ado_consts.adBinary: 'BinaryField',
ado_consts.adVarBinary: 'BinaryField',
}
def get_constraints(self, cursor, table_name):
"""
Retrieves any constraints or keys (unique, pk, fk, check, index)
across one or more columns.
Returns a dict mapping constraint names to their attributes,
where attributes is a dict with keys:
* columns: List of columns this covers
* primary_key: True if primary key, False otherwise
* unique: True if this is a unique constraint, False otherwise
* foreign_key: (table, column) of target, or None
* check: True if check constraint, False otherwise
* index: True if index, False otherwise.
Some backends may return special constraint names that don't exist
if they don't name constraints of a certain type (e.g. SQLite)
"""
constraints = dict()
# getting indexes (primary keys, unique, regular)
sql = """
select object_id, name, index_id, is_unique, is_primary_key
from sys.indexes where object_id = OBJECT_ID(%s)
"""
cursor.execute(sql, [table_name])
for object_id, name, index_id, unique, primary_key in list(cursor.fetchall()):
sql = """
select name from sys.index_columns ic
inner join sys.columns c on ic.column_id = c.column_id and ic.object_id = c.object_id
where ic.object_id = %s and ic.index_id = %s
"""
cursor.execute(sql, [object_id, index_id])
columns = [row[0] for row in cursor.fetchall()]
constraint = {"columns": list(columns),
"primary_key": primary_key,
"unique": unique,
"index": True,
"check": False,
"foreign_key": None,
}
constraints[name] = constraint
# getting foreign keys
sql = """
select fk.object_id, fk.name, rt.name from sys.foreign_keys fk
inner join sys.tables rt on fk.referenced_object_id = rt.object_id
where fk.parent_object_id = OBJECT_ID(%s)
"""
cursor.execute(sql, [table_name])
for id, name, ref_table_name in list(cursor.fetchall()):
sql = """
select cc.name, rc.name from sys.foreign_key_columns fkc
inner join sys.columns rc on fkc.referenced_object_id = rc.object_id and fkc.referenced_column_id = rc.column_id
inner join sys.columns cc on fkc.parent_object_id = cc.object_id and fkc.parent_column_id = cc.column_id
where fkc.constraint_object_id = %s
"""
cursor.execute(sql, [id])
columns, fkcolumns = zip(*cursor.fetchall())
constraint = {"columns": list(columns),
"primary_key": False,
"unique": False,
"index": False,
"check": False,
"foreign_key": (ref_table_name, fkcolumns[0]),
}
constraints[name] = constraint
# get check constraints
sql = """
SELECT kc.constraint_name, kc.column_name
FROM information_schema.constraint_column_usage AS kc
JOIN information_schema.table_constraints AS c ON
kc.table_schema = c.table_schema AND
kc.table_name = c.table_name AND
kc.constraint_name = c.constraint_name
WHERE
c.constraint_type = 'CHECK'
AND
kc.table_name = %s
"""
cursor.execute(sql, [table_name])
for constraint, column in list(cursor.fetchall()):
if column not in constraints:
constraints[constraint] = {
"columns": [],
"primary_key": False,
"unique": False,
"index": False,
"check": True,
"foreign_key": None,
}
constraints[constraint]['columns'].append(column)
return constraints
# ----------------------------------------------------------------------
from __future__ import unicode_literals
import datetime
import re
import uuid
from django.conf import settings
from django.db.backends.base.operations import BaseDatabaseOperations
from django.db.backends.utils import truncate_name
from django.utils import six, timezone
from django.utils.encoding import force_bytes, force_text
from .base import Database
from .utils import InsertIdVar, Oracle_datetime, convert_unicode
class DatabaseOperations(BaseDatabaseOperations):
compiler_module = "django.db.backends.oracle.compiler"
# Oracle uses NUMBER(11) and NUMBER(19) for integer fields.
integer_field_ranges = {
'SmallIntegerField': (-99999999999, 99999999999),
'IntegerField': (-99999999999, 99999999999),
'BigIntegerField': (-9999999999999999999, 9999999999999999999),
'PositiveSmallIntegerField': (0, 99999999999),
'PositiveIntegerField': (0, 99999999999),
}
# TODO: colorize this SQL code with style.SQL_KEYWORD(), etc.
_sequence_reset_sql = """
DECLARE
table_value integer;
seq_value integer;
BEGIN
SELECT NVL(MAX(%(column)s), 0) INTO table_value FROM %(table)s;
SELECT NVL(last_number - cache_size, 0) INTO seq_value FROM user_sequences
WHERE sequence_name = '%(sequence)s';
WHILE table_value > seq_value LOOP
SELECT "%(sequence)s".nextval INTO seq_value FROM dual;
END LOOP;
END;
/"""
def autoinc_sql(self, table, column):
# To simulate auto-incrementing primary keys in Oracle, we have to
# create a sequence and a trigger.
args = {
'sq_name': self._get_sequence_name(table),
'tr_name': self._get_trigger_name(table),
'tbl_name': self.quote_name(table),
'col_name': self.quote_name(column),
}
sequence_sql = """
DECLARE
i INTEGER;
BEGIN
SELECT COUNT(1) INTO i FROM USER_SEQUENCES
WHERE SEQUENCE_NAME = '%(sq_name)s';
IF i = 0 THEN
EXECUTE IMMEDIATE 'CREATE SEQUENCE "%(sq_name)s"';
END IF;
END;
/""" % args
trigger_sql = """
CREATE OR REPLACE TRIGGER "%(tr_name)s"
BEFORE INSERT ON %(tbl_name)s
FOR EACH ROW
WHEN (new.%(col_name)s IS NULL)
BEGIN
SELECT "%(sq_name)s".nextval
INTO :new.%(col_name)s FROM dual;
END;
/""" % args
return sequence_sql, trigger_sql
def cache_key_culling_sql(self):
return """
SELECT cache_key
FROM (SELECT cache_key, rank() OVER (ORDER BY cache_key) AS rank FROM %s)
WHERE rank = %%s + 1
"""
def date_extract_sql(self, lookup_type, field_name):
if lookup_type == 'week_day':
# TO_CHAR(field, 'D') returns an integer from 1-7, where 1=Sunday.
return "TO_CHAR(%s, 'D')" % field_name
elif lookup_type == 'week':
# IW = ISO week number
return "TO_CHAR(%s, 'IW')" % field_name
else:
# http://docs.oracle.com/cd/B19306_01/server.102/b14200/functions050.htm
return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
def date_interval_sql(self, timedelta):
"""
NUMTODSINTERVAL converts number to INTERVAL DAY TO SECOND literal.
"""
return "NUMTODSINTERVAL(%06f, 'SECOND')" % (timedelta.total_seconds()), []
def date_trunc_sql(self, lookup_type, field_name):
# http://docs.oracle.com/cd/B19306_01/server.102/b14200/functions230.htm#i1002084
if lookup_type in ('year', 'month'):
return "TRUNC(%s, '%s')" % (field_name, lookup_type.upper())
else:
return "TRUNC(%s)" % field_name
# Oracle crashes with "ORA-03113: end-of-file on communication channel"
# if the time zone name is passed in parameter. Use interpolation instead.
# https://groups.google.com/forum/#!msg/django-developers/zwQju7hbG78/9l934yelwfsJ
# This regexp matches all time zone names from the zoneinfo database.
_tzname_re = re.compile(r'^[\w/:+-]+$')
def _convert_field_to_tz(self, field_name, tzname):
if not settings.USE_TZ:
return field_name
if not self._tzname_re.match(tzname):
raise ValueError("Invalid time zone name: %s" % tzname)
# Convert from UTC to local time, returning TIMESTAMP WITH TIME ZONE
# and cast it back to TIMESTAMP to strip the TIME ZONE details.
return "CAST((FROM_TZ(%s, '0:00') AT TIME ZONE '%s') AS TIMESTAMP)" % (field_name, tzname)
def datetime_cast_date_sql(self, field_name, tzname):
field_name = self._convert_field_to_tz(field_name, tzname)
sql = 'TRUNC(%s)' % field_name
return sql, []
def datetime_cast_time_sql(self, field_name, tzname):
# Since `TimeField` values are stored as TIMESTAMP where only the date
# part is ignored, convert the field to the specified timezone.
field_name = self._convert_field_to_tz(field_name, tzname)
return field_name, []
def datetime_extract_sql(self, lookup_type, field_name, tzname):
field_name = self._convert_field_to_tz(field_name, tzname)
sql = self.date_extract_sql(lookup_type, field_name)
return sql, []
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
field_name = self._convert_field_to_tz(field_name, tzname)
# http://docs.oracle.com/cd/B19306_01/server.102/b14200/functions230.htm#i1002084
if lookup_type in ('year', 'month'):
sql = "TRUNC(%s, '%s')" % (field_name, lookup_type.upper())
elif lookup_type == 'day':
sql = "TRUNC(%s)" % field_name
elif lookup_type == 'hour':
sql = "TRUNC(%s, 'HH24')" % field_name
elif lookup_type == 'minute':
sql = "TRUNC(%s, 'MI')" % field_name
else:
sql = "CAST(%s AS DATE)" % field_name # Cast to DATE removes sub-second precision.
return sql, []
def time_trunc_sql(self, lookup_type, field_name):
# The implementation is similar to `datetime_trunc_sql` as both
# `DateTimeField` and `TimeField` are stored as TIMESTAMP where
# the date part of the latter is ignored.
if lookup_type == 'hour':
sql = "TRUNC(%s, 'HH24')" % field_name
elif lookup_type == 'minute':
sql = "TRUNC(%s, 'MI')" % field_name
elif lookup_type == 'second':
sql = "CAST(%s AS DATE)" % field_name # Cast to DATE removes sub-second precision.
return sql
def get_db_converters(self, expression):
converters = super(DatabaseOperations, self).get_db_converters(expression)
internal_type = expression.output_field.get_internal_type()
if internal_type == 'TextField':
converters.append(self.convert_textfield_value)
elif internal_type == 'BinaryField':
converters.append(self.convert_binaryfield_value)
elif internal_type in ['BooleanField', 'NullBooleanField']:
converters.append(self.convert_booleanfield_value)
elif internal_type == 'DateTimeField':
converters.append(self.convert_datetimefield_value)
elif internal_type == 'DateField':
converters.append(self.convert_datefield_value)
elif internal_type == 'TimeField':
converters.append(self.convert_timefield_value)
elif internal_type == 'UUIDField':
converters.append(self.convert_uuidfield_value)
converters.append(self.convert_empty_values)
return converters
def convert_textfield_value(self, value, expression, connection, context):
if isinstance(value, Database.LOB):
value = force_text(value.read())
return value
def convert_binaryfield_value(self, value, expression, connection, context):
if isinstance(value, Database.LOB):
value = force_bytes(value.read())
return value
def convert_booleanfield_value(self, value, expression, connection, context):
if value in (0, 1):
value = bool(value)
return value
# cx_Oracle always returns datetime.datetime objects for
# DATE and TIMESTAMP columns, but Django wants to see a
# python datetime.date, .time, or .datetime.
def convert_datetimefield_value(self, value, expression, connection, context):
if value is not None:
if settings.USE_TZ:
value = timezone.make_aware(value, self.connection.timezone)
return value
def convert_datefield_value(self, value, expression, connection, context):
if isinstance(value, Database.Timestamp):
value = value.date()
return value
def convert_timefield_value(self, value, expression, connection, context):
if isinstance(value, Database.Timestamp):
value = value.time()
return value
def convert_uuidfield_value(self, value, expression, connection, context):
if value is not None:
value = uuid.UUID(value)
return value
def convert_empty_values(self, value, expression, connection, context):
# Oracle stores empty strings as null. We need to undo this in
# order to adhere to the Django convention of using the empty
# string instead of null, but only if the field accepts the
# empty string.
field = expression.output_field
if value is None and field.empty_strings_allowed:
value = ''
if field.get_internal_type() == 'BinaryField':
value = b''
return value
def deferrable_sql(self):
return " DEFERRABLE INITIALLY DEFERRED"
def fetch_returned_insert_id(self, cursor):
return int(cursor._insert_id_var.getvalue())
def field_cast_sql(self, db_type, internal_type):
if db_type and db_type.endswith('LOB'):
return "DBMS_LOB.SUBSTR(%s)"
else:
return "%s"
def last_executed_query(self, cursor, sql, params):
# https://cx-oracle.readthedocs.io/en/latest/cursor.html#Cursor.statement
# The DB API definition does not define this attribute.
statement = cursor.statement
if statement and six.PY2 and not isinstance(statement, unicode): # NOQA: unicode undefined on PY3
statement = statement.decode('utf-8')
# Unlike Psycopg's `query` and MySQLdb's `_last_executed`, cx_Oracle's
# `statement` doesn't contain the query parameters. refs #20010.
return super(DatabaseOperations, self).last_executed_query(cursor, statement, params)
def last_insert_id(self, cursor, table_name, pk_name):
sq_name = self._get_sequence_name(table_name)
cursor.execute('SELECT "%s".currval FROM dual' % sq_name)
return cursor.fetchone()[0]
def lookup_cast(self, lookup_type, internal_type=None):
if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
return "UPPER(%s)"
return "%s"
def max_in_list_size(self):
return 1000
def max_name_length(self):
return 30
def pk_default_value(self):
return "NULL"
def prep_for_iexact_query(self, x):
return x
def process_clob(self, value):
if value is None:
return ''
return force_text(value.read())
def quote_name(self, name):
# SQL92 requires delimited (quoted) names to be case-sensitive. When
# not quoted, Oracle has case-insensitive behavior for identifiers, but
# always defaults to uppercase.
# We simplify things by making Oracle identifiers always uppercase.
if not name.startswith('"') and not name.endswith('"'):
name = '"%s"' % truncate_name(name.upper(), self.max_name_length())
# Oracle puts the query text into a (query % args) construct, so % signs
# in names need to be escaped. The '%%' will be collapsed back to '%' at
# that stage so we aren't really making the name longer here.
name = name.replace('%', '%%')
return name.upper()
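# For example, quote_name('django_content_type') returns '"DJANGO_CONTENT_TYPE"';
# names longer than max_name_length() are shortened to a repeatable mangled
# form by truncate_name() before being quoted.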
def random_function_sql(self):
return "DBMS_RANDOM.RANDOM"
def regex_lookup(self, lookup_type):
if lookup_type == 'regex':
match_option = "'c'"
else:
match_option = "'i'"
return 'REGEXP_LIKE(%%s, %%s, %s)' % match_option
def return_insert_id(self):
return "RETURNING %s INTO %%s", (InsertIdVar(),)
def savepoint_create_sql(self, sid):
return convert_unicode("SAVEPOINT " + self.quote_name(sid))
def savepoint_rollback_sql(self, sid):
return convert_unicode("ROLLBACK TO SAVEPOINT " + self.quote_name(sid))
def sql_flush(self, style, tables, sequences, allow_cascade=False):
# Return a list of 'TRUNCATE x;', 'TRUNCATE y;',
# 'TRUNCATE z;'... style SQL statements
if tables:
# Oracle does support TRUNCATE, but it seems to get us into
# FK referential trouble, whereas DELETE FROM table works.
sql = ['%s %s %s;' % (
style.SQL_KEYWORD('DELETE'),
style.SQL_KEYWORD('FROM'),
style.SQL_FIELD(self.quote_name(table))
) for table in tables]
# Since we've just deleted all the rows, running our sequence
# ALTER code will reset the sequence to 0.
sql.extend(self.sequence_reset_by_name_sql(style, sequences))
return sql
else:
return []
def sequence_reset_by_name_sql(self, style, sequences):
sql = []
for sequence_info in sequences:
sequence_name = self._get_sequence_name(sequence_info['table'])
table_name = self.quote_name(sequence_info['table'])
column_name = self.quote_name(sequence_info['column'] or 'id')
query = self._sequence_reset_sql % {
'sequence': sequence_name,
'table': table_name,
'column': column_name,
}
sql.append(query)
return sql
def sequence_reset_sql(self, style, model_list):
from django.db import models
output = []
query = self._sequence_reset_sql
for model in model_list:
for f in model._meta.local_fields:
if isinstance(f, models.AutoField):
table_name = self.quote_name(model._meta.db_table)
sequence_name = self._get_sequence_name(model._meta.db_table)
column_name = self.quote_name(f.column)
output.append(query % {'sequence': sequence_name,
'table': table_name,
'column': column_name})
# Only one AutoField is allowed per model, so don't
# continue to loop
break
for f in model._meta.many_to_many:
if not f.remote_field.through:
table_name = self.quote_name(f.m2m_db_table())
sequence_name = self._get_sequence_name(f.m2m_db_table())
column_name = self.quote_name('id')
output.append(query % {'sequence': sequence_name,
'table': table_name,
'column': column_name})
return output
def start_transaction_sql(self):
return ''
def tablespace_sql(self, tablespace, inline=False):
if inline:
return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace)
else:
return "TABLESPACE %s" % self.quote_name(tablespace)
def adapt_datefield_value(self, value):
"""
Transform a date value to an object compatible with what is expected
by the backend driver for date columns.
The default implementation transforms the date to text, but that is not
necessary for Oracle.
"""
return value
def adapt_datetimefield_value(self, value):
"""
Transform a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
If a naive datetime is passed, it is assumed to be in UTC. Normally
Django's models.DateTimeField makes sure that, if USE_TZ is True, the
passed datetime is timezone-aware.
"""
if value is None:
return None
# Expression values are adapted by the database.
if hasattr(value, 'resolve_expression'):
return value
# cx_Oracle doesn't support tz-aware datetimes
if timezone.is_aware(value):
if settings.USE_TZ:
value = timezone.make_naive(value, self.connection.timezone)
else:
raise ValueError("Oracle backend does not support timezone-aware datetimes when USE_TZ is False.")
return Oracle_datetime.from_datetime(value)
def adapt_timefield_value(self, value):
if value is None:
return None
# Expression values are adapted by the database.
if hasattr(value, 'resolve_expression'):
return value
if isinstance(value, six.string_types):
return datetime.datetime.strptime(value, '%H:%M:%S')
# Oracle doesn't support tz-aware times
if timezone.is_aware(value):
raise ValueError("Oracle backend does not support timezone-aware times.")
return Oracle_datetime(1900, 1, 1, value.hour, value.minute,
value.second, value.microsecond)
def combine_expression(self, connector, sub_expressions):
"Oracle requires special cases for %% and & operators in query expressions"
if connector == '%%':
return 'MOD(%s)' % ','.join(sub_expressions)
elif connector == '&':
return 'BITAND(%s)' % ','.join(sub_expressions)
elif connector == '|':
lhs, rhs = sub_expressions
return 'BITAND(-%(lhs)s-1,%(rhs)s)+%(lhs)s' % {'lhs': lhs, 'rhs': rhs}
elif connector == '^':
return 'POWER(%s)' % ','.join(sub_expressions)
return super(DatabaseOperations, self).combine_expression(connector, sub_expressions)
def _get_sequence_name(self, table):
name_length = self.max_name_length() - 3
return '%s_SQ' % truncate_name(table, name_length).upper()
def _get_trigger_name(self, table):
name_length = self.max_name_length() - 3
return '%s_TR' % truncate_name(table, name_length).upper()
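# For example, _get_sequence_name('BLOG_ENTRY') -> 'BLOG_ENTRY_SQ' and
# _get_trigger_name('BLOG_ENTRY') -> 'BLOG_ENTRY_TR'; the table name is
# truncated to max_name_length() - 3 characters so the suffix still fits
# within Oracle's 30-character identifier limit.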
def bulk_insert_sql(self, fields, placeholder_rows):
return " UNION ALL ".join(
"SELECT %s FROM DUAL" % ", ".join(row)
for row in placeholder_rows
)
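# A sketch with two placeholder rows: bulk_insert_sql(fields, [('%s', '%s'), ('%s', '%s')])
# returns "SELECT %s, %s FROM DUAL UNION ALL SELECT %s, %s FROM DUAL".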
def subtract_temporals(self, internal_type, lhs, rhs):
if internal_type == 'DateField':
lhs_sql, lhs_params = lhs
rhs_sql, rhs_params = rhs
return "NUMTODSINTERVAL(%s - %s, 'DAY')" % (lhs_sql, rhs_sql), lhs_params + rhs_params
return super(DatabaseOperations, self).subtract_temporals(internal_type, lhs, rhs)
# ----------------------------------------------------------------------
#!/usr/bin/env python2
import collections
import pyximport
pyximport.install()
import re
from model import Ext, Chromosome
import argparse
import copy
import operator
import networkx as nx
from networkx.algorithms import connected_components, dfs_successors
import matplotlib.pyplot as plt
import file_ops
# HELPER functions:
def vertex_name(genome, gene, copy, ext):
return "%s%s_%s%s" % (genome, gene, copy, ext)
def matching_edge_name(gene, copyA, copyB, ext):
return "x_%s,%s" % (vertex_name("A", gene, copyA, ext), vertex_name("B", gene, copyB, ext))
def balancing_edge_name(genome, gene1, copy1, ext1, gene2, copy2, ext2):
if (gene1, copy1, ext1) > (gene2, copy2, ext2):
gene1, copy1, ext1, gene2, copy2, ext2 = gene2, copy2, ext2, gene1, copy1, ext1
return "w_%s,%s" % (vertex_name(genome, gene1, copy1, ext1), vertex_name(genome, gene2, copy2, ext2))
def define_y_label(gene_count):
y_label = {}
idx = 1
for genome in ["A", "B"]:
for gene, copy_dict in gene_count[genome].iteritems():
for copy_i, type_i in copy_dict.iteritems():
y_label[(genome, gene, copy_i, Ext.HEAD)] = idx
idx += 1
y_label[(genome, gene, copy_i, Ext.TAIL)] = idx
idx += 1
return y_label
def balancing_extremities(gene_copies, exclude=None):
if exclude is None:
exclude = set()
for gene, copy_dict in gene_copies.iteritems():
for copy_i, type_i in copy_dict.iteritems():
if type_i == CopyType.BALANCING:
if (gene, copy_i, Ext.HEAD) not in exclude:
yield (gene, copy_i, Ext.HEAD)
if (gene, copy_i, Ext.TAIL) not in exclude:
yield (gene, copy_i, Ext.TAIL)
class CopyType:
REAL, BALANCING = ['r', 'b']
# Helper to print cycles and paths in a readable order:
def sort_component(G, comp, fmt=True):
# get degree-1 vertices:
degree_one = [v for v in comp if G.degree(v) == 1]
if len(degree_one) > 0:
v = degree_one[0]
else:
v = list(comp)[0]
# import ipdb; ipdb.set_trace()
succ = dfs_successors(G, v)
initial = v
sort = []
while True:
if fmt:
sort.append("%s%s_%s%s" % v)
else:
sort.append(v)
if v not in succ:
break
v = succ[v][0]
if v == initial:
break
return sort
def add_capping_genes(genome_a, genome_b):
max_chromosomes = max(genome_a.n_chromosomes(), genome_b.n_chromosomes())
for genome in [genome_a, genome_b]:
copy_idx = 1
for c in genome.chromosomes:
if not c.circular:
c.gene_order.append(0)
c.copy_number.append(copy_idx)
copy_idx += 1
c.circular = True
for i in range(genome.n_chromosomes(), max_chromosomes):
genome.add_chromosome(Chromosome([0], copy_number=[copy_idx], circular=True))
copy_idx += 1
def fix_cycle_y_z(comp, y_label, y_fix, z_fix, vertices_to_remove):
# get indexes of the y_i:
indexes = [(v, y_label[v]) for v in comp]
min_label = min([x[1] for x in indexes])
for v, label in indexes:
y_fix[label] = min_label
z_fix[label] = 0
z_fix[min_label] = 1
vertices_to_remove.extend(comp)
def build_gene_copies_dict(all_genes, genome_a, genome_b):
# store the copy number for each 'real' gene in each genome:
gene_copies_a = genome_a.gene_copies()
gene_copies_b = genome_b.gene_copies()
gene_copies = {"A": {}, "B": {}}
for gene in all_genes:
gene_copies["A"][gene] = {cn: CopyType.REAL for cn in gene_copies_a[gene]}
gene_copies["B"][gene] = {cn: CopyType.REAL for cn in gene_copies_b[gene]}
# now complete the list with 'balancing' genes
for gene in all_genes:
copy_a = len(gene_copies["A"][gene])
copy_b = len(gene_copies["B"][gene])
# get the max copy number, or 0 if set is empty, to know which label is free for the newly added
# balancing copies;
max_copy_a = max([cn for cn, c_type in gene_copies["A"][gene].iteritems()] + [0])
max_copy_b = max([cn for cn, c_type in gene_copies["B"][gene].iteritems()] + [0])
if copy_a < copy_b:
gene_copies["A"][gene].update({bal_copy: CopyType.BALANCING for bal_copy in
xrange(max_copy_a + 1, max_copy_a + 1 + copy_b - copy_a)})
if copy_b < copy_a:
gene_copies["B"][gene].update({bal_copy: CopyType.BALANCING for bal_copy in
xrange(max_copy_b + 1, max_copy_b + 1 + copy_a - copy_b)})
return gene_copies
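# The resulting structure looks like (illustrative gene and copy numbers):
#   {"A": {7: {1: CopyType.REAL, 2: CopyType.BALANCING}},
#    "B": {7: {1: CopyType.REAL, 2: CopyType.REAL}}}
# i.e. genome A gets one balancing copy of gene 7 so both genomes end up with
# the same number of copies per family.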
def plot_bp(filename, master_graph, gene_copies, possible_matching, simplified=True):
# add isolated vertices (balancing extremities that are not fixed already)
for genome_i in ["A", "B"]:
for gene_i, copy_i, ext_i in balancing_extremities(gene_copies[genome_i]):
if gene_i in possible_matching and copy_i in possible_matching[gene_i][genome_i]:
# if (gene_i, copy_i) not in fixed_matching:
# print "add bal", (genome_i, gene_i, copy_i, ext_i)
master_graph.add_node((genome_i, gene_i, copy_i, ext_i))
# simplified:
if simplified:
edges = []
vertices = []
for comp in connected_components(master_graph):
if len(comp) == 1:
vertices.append(comp.pop())
continue
degree_one = tuple([v for v in comp if master_graph.degree(v) == 1])
edges.append(degree_one)
master_graph = nx.Graph()
master_graph.add_edges_from(edges)
master_graph.add_nodes_from(vertices)
# Relabel nodes to make it easier to read:
mapping = {}
normal = []
balancing = []
be = {genome_i: list(balancing_extremities(gene_copies[genome_i])) for genome_i in ["A", "B"]}
for v in master_graph.nodes():
genome_i, gene_i, copy_i, ext_i = v
mapping[v] = "$%s%s_{(%s)}^%s$" % v
if (gene_i, copy_i, ext_i) in be[genome_i]:
balancing.append(mapping[v])
else:
normal.append(mapping[v])
master_graph = nx.relabel_nodes(master_graph, mapping)
# Graphviz position:
# pos = nx.nx_agraph.graphviz_layout(master_graph, prog="fdp")
# custom position:
x_pos = 0
y_pos = {"A": 1, "B": 0}
pos = {}
for comp in sorted(connected_components(master_graph), key=lambda c: (-len(c), min(c))):
last_v = None
for v in sort_component(master_graph, comp, fmt=False):
if last_v == v[1]:
x_pos += 1
last_v = v[1]
pos[v] = (x_pos, y_pos[v[1]])
x_pos += 1
if x_pos > 7:
x_pos = 0
y_pos["A"] -= 2
y_pos["B"] -= 2
# draw and save:
for nodelist, color in [(normal, "lightgray"), (balancing, "lightblue")]:
nx.draw(master_graph, pos, font_size=5, nodelist=nodelist, node_color=color, linewidths=0.1, width=0.5,
node_size=400,
with_labels=True)
plt.savefig(filename, bbox_inches='tight')
def fix_new_matching(fixed_matching, edges_to_add, possible_matching, g_a, copy_a, copy_b):
fixed_matching[(g_a, copy_a)] = copy_b
# save edges to add to graph:
for ext in [Ext.HEAD, Ext.TAIL]:
edges_to_add.append((("A", g_a, copy_a, ext), ("B", g_a, copy_b, ext)))
# remove possible edges from other copies:
possible_matching[g_a]["A"].remove(copy_a)
possible_matching[g_a]["B"].remove(copy_b)
# if now there is just one possibility, also fix:
if len(possible_matching[g_a]["A"]) == 1:
copy_a = possible_matching[g_a]["A"].pop()
copy_b = possible_matching[g_a]["B"].pop()
fixed_matching[(g_a, copy_a)] = copy_b
del possible_matching[g_a]
# save edges to add to graph:
for ext in [Ext.HEAD, Ext.TAIL]:
edges_to_add.append((("A", g_a, copy_a, ext), ("B", g_a, copy_b, ext)))
######################################################################
# MAIN function
######################################################################
def dcj_dupindel_ilp(genome_a, genome_b, output, skip_balancing=False, fix_vars=True, solve=False, all_vs_all=False):
def solve_ilp(timelimit=60):
# import here, so only if actually solving we will need gurobi.
from gurobipy import read, GRB
# pycharm complains of gurobi commands, cannot see them from the import
model = read(filename)
# set some options:
# time limit in seconds:
model.params.timeLimit = timelimit
# not verbose:
# model.setParam('OutputFlag', False)
# MIP focus, from 0 to 3:
model.params.MIPFocus = 1 # best solutions, less focus on bounds.
model.optimize()
if model.status != GRB.Status.INFEASIBLE:
print('FINISHED: Best objective: %g' % model.objVal)
print('Optimization ended with status %d' % model.status)
model.write(filename + '.sol')
if model.status == GRB.INFEASIBLE:
model.computeIIS()
model.write("unfeasible.lp")
print('\nThe following constraint(s) cannot be satisfied:')
for c in model.getConstrs():
if c.IISConstr:
print('%s' % c.constrName)
else:
z = n = c = 0
solution_matching = collections.defaultdict(list)
matching_regexp = re.compile("x_A(\d+)_(\d+)h,B(\d+)_(\d+)h")
# get basic vars and matching:
for v in model.getVars():
if v.varName == "n":
n = v.x
elif v.varName == "c":
c = v.x
elif v.varName.startswith("z") and v.x >= 0.9:
z += 1
else:
m = matching_regexp.match(v.varName)
if m is not None and v.x == 1:
g_a, c_a, g_b, c_b = map(int, m.groups())
solution_matching[g_a].append((c_a, c_b))
from parse_orthology import build_correct_matching, parse_orthology_quality
correct_matching = build_correct_matching(genome_a, genome_b)
tp, fp, fn = parse_orthology_quality(solution_matching, correct_matching)
print "N: %d cycles:%d (%d fixed, %d from opt)" % (n, z + c, c, z)
print "Orthology. TP:%d FP:%d FN:%d" % (len(tp), len(fp), len(fn))
# print match_edges
# Now, analyse the BP graph, for the incomplete matching model, to find AA-, BB- and AB- components:
master_graph = nx.Graph()
# fixed vars:
# add matching edges of genes with single copy:
# for (gene, copy_a), copy_j in match_edges.iteritems():
for gene, pair_list in solution_matching.iteritems():
for copy_a, copy_b in pair_list:
for ext in [Ext.HEAD, Ext.TAIL]:
master_graph.add_edge(("A", gene, copy_a, ext), ("B", gene, copy_b, ext))
# add adjacency edges:
for genome, genome_name in [(genome_a, "A"), (genome_b, "B")]:
for (g_i, copy_a, e_i), (g_j, copy_j, e_j) in genome.adjacency_iter_with_copies():
master_graph.add_edge((genome_name, g_i, copy_a, e_i), (genome_name, g_j, copy_j, e_j))
count = {"A": 0, "B": 0, "AB": 0}
c = 0
# print "C:", len([x for x in connected_components(master_graph)])
for comp in connected_components(master_graph):
degree_one = [v for v in comp if master_graph.degree(v) == 1]
if len(degree_one) == 0:
c += 1
else:
if len(degree_one) != 2:
import ipdb;
ipdb.set_trace()
if degree_one[0][0] == degree_one[1][0]:
count[degree_one[0][0]] += 1
else:
count["AB"] += 1
print count
if skip_balancing:
print "Corrected distance: %d" % (model.objVal + count["AB"] / 2)
return model
# copy genomes to possibly make some changes:
genome_a = copy.deepcopy(genome_a)
genome_b = copy.deepcopy(genome_b)
add_capping_genes(genome_a, genome_b)
# since the gene set might be different for each genome, find all genes:
all_genes = genome_a.gene_set().union(genome_b.gene_set())
# find all gene copies
gene_copies = build_gene_copies_dict(all_genes, genome_a, genome_b)
# count balancing genes:
bal = {
g: sum([len([c for c in gene_copies[g][gene].itervalues() if c == CopyType.BALANCING]) for gene in all_genes])
for g in ["A", "B"]}
print "Balancing genes:A=%(A)d, B=%(B)d" % bal
# define the y labels (vertex = genome,gene,copy,ext) -> integer 1..n
y_label = define_y_label(gene_copies)
# store all possible matchings (edges) from each family:
fixed_matching = {}
possible_matching = {}
for gene in all_genes:
# if only 1 copy, matching is fixed:
if len(gene_copies["A"][gene]) == 1:
# fix the matching, then remove from the available copies
copy_a, type_a = gene_copies["A"][gene].items()[0]
copy_j, type_b = gene_copies["B"][gene].items()[0]
fixed_matching[(gene, copy_a)] = copy_j
else:
possible_matching[gene] = {"A": {copy_i for copy_i, type_i in gene_copies["A"][gene].items()},
"B": {copy_i for copy_i, type_i in gene_copies["B"][gene].items()}}
# Build the BP graph of fixed matchings to try to find more variables to fix:
y_fix = {}
z_fix = {}
balancing_fix = {"A": {}, "B": {}}
if fix_vars:
master_graph = nx.Graph()
# fixed vars:
# add matching edges of genes with single copy:
for (gene, copy_a), copy_j in fixed_matching.iteritems():
for ext in [Ext.HEAD, Ext.TAIL]:
master_graph.add_edge(("A", gene, copy_a, ext), ("B", gene, copy_j, ext))
# add adjacency edges:
for genome, genome_name in [(genome_a, "A"), (genome_b, "B")]:
for (g_i, copy_a, e_i), (g_j, copy_j, e_j) in genome.adjacency_iter_with_copies():
master_graph.add_edge((genome_name, g_i, copy_a, e_i), (genome_name, g_j, copy_j, e_j))
# Search components to fix:
rescan = True
edges_to_add = []
vertices_to_remove = []
ab_components = set()
while rescan:
rescan = False
# Pre-scan:
# add and remove edges detected from previous rounds:
master_graph.add_edges_from(edges_to_add)
master_graph.remove_nodes_from(vertices_to_remove)
edges_to_add = []
vertices_to_remove = []
# fix AB-components; while I have at least 2, join pairs arbitrarily:
while len(ab_components) > 1:
a_i, b_i = ab_components.pop()
a_j, b_j = ab_components.pop()
master_graph.add_edge(a_i, a_j)
balancing_fix["A"][a_i[1:]] = a_j[1:]
balancing_fix["A"][a_j[1:]] = a_i[1:]
master_graph.add_edge(b_i, b_j)
balancing_fix["B"][b_i[1:]] = b_j[1:]
balancing_fix["B"][b_j[1:]] = b_i[1:]
# Now I search for vertices that have only balancing vertices as matching
# candidates. If that is the case, I can fix them arbitrarily.
fix_only_bal = True
if fix_only_bal:
for gene in sorted(possible_matching):
set_a = possible_matching[gene]["A"]
set_b = possible_matching[gene]["B"]
if all([gene_copies["A"][gene][copy_a] == CopyType.BALANCING for copy_a in set_a]) or all(
[gene_copies["B"][gene][copy_b] == CopyType.BALANCING for copy_b in set_b]):
for copy_a, copy_b in zip(set_a, set_b):
fixed_matching[(gene, copy_a)] = copy_b
# save edges to add to graph:
for ext in [Ext.HEAD, Ext.TAIL]:
# edges_to_add.append((("A", gene, copy_a, ext), ("B", gene, copy_b, ext)))
master_graph.add_edge(("A", gene, copy_a, ext), ("B", gene, copy_b, ext))
rescan = True
# remove from possible matching:
del possible_matching[gene]
# now loop for each connected component, fixing cycles and trying to close paths to cycles when possible.
for comp in connected_components(master_graph):
# can only consider even components;
if len(comp) % 2 != 0:
continue
# get degree-1 vertices:
degree_one = [v for v in comp if master_graph.degree(v) == 1]
# if two degree one vertices, it is a path;
if len(degree_one) == 2:
genome_i, g_i, copy_a, e_i = degree_one[0]
genome_j, g_j, copy_j, e_j = degree_one[1]
# 1 - check if both nodes are balancing, to find AA-, BB- and AB- paths that can be fixed.
i_is_balancing = g_i != 0 and gene_copies[genome_i][g_i][copy_a] == CopyType.BALANCING
j_is_balancing = g_j != 0 and gene_copies[genome_j][g_j][copy_j] == CopyType.BALANCING
if i_is_balancing and j_is_balancing:
# open-path, both ends are balancing.
# If AA- or BB-path, close it to a cycle:
if genome_i == genome_j:
# fix the cycle:
fix_cycle_y_z(comp, y_label, y_fix, z_fix, vertices_to_remove)
# fix the balancing variables if we have them:
if not skip_balancing:
balancing_fix[genome_i][degree_one[0][1:]] = degree_one[1][1:]
balancing_fix[genome_i][degree_one[1][1:]] = degree_one[0][1:]
else:
# If not, it is AB-, add to the list to try to make pairs.
if skip_balancing: # if not using balancing edges, I can fix the AB directly, instead of
# doing the merge in pairs;
fix_cycle_y_z(comp, y_label, y_fix, z_fix, vertices_to_remove)
else:
# merge in pairs:
ab_components.add(tuple(sorted(degree_one)))
if len(ab_components) > 1:
rescan = True
# Not an open path; then, check if the path has homologous extremities at both
# ends, so I can close it into a cycle:
elif genome_i != genome_j and g_i == g_j and e_i == e_j:
# invert to put genome A always in variables _i :
if genome_j == "A":
genome_i, g_i, copy_a, e_i, genome_j, g_j, copy_j, e_j = genome_j, g_j, copy_j, e_j, genome_i, g_i, copy_a, e_i
# check conflict, only add edge if it's in the allowed edges:
if g_i in possible_matching and copy_a in possible_matching[g_i]["A"] and copy_j in \
possible_matching[g_i]["B"]:
# new edges, re-scan:
rescan = True
fix_new_matching(fixed_matching, edges_to_add, possible_matching, g_i, copy_a, copy_j)
# if there are no degree one vertices, it is a cycle; I can fix the y_i and z_i for this cycle:
elif len(degree_one) == 0:
fix_cycle_y_z(comp, y_label, y_fix, z_fix, vertices_to_remove)
rescan = True
# DRAW:
draw_bp = False
if draw_bp:
plot_bp('graph.pdf', master_graph, gene_copies, possible_matching)
# all fixed, generate ILP
# to make it easier to find the matching edges, especially when limiting edges from balancing genes,
# I will build a gene connections graph;
gene_connection = nx.DiGraph() # make it directed, so the vertex of A is always 1st on the edge tuple.
for gene in possible_matching.iterkeys():
set_a = possible_matching[gene]["A"]
set_b = possible_matching[gene]["B"]
# All vs all model:
if all_vs_all:
for copy_a in set_a:
for copy_b in set_b:
gene_connection.add_edge(("A", gene, copy_a), ("B", gene, copy_b))
else:
# try to minimise needed matching edges for balancing nodes:
real_a = [cp for cp in set_a if gene_copies["A"][gene][cp] == CopyType.REAL]
real_b = [cp for cp in set_b if gene_copies["B"][gene][cp] == CopyType.REAL]
# all real, then all-vs-all:
if len(real_a) == len(real_b):
for copy_a in set_a:
for copy_b in set_b:
gene_connection.add_edge(("A", gene, copy_a), ("B", gene, copy_b))
# a has balancing:
if len(real_a) < len(real_b):
balancing_a = [cp for cp in set_a if gene_copies["A"][gene][cp] == CopyType.BALANCING]
# the real in A match the real in B (which are all)
for copy_a in real_a:
for copy_b in set_b:
gene_connection.add_edge(("A", gene, copy_a), ("B", gene, copy_b))
# then, the balancing in A have len(real_a)+1 incident edges
list_b = list(set_b)
for idx, copy_a in enumerate(balancing_a):
for j in range(len(real_a) + 1):
gene_connection.add_edge(("A", gene, copy_a), ("B", gene, list_b[idx + j]))
# b has balancing:
else:
balancing_b = [cp for cp in set_b if gene_copies["B"][gene][cp] == CopyType.BALANCING]
# the real in B match the real in A (which are all)
for copy_b in real_b:
for copy_a in set_a:
gene_connection.add_edge(("A", gene, copy_a), ("B", gene, copy_b))
# then, the balancing in B have len(real_b)+1 incident edges
list_a = list(set_a)
for idx, copy_b in enumerate(balancing_b):
for j in range(len(real_b) + 1):
gene_connection.add_edge(("A", gene, list_a[idx + j]), ("B", gene, copy_b))
# Start building constraints:
constraints = []
# consistency and matching 1-to-1
# Fixed matching:
# sorting just to make it nicer looking:
constraints.append("\ Fixed matching:")
for (gene, copy_a), copy_b in sorted(fixed_matching.items(), key=lambda pair: pair[0]):
constraints.append("%s = 1" % matching_edge_name(gene, copy_a, copy_b, Ext.TAIL))
constraints.append("%s = 1" % matching_edge_name(gene, copy_a, copy_b, Ext.HEAD))
# HEAD TAIL consistency:
constraints.append("\ Head/Tail consistency:")
for (_, gene_a, copy_a), (_, gene_b, copy_b) in gene_connection.edges_iter():
constraints.append("%s - %s = 0" % (
matching_edge_name(gene_a, copy_a, copy_b, Ext.TAIL),
matching_edge_name(gene_a, copy_a, copy_b, Ext.HEAD)))
# 1 Matching per node :
constraints.append("\ Degree 1 per node (Matching):")
# for all vertices:
for v in gene_connection.nodes_iter():
# find the incident edges:
if v[0] == "A":
edges = gene_connection.out_edges_iter
else:
edges = gene_connection.in_edges_iter
incident = [matching_edge_name(gene_a, copy_a, copy_b, Ext.TAIL) for
(_, gene_a, copy_a), (_, gene_b, copy_b) in edges(v)]
# sum of incidents is 1:
constraints.append("%s = 1" % (" + ".join(incident)))
if not skip_balancing:
constraints.append("\ Balancing:")
for genome in ["A", "B"]:
constraints.append("\ Genome %s" % genome)
for gene_i, copy_a, ext_i in balancing_extremities(gene_copies[genome]):
# check if fixed:
if (gene_i, copy_a, ext_i) in balancing_fix[genome]:
gene_j, copy_j, ext_j = balancing_fix[genome][(gene_i, copy_a, ext_i)]
if (gene_i, copy_a, ext_i) < (gene_j, copy_j, ext_j):
constraints.append(
"%s = 1" % balancing_edge_name(genome, gene_i, copy_a, ext_i, gene_j, copy_j, ext_j))
# if not, matching 1-to-1:
else:
constraints.append(
" + ".join([balancing_edge_name(genome, gene_i, copy_a, ext_i, gene_j, copy_j, ext_j) for
gene_j, copy_j, ext_j in
balancing_extremities(gene_copies[genome], exclude=balancing_fix[genome].keys())
if
(gene_i, copy_a, ext_i) != (gene_j, copy_j, ext_j)]) + " = 1")
constraints.append("\ Labelling")
# for each adjacency, fix the label of adjacent genes:
constraints.append("\\ Adjacent nodes have the same label:")
for genome, genome_name in [(genome_a, "A"), (genome_b, "B")]:
for (g_i, copy_a, ext_i), (g_j, copy_j, ext_j) in genome.adjacency_iter_with_copies():
v_i = genome_name, g_i, copy_a, ext_i
v_j = genome_name, g_j, copy_j, ext_j
# if already fixed, skip
if y_label[v_i] in y_fix and y_label[v_j] in y_fix:
continue
# if the edge is 0 for sure, also skip:
constraints.append("y_%s - y_%s = 0 \\ %s <-> %s " % (y_label[v_i], y_label[v_j], v_i, v_j))
#
constraints.append("\\ Matching extremities have the same label:")
# if extremities are matched, but I don't know the y_i (cycle was not closed in the fixing phase),
# then I know that the y_i's of these extremities are equal:
constraints.append("\\ Fixed matching without fixed y_i:")
for (gene, copy_a) in sorted(fixed_matching):
copy_j = fixed_matching[(gene, copy_a)]
for ext in [Ext.HEAD, Ext.TAIL]:
y_i = y_label[("A", gene, copy_a, ext)]
y_j = y_label[("B", gene, copy_j, ext)]
# only add if this y_i's aren't already fixed
if y_i not in y_fix and y_j not in y_fix:
constraints.append("y_%s - y_%s = 0 " % (y_i, y_j))
# for the "open" matching, for each edge I add the "y fixing" restrictions, that force the y_i's
# to be equal whenever the edge variable is set to 1.
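    # (Illustrative reading of the pair of constraints below, not in the original comments:
    # with matching edge variable x and labels y_a, y_b, the row "y_a - y_b + y_a x <= y_a"
    # reduces to y_a <= y_b when x = 1 and to the trivial -y_b <= 0 when x = 0; together with
    # the symmetric row this forces y_a = y_b exactly when the edge is chosen.)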
constraints.append("\\ Open matching:")
for (_, gene_a, copy_a), (_, gene_b, copy_b) in gene_connection.edges_iter():
for ext in [Ext.HEAD, Ext.TAIL]:
y_a = y_label[("A", gene_a, copy_a, ext)]
y_b = y_label[("B", gene_b, copy_b, ext)]
constraints.append(
"y_%s - y_%s + %s %s <= %d" % (
y_a, y_b, y_a, matching_edge_name(gene_a, copy_a, copy_b, ext), y_a))
constraints.append(
"y_%s - y_%s + %s %s <= %d" % (
y_b, y_a, y_b, matching_edge_name(gene_a, copy_a, copy_b, ext), y_b))
if not skip_balancing:
constraints.append("\\ Balancing edges have same label:")
for genome in ["A", "B"]:
constraints.append("\\ Genome %s" % genome)
for gene_i, copy_a, ext_i in balancing_extremities(gene_copies[genome],
exclude=balancing_fix[genome].keys()):
for gene_j, copy_j, ext_j in balancing_extremities(gene_copies[genome],
exclude=balancing_fix[genome].keys()):
if (gene_i, copy_a, ext_i) >= (gene_j, copy_j, ext_j):
continue
y_i = y_label[(genome, gene_i, copy_a, ext_i)]
y_j = y_label[(genome, gene_j, copy_j, ext_j)]
# should not have someone here if I'm excluding fixed edges:
if y_i in y_fix and y_j in y_fix:
continue
constraints.append("y_%s - y_%s + %s %s <= %d" % (
y_i, y_j, y_i, balancing_edge_name(genome, gene_i, copy_a, ext_i, gene_j, copy_j, ext_j), y_i))
constraints.append("y_%s - y_%s + %s %s <= %d" % (
y_j, y_i, y_j, balancing_edge_name(genome, gene_i, copy_a, ext_i, gene_j, copy_j, ext_j), y_j))
    # z variables: since every cycle has to contain vertices from both genomes, we only add z variables
    # for genome A, which has the smallest labels, so a genome B z variable will never be = 1.
constraints.append("\\ Z variables")
for vertex, i in sorted(y_label.items(), key=operator.itemgetter(1)):
if vertex[0] == "A":
if i not in z_fix:
constraints.append("%d z_%s - y_%s <= 0" % (i, i, i))
#
# # number of genes, to fix distance:
n_genes = sum([len(copies) for copies in gene_copies["A"].itervalues()])
constraints.append("n = %d" % n_genes)
# # number of fixed cycles
constraints.append("c = %d" % (sum(z_fix.itervalues())))
#
# # bounds:
bounds = []
for i in sorted(y_label.itervalues()):
if i not in y_fix:
bounds.append("y_%d <= %d" % (i, i))
#
# # variables:
binary = []
#
# # matching edges
matching = ["\ Fixed matching:"]
for (gene, copy_a), copy_b in fixed_matching.iteritems():
matching.append(matching_edge_name(gene, copy_a, copy_b, Ext.TAIL))
matching.append(matching_edge_name(gene, copy_a, copy_b, Ext.HEAD))
matching.append("\ Open matching:")
for (_, gene_a, copy_a), (_, gene_b, copy_b) in gene_connection.edges_iter():
for ext in [Ext.HEAD, Ext.TAIL]:
matching.append(matching_edge_name(gene_a, copy_a, copy_b, ext))
print "%d fixed matching edges" % (len(fixed_matching) * 2)
print "%d open matching edges" % (len(gene_connection.edges()) * 2)
binary.extend(matching)
if not skip_balancing:
# balancing edges:
balancing_edges = [balancing_edge_name(genome, gene_i, copy_a, ext_i, gene_j, copy_j, ext_j) for
genome
in ["A", "B"] for gene_i, copy_a, ext_i in
balancing_extremities(gene_copies[genome], exclude=balancing_fix[genome].keys()) for
gene_j, copy_j, ext_j
in balancing_extremities(gene_copies[genome], exclude=balancing_fix[genome].keys()) if
(gene_i, copy_a, ext_i) < (gene_j, copy_j, ext_j)]
print "%d balancing edges" % len(balancing_edges)
binary.extend(balancing_edges)
#
# z cycles:
for vertex, i in sorted(y_label.items(), key=operator.itemgetter(1)):
if i in z_fix: # and z_fix[i] == 0:
continue
if vertex[0] == "B":
continue
binary.append("z_%d" % i)
#
# # Y label are general:
# TODO: remove unused y' and z's from model. If y=1, it can be removed, just set z=1.
general = []
for vertex, i in sorted(y_label.items(), key=operator.itemgetter(1)):
if i not in y_fix:
general.append("y_%d" % i)
#
# # number of genes and fixed cycles:
general.append("n")
general.append("c")
# # objective function:
z_obj = " - ".join(["z_%d" % i for vertex, i in sorted(y_label.items(), key=operator.itemgetter(1)) if
vertex[0] == "A" and i not in z_fix])
objective = ["obj: n - c %s" % ("- " + z_obj if len(z_obj) > 0 else "")]
# write ILP:
with open(output, "w") as f:
for header, lines in [("Minimize", objective), ("Subject to", constraints),
("Bounds", bounds), ("Binary", binary), ("General", general)]:
print >> f, header
print >> f, "\n".join(lines)
if solve:
model = solve_ilp(timelimit=60)
return model
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Generates and optionally solve an ILP for the DCJ duplication and indel distance.")
parser.add_argument("-s", "--solve", action="store_true", default=False, help="Solve the model with Gurobi.")
parser.add_argument("-sb", "--skip_balancing", action="store_true", default=False,
help="Do not add balancing edges.")
parser.add_argument("-aa", "--all_vs_all", action="store_true", default=False,
help="All vs all edges from balancing nodes. More edges, just to compare.")
parser.add_argument("-sf", "--skip_fixing", action="store_false", default=True,
help="Do not try to fix variables.")
parser.add_argument("-t", "--timelimit", type=int, default=60,
help="Time limit in seconds for the solver. (default 60 secs.)")
parser.add_argument("-o", "--fileout", type=str,
help="Name for the ILP file.")
input_type = parser.add_mutually_exclusive_group(required=True)
input_type.add_argument("-g", type=str, nargs=3, help="Genomes file, idx 1 and 2 of genomes (0-indexed).")
input_type.add_argument("-c", type=str, nargs=2, help="Two coser files.")
param = parser.parse_args()
if param.g is not None:
filename, n1, n2 = param.g
genomes = file_ops.open_genomes_with_copy_number(filename)
g1 = genomes[int(n1)]
g2 = genomes[int(n2)]
elif param.c is not None:
g1 = file_ops.open_coser_genome(param.c[0])
g2 = file_ops.open_coser_genome(param.c[1])
filename = "ilp"
if param.fileout is None:
filename = "%s_%s_%s%s.lp" % (filename, g1.name, g2.name, "_nobal" if param.skip_balancing else "")
else:
filename = param.fileout
dcj_dupindel_ilp(g1, g2, filename, skip_balancing=param.skip_balancing, fix_vars=param.skip_fixing,
solve=param.solve, all_vs_all=param.all_vs_all)
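# Illustrative invocation (hypothetical script and file names, not from the original source):
#   python dcj_dupindel_ilp.py -g genomes.txt 0 1 -o pair_0_1.lp -s
# reads genomes 0 and 1 from genomes.txt, writes the ILP to pair_0_1.lp and, because of -s,
# solves it with Gurobi.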
|
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.cells import opts as cells_opts
from nova.cells import rpcapi as cells_rpcapi
from nova.compute import flavors
from nova import db
from nova import exception
from nova.i18n import _LE
from nova import notifications
from nova import objects
from nova.objects import base
from nova.objects import fields
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import utils
from oslo.config import cfg
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# List of fields that can be joined in DB layer.
_INSTANCE_OPTIONAL_JOINED_FIELDS = ['metadata', 'system_metadata',
'info_cache', 'security_groups',
'pci_devices']
# These are fields that are optional but don't translate to db columns
_INSTANCE_OPTIONAL_NON_COLUMN_FIELDS = ['fault', 'numa_topology']
# These are fields that can be specified as expected_attrs
INSTANCE_OPTIONAL_ATTRS = (_INSTANCE_OPTIONAL_JOINED_FIELDS +
_INSTANCE_OPTIONAL_NON_COLUMN_FIELDS)
# These are fields that most query calls load by default
INSTANCE_DEFAULT_FIELDS = ['metadata', 'system_metadata',
'info_cache', 'security_groups']
def _expected_cols(expected_attrs):
"""Return expected_attrs that are columns needing joining."""
if not expected_attrs:
return expected_attrs
return [attr for attr in expected_attrs
if attr in _INSTANCE_OPTIONAL_JOINED_FIELDS]
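# Illustrative example (not in the original): _expected_cols(['metadata', 'fault']) returns
# ['metadata'], because 'fault' is an optional attribute that is not a joinable DB column.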
class Instance(base.NovaPersistentObject, base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: Added info_cache
# Version 1.2: Added security_groups
# Version 1.3: Added expected_vm_state and admin_state_reset to
# save()
# Version 1.4: Added locked_by and deprecated locked
# Version 1.5: Added cleaned
# Version 1.6: Added pci_devices
# Version 1.7: String attributes updated to support unicode
# Version 1.8: 'security_groups' and 'pci_devices' cannot be None
# Version 1.9: Make uuid a non-None real string
# Version 1.10: Added use_slave to refresh and get_by_uuid
# Version 1.11: Update instance from database during destroy
# Version 1.12: Added ephemeral_key_uuid
# Version 1.13: Added delete_metadata_key()
# Version 1.14: Added numa_topology
# Version 1.15: PciDeviceList 1.1
VERSION = '1.15'
fields = {
'id': fields.IntegerField(),
'user_id': fields.StringField(nullable=True),
'project_id': fields.StringField(nullable=True),
'image_ref': fields.StringField(nullable=True),
'kernel_id': fields.StringField(nullable=True),
'ramdisk_id': fields.StringField(nullable=True),
'hostname': fields.StringField(nullable=True),
'launch_index': fields.IntegerField(nullable=True),
'key_name': fields.StringField(nullable=True),
'key_data': fields.StringField(nullable=True),
'power_state': fields.IntegerField(nullable=True),
'vm_state': fields.StringField(nullable=True),
'task_state': fields.StringField(nullable=True),
'memory_mb': fields.IntegerField(nullable=True),
'vcpus': fields.IntegerField(nullable=True),
'root_gb': fields.IntegerField(nullable=True),
'ephemeral_gb': fields.IntegerField(nullable=True),
'ephemeral_key_uuid': fields.UUIDField(nullable=True),
'host': fields.StringField(nullable=True),
'node': fields.StringField(nullable=True),
'instance_type_id': fields.IntegerField(nullable=True),
'user_data': fields.StringField(nullable=True),
'reservation_id': fields.StringField(nullable=True),
'scheduled_at': fields.DateTimeField(nullable=True),
'launched_at': fields.DateTimeField(nullable=True),
'terminated_at': fields.DateTimeField(nullable=True),
'availability_zone': fields.StringField(nullable=True),
'display_name': fields.StringField(nullable=True),
'display_description': fields.StringField(nullable=True),
'launched_on': fields.StringField(nullable=True),
# NOTE(jdillaman): locked deprecated in favor of locked_by,
# to be removed in Icehouse
'locked': fields.BooleanField(default=False),
'locked_by': fields.StringField(nullable=True),
'os_type': fields.StringField(nullable=True),
'architecture': fields.StringField(nullable=True),
'vm_mode': fields.StringField(nullable=True),
'uuid': fields.UUIDField(),
'root_device_name': fields.StringField(nullable=True),
'default_ephemeral_device': fields.StringField(nullable=True),
'default_swap_device': fields.StringField(nullable=True),
'config_drive': fields.StringField(nullable=True),
'access_ip_v4': fields.IPV4AddressField(nullable=True),
'access_ip_v6': fields.IPV6AddressField(nullable=True),
'auto_disk_config': fields.BooleanField(default=False),
'progress': fields.IntegerField(nullable=True),
'shutdown_terminate': fields.BooleanField(default=False),
'disable_terminate': fields.BooleanField(default=False),
'cell_name': fields.StringField(nullable=True),
'metadata': fields.DictOfStringsField(),
'system_metadata': fields.DictOfNullableStringsField(),
'info_cache': fields.ObjectField('InstanceInfoCache',
nullable=True),
'security_groups': fields.ObjectField('SecurityGroupList'),
'fault': fields.ObjectField('InstanceFault', nullable=True),
'cleaned': fields.BooleanField(default=False),
'pci_devices': fields.ObjectField('PciDeviceList', nullable=True),
'numa_topology': fields.ObjectField('InstanceNUMATopology',
nullable=True)
}
obj_extra_fields = ['name']
def __init__(self, *args, **kwargs):
super(Instance, self).__init__(*args, **kwargs)
self._reset_metadata_tracking()
def _reset_metadata_tracking(self, fields=None):
if fields is None or 'system_metadata' in fields:
self._orig_system_metadata = (dict(self.system_metadata) if
'system_metadata' in self else {})
if fields is None or 'metadata' in fields:
self._orig_metadata = (dict(self.metadata) if
'metadata' in self else {})
def obj_reset_changes(self, fields=None):
super(Instance, self).obj_reset_changes(fields)
self._reset_metadata_tracking(fields=fields)
def obj_what_changed(self):
changes = super(Instance, self).obj_what_changed()
if 'metadata' in self and self.metadata != self._orig_metadata:
changes.add('metadata')
if 'system_metadata' in self and (self.system_metadata !=
self._orig_system_metadata):
changes.add('system_metadata')
return changes
@classmethod
def _obj_from_primitive(cls, context, objver, primitive):
self = super(Instance, cls)._obj_from_primitive(context, objver,
primitive)
self._reset_metadata_tracking()
return self
def obj_make_compatible(self, primitive, target_version):
target_version = utils.convert_version_to_tuple(target_version)
unicode_attributes = ['user_id', 'project_id', 'image_ref',
'kernel_id', 'ramdisk_id', 'hostname',
'key_name', 'key_data', 'host', 'node',
'user_data', 'availability_zone',
'display_name', 'display_description',
'launched_on', 'locked_by', 'os_type',
'architecture', 'vm_mode', 'root_device_name',
'default_ephemeral_device',
'default_swap_device', 'config_drive',
'cell_name']
if target_version < (1, 14) and 'numa_topology' in primitive:
del primitive['numa_topology']
if target_version < (1, 10) and 'info_cache' in primitive:
# NOTE(danms): Instance <= 1.9 (havana) had info_cache 1.4
self.info_cache.obj_make_compatible(primitive['info_cache'],
'1.4')
primitive['info_cache']['nova_object.version'] = '1.4'
if target_version < (1, 7):
# NOTE(danms): Before 1.7, we couldn't handle unicode in
# string fields, so squash it here
for field in [x for x in unicode_attributes if x in primitive
and primitive[x] is not None]:
primitive[field] = primitive[field].encode('ascii', 'replace')
if target_version < (1, 15) and 'pci_devices' in primitive:
# NOTE(baoli): Instance <= 1.14 (icehouse) had PciDeviceList 1.0
# NOTE(vish): pci_devices is a list object so we must pull the
# underlying primitive out of the nova_object_data.
self.pci_devices.obj_make_compatible(
primitive['pci_devices']['nova_object.data'], '1.0')
primitive['pci_devices']['nova_object.version'] = '1.0'
if target_version < (1, 6):
# NOTE(danms): Before 1.6 there was no pci_devices list
if 'pci_devices' in primitive:
del primitive['pci_devices']
@property
def name(self):
try:
base_name = CONF.instance_name_template % self.id
except TypeError:
# Support templates like "uuid-%(uuid)s", etc.
info = {}
# NOTE(russellb): Don't use self.iteritems() here, as it will
# result in infinite recursion on the name property.
for key in self.fields:
if key == 'name':
# NOTE(danms): prevent recursion
continue
elif not self.obj_attr_is_set(key):
# NOTE(danms): Don't trigger lazy-loads
continue
info[key] = self[key]
try:
base_name = CONF.instance_name_template % info
except KeyError:
base_name = self.uuid
return base_name
@staticmethod
def _from_db_object(context, instance, db_inst, expected_attrs=None):
"""Method to help with migration to objects.
Converts a database entity to a formal object.
"""
instance._context = context
if expected_attrs is None:
expected_attrs = []
# Most of the field names match right now, so be quick
for field in instance.fields:
if field in INSTANCE_OPTIONAL_ATTRS:
continue
elif field == 'deleted':
instance.deleted = db_inst['deleted'] == db_inst['id']
elif field == 'cleaned':
instance.cleaned = db_inst['cleaned'] == 1
else:
instance[field] = db_inst[field]
if 'metadata' in expected_attrs:
instance['metadata'] = utils.instance_meta(db_inst)
if 'system_metadata' in expected_attrs:
instance['system_metadata'] = utils.instance_sys_meta(db_inst)
if 'fault' in expected_attrs:
instance['fault'] = (
objects.InstanceFault.get_latest_for_instance(
context, instance.uuid))
if 'numa_topology' in expected_attrs:
instance._load_numa_topology()
if 'info_cache' in expected_attrs:
if db_inst['info_cache'] is None:
instance.info_cache = None
elif not instance.obj_attr_is_set('info_cache'):
# TODO(danms): If this ever happens on a backlevel instance
# passed to us by a backlevel service, things will break
instance.info_cache = objects.InstanceInfoCache(context)
if instance.info_cache is not None:
instance.info_cache._from_db_object(context,
instance.info_cache,
db_inst['info_cache'])
# TODO(danms): If we are updating these on a backlevel instance,
# we'll end up sending back new versions of these objects (see
        # above note for new info_caches)
if 'pci_devices' in expected_attrs:
pci_devices = base.obj_make_list(
context, objects.PciDeviceList(context),
objects.PciDevice, db_inst['pci_devices'])
instance['pci_devices'] = pci_devices
if 'security_groups' in expected_attrs:
sec_groups = base.obj_make_list(
context, objects.SecurityGroupList(context),
objects.SecurityGroup, db_inst['security_groups'])
instance['security_groups'] = sec_groups
instance.obj_reset_changes()
return instance
@base.remotable_classmethod
def get_by_uuid(cls, context, uuid, expected_attrs=None, use_slave=False):
if expected_attrs is None:
expected_attrs = ['info_cache', 'security_groups']
columns_to_join = _expected_cols(expected_attrs)
db_inst = db.instance_get_by_uuid(context, uuid,
columns_to_join=columns_to_join,
use_slave=use_slave)
return cls._from_db_object(context, cls(), db_inst,
expected_attrs)
@base.remotable_classmethod
def get_by_id(cls, context, inst_id, expected_attrs=None):
if expected_attrs is None:
expected_attrs = ['info_cache', 'security_groups']
columns_to_join = _expected_cols(expected_attrs)
db_inst = db.instance_get(context, inst_id,
columns_to_join=columns_to_join)
return cls._from_db_object(context, cls(), db_inst,
expected_attrs)
@base.remotable
def create(self, context):
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason='already created')
updates = self.obj_get_changes()
expected_attrs = [attr for attr in INSTANCE_DEFAULT_FIELDS
if attr in updates]
if 'security_groups' in updates:
updates['security_groups'] = [x.name for x in
updates['security_groups']]
if 'info_cache' in updates:
updates['info_cache'] = {
'network_info': updates['info_cache'].network_info.json()
}
numa_topology = updates.pop('numa_topology', None)
db_inst = db.instance_create(context, updates)
if numa_topology:
expected_attrs.append('numa_topology')
numa_topology.instance_uuid = db_inst['uuid']
numa_topology.create(context)
self._from_db_object(context, self, db_inst, expected_attrs)
@base.remotable
def destroy(self, context):
if not self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='destroy',
reason='already destroyed')
if not self.obj_attr_is_set('uuid'):
raise exception.ObjectActionError(action='destroy',
reason='no uuid')
if not self.obj_attr_is_set('host') or not self.host:
# NOTE(danms): If our host is not set, avoid a race
constraint = db.constraint(host=db.equal_any(None))
else:
constraint = None
try:
db_inst = db.instance_destroy(context, self.uuid,
constraint=constraint)
self._from_db_object(context, self, db_inst)
except exception.ConstraintNotMet:
raise exception.ObjectActionError(action='destroy',
reason='host changed')
delattr(self, base.get_attrname('id'))
def _save_info_cache(self, context):
self.info_cache.save(context)
def _save_security_groups(self, context):
for secgroup in self.security_groups:
secgroup.save(context)
self.security_groups.obj_reset_changes()
def _save_fault(self, context):
# NOTE(danms): I don't think we need to worry about this, do we?
pass
def _save_numa_topology(self, context):
# NOTE(ndipanov): No need for this yet.
pass
def _save_pci_devices(self, context):
# NOTE(yjiang5): All devices held by PCI tracker, only PCI tracker
# permitted to update the DB. all change to devices from here will
# be dropped.
pass
@base.remotable
def save(self, context, expected_vm_state=None,
expected_task_state=None, admin_state_reset=False):
"""Save updates to this instance
        Column-wise updates will be made based on the result of
        self.obj_what_changed(). If expected_task_state is provided,
it will be checked against the in-database copy of the
instance before updates are made.
:param:context: Security context
:param:expected_task_state: Optional tuple of valid task states
for the instance to be in
:param:expected_vm_state: Optional tuple of valid vm states
for the instance to be in
:param admin_state_reset: True if admin API is forcing setting
of task_state/vm_state
"""
cell_type = cells_opts.get_cell_type()
if cell_type == 'api' and self.cell_name:
# NOTE(comstud): We need to stash a copy of ourselves
# before any updates are applied. When we call the save
# methods on nested objects, we will lose any changes to
# them. But we need to make sure child cells can tell
# what is changed.
#
# We also need to nuke any updates to vm_state and task_state
# unless admin_state_reset is True. compute cells are
# authoritative for their view of vm_state and task_state.
stale_instance = self.obj_clone()
def _handle_cell_update_from_api():
cells_api = cells_rpcapi.CellsAPI()
cells_api.instance_update_from_api(context, stale_instance,
expected_vm_state,
expected_task_state,
admin_state_reset)
else:
stale_instance = None
updates = {}
changes = self.obj_what_changed()
for field in self.fields:
if (self.obj_attr_is_set(field) and
isinstance(self[field], base.NovaObject)):
try:
getattr(self, '_save_%s' % field)(context)
except AttributeError:
LOG.exception(_LE('No save handler for %s'), field,
instance=self)
elif field in changes:
updates[field] = self[field]
if not updates:
if stale_instance:
_handle_cell_update_from_api()
return
# Cleaned needs to be turned back into an int here
if 'cleaned' in updates:
if updates['cleaned']:
updates['cleaned'] = 1
else:
updates['cleaned'] = 0
if expected_task_state is not None:
if (self.VERSION == '1.9' and
expected_task_state == 'image_snapshot'):
# NOTE(danms): Icehouse introduced a pending state which
# Havana doesn't know about. If we're an old instance,
# tolerate the pending state as well
expected_task_state = [
expected_task_state, 'image_snapshot_pending']
updates['expected_task_state'] = expected_task_state
if expected_vm_state is not None:
updates['expected_vm_state'] = expected_vm_state
expected_attrs = [attr for attr in _INSTANCE_OPTIONAL_JOINED_FIELDS
if self.obj_attr_is_set(attr)]
if 'pci_devices' in expected_attrs:
# NOTE(danms): We don't refresh pci_devices on save right now
expected_attrs.remove('pci_devices')
# NOTE(alaski): We need to pull system_metadata for the
        # notifications.send_update() below. If we don't, there's a KeyError
# when it tries to extract the flavor.
if 'system_metadata' not in expected_attrs:
expected_attrs.append('system_metadata')
old_ref, inst_ref = db.instance_update_and_get_original(
context, self.uuid, updates, update_cells=False,
columns_to_join=_expected_cols(expected_attrs))
if stale_instance:
_handle_cell_update_from_api()
elif cell_type == 'compute':
cells_api = cells_rpcapi.CellsAPI()
cells_api.instance_update_at_top(context, inst_ref)
self._from_db_object(context, self, inst_ref,
expected_attrs=expected_attrs)
notifications.send_update(context, old_ref, inst_ref)
self.obj_reset_changes()
@base.remotable
def refresh(self, context, use_slave=False):
extra = [field for field in INSTANCE_OPTIONAL_ATTRS
if self.obj_attr_is_set(field)]
current = self.__class__.get_by_uuid(context, uuid=self.uuid,
expected_attrs=extra,
use_slave=use_slave)
# NOTE(danms): We orphan the instance copy so we do not unexpectedly
# trigger a lazy-load (which would mean we failed to calculate the
# expected_attrs properly)
current._context = None
for field in self.fields:
if self.obj_attr_is_set(field):
if field == 'info_cache':
self.info_cache.refresh()
# NOTE(danms): Make sure this shows up as touched
self.info_cache = self.info_cache
elif self[field] != current[field]:
self[field] = current[field]
self.obj_reset_changes()
def _load_generic(self, attrname):
instance = self.__class__.get_by_uuid(self._context,
uuid=self.uuid,
expected_attrs=[attrname])
# NOTE(danms): Never allow us to recursively-load
if instance.obj_attr_is_set(attrname):
self[attrname] = instance[attrname]
else:
raise exception.ObjectActionError(
action='obj_load_attr',
reason='loading %s requires recursion' % attrname)
def _load_fault(self):
self.fault = objects.InstanceFault.get_latest_for_instance(
self._context, self.uuid)
def _load_numa_topology(self):
try:
self.numa_topology = \
objects.InstanceNUMATopology.get_by_instance_uuid(
self._context, self.uuid)
except exception.NumaTopologyNotFound:
self.numa_topology = None
def obj_load_attr(self, attrname):
if attrname not in INSTANCE_OPTIONAL_ATTRS:
raise exception.ObjectActionError(
action='obj_load_attr',
reason='attribute %s not lazy-loadable' % attrname)
if not self._context:
raise exception.OrphanedObjectError(method='obj_load_attr',
objtype=self.obj_name())
LOG.debug("Lazy-loading `%(attr)s' on %(name)s uuid %(uuid)s",
{'attr': attrname,
'name': self.obj_name(),
'uuid': self.uuid,
})
# FIXME(comstud): This should be optimized to only load the attr.
if attrname == 'fault':
# NOTE(danms): We handle fault differently here so that we
# can be more efficient
self._load_fault()
elif attrname == 'numa_topology':
self._load_numa_topology()
else:
self._load_generic(attrname)
self.obj_reset_changes([attrname])
def get_flavor(self, namespace=None):
prefix = ('%s_' % namespace) if namespace is not None else ''
db_flavor = flavors.extract_flavor(self, prefix)
flavor = objects.Flavor(self._context)
for key in flavors.system_metadata_flavor_props:
flavor[key] = db_flavor[key]
return flavor
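    # Illustrative pairing (not in the original comments): set_flavor(flavor, 'old') stores the
    # flavor under prefixed system_metadata keys, and get_flavor('old') rebuilds a Flavor object
    # from those same keys.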
def set_flavor(self, flavor, namespace=None):
prefix = ('%s_' % namespace) if namespace is not None else ''
self.system_metadata = flavors.save_flavor_info(
self.system_metadata, flavor, prefix)
self.save()
def delete_flavor(self, namespace):
self.system_metadata = flavors.delete_flavor_info(
self.system_metadata, "%s_" % namespace)
self.save()
@base.remotable
def delete_metadata_key(self, context, key):
"""Optimized metadata delete method.
This provides a more efficient way to delete a single metadata
key, instead of just calling instance.save(). This should be called
with the key still present in self.metadata, which it will update
after completion.
"""
db.instance_metadata_delete(context, self.uuid, key)
md_was_changed = 'metadata' in self.obj_what_changed()
del self.metadata[key]
self._orig_metadata.pop(key, None)
instance_dict = base.obj_to_primitive(self)
notifications.send_update(context, instance_dict, instance_dict)
if not md_was_changed:
self.obj_reset_changes(['metadata'])
def _make_instance_list(context, inst_list, db_inst_list, expected_attrs):
get_fault = expected_attrs and 'fault' in expected_attrs
inst_faults = {}
if get_fault:
# Build an instance_uuid:latest-fault mapping
expected_attrs.remove('fault')
instance_uuids = [inst['uuid'] for inst in db_inst_list]
faults = objects.InstanceFaultList.get_by_instance_uuids(
context, instance_uuids)
for fault in faults:
if fault.instance_uuid not in inst_faults:
inst_faults[fault.instance_uuid] = fault
inst_list.objects = []
for db_inst in db_inst_list:
inst_obj = objects.Instance._from_db_object(
context, objects.Instance(context), db_inst,
expected_attrs=expected_attrs)
if get_fault:
inst_obj.fault = inst_faults.get(inst_obj.uuid, None)
inst_list.objects.append(inst_obj)
inst_list.obj_reset_changes()
return inst_list
class InstanceList(base.ObjectListBase, base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: Added use_slave to get_by_host
# Instance <= version 1.9
# Version 1.2: Instance <= version 1.11
# Version 1.3: Added use_slave to get_by_filters
# Version 1.4: Instance <= version 1.12
# Version 1.5: Added method get_active_by_window_joined.
# Version 1.6: Instance <= version 1.13
# Version 1.7: Added use_slave to get_active_by_window_joined
# Version 1.8: Instance <= version 1.14
# Version 1.9: Instance <= version 1.15
VERSION = '1.9'
fields = {
'objects': fields.ListOfObjectsField('Instance'),
}
child_versions = {
'1.1': '1.9',
# NOTE(danms): Instance was at 1.9 before we added this
'1.2': '1.11',
'1.3': '1.11',
'1.4': '1.12',
'1.5': '1.12',
'1.6': '1.13',
'1.7': '1.13',
'1.8': '1.14',
'1.9': '1.15',
}
@base.remotable_classmethod
def get_by_filters(cls, context, filters,
sort_key='created_at', sort_dir='desc', limit=None,
marker=None, expected_attrs=None, use_slave=False):
db_inst_list = db.instance_get_all_by_filters(
context, filters, sort_key, sort_dir, limit=limit, marker=marker,
columns_to_join=_expected_cols(expected_attrs),
use_slave=use_slave)
return _make_instance_list(context, cls(), db_inst_list,
expected_attrs)
@base.remotable_classmethod
def get_by_host(cls, context, host, expected_attrs=None, use_slave=False):
db_inst_list = db.instance_get_all_by_host(
context, host, columns_to_join=_expected_cols(expected_attrs),
use_slave=use_slave)
return _make_instance_list(context, cls(), db_inst_list,
expected_attrs)
@base.remotable_classmethod
def get_by_host_and_node(cls, context, host, node, expected_attrs=None):
db_inst_list = db.instance_get_all_by_host_and_node(
context, host, node)
return _make_instance_list(context, cls(), db_inst_list,
expected_attrs)
@base.remotable_classmethod
def get_by_host_and_not_type(cls, context, host, type_id=None,
expected_attrs=None):
db_inst_list = db.instance_get_all_by_host_and_not_type(
context, host, type_id=type_id)
return _make_instance_list(context, cls(), db_inst_list,
expected_attrs)
@base.remotable_classmethod
def get_hung_in_rebooting(cls, context, reboot_window,
expected_attrs=None):
db_inst_list = db.instance_get_all_hung_in_rebooting(context,
reboot_window)
return _make_instance_list(context, cls(), db_inst_list,
expected_attrs)
@base.remotable_classmethod
def _get_active_by_window_joined(cls, context, begin, end=None,
project_id=None, host=None,
expected_attrs=None,
use_slave=False):
# NOTE(mriedem): We need to convert the begin/end timestamp strings
# to timezone-aware datetime objects for the DB API call.
begin = timeutils.parse_isotime(begin)
end = timeutils.parse_isotime(end) if end else None
db_inst_list = db.instance_get_active_by_window_joined(context,
begin,
end,
project_id,
host)
return _make_instance_list(context, cls(), db_inst_list,
expected_attrs)
@classmethod
def get_active_by_window_joined(cls, context, begin, end=None,
project_id=None, host=None,
expected_attrs=None,
use_slave=False):
"""Get instances and joins active during a certain time window.
:param:context: nova request context
:param:begin: datetime for the start of the time window
:param:end: datetime for the end of the time window
:param:project_id: used to filter instances by project
:param:host: used to filter instances on a given compute host
:param:expected_attrs: list of related fields that can be joined
in the database layer when querying for instances
        :param:use_slave: if True, ship this query off to a DB slave
:returns: InstanceList
"""
# NOTE(mriedem): We have to convert the datetime objects to string
# primitives for the remote call.
begin = timeutils.isotime(begin)
end = timeutils.isotime(end) if end else None
return cls._get_active_by_window_joined(context, begin, end,
project_id, host,
expected_attrs,
use_slave=use_slave)
@base.remotable_classmethod
def get_by_security_group_id(cls, context, security_group_id):
db_secgroup = db.security_group_get(
context, security_group_id,
columns_to_join=['instances.info_cache',
'instances.system_metadata'])
return _make_instance_list(context, cls(), db_secgroup['instances'],
['info_cache', 'system_metadata'])
@classmethod
def get_by_security_group(cls, context, security_group):
return cls.get_by_security_group_id(context, security_group.id)
def fill_faults(self):
"""Batch query the database for our instances' faults.
:returns: A list of instance uuids for which faults were found.
"""
uuids = [inst.uuid for inst in self]
faults = objects.InstanceFaultList.get_by_instance_uuids(
self._context, uuids)
faults_by_uuid = {}
for fault in faults:
if fault.instance_uuid not in faults_by_uuid:
faults_by_uuid[fault.instance_uuid] = fault
for instance in self:
if instance.uuid in faults_by_uuid:
instance.fault = faults_by_uuid[instance.uuid]
else:
# NOTE(danms): Otherwise the caller will cause a lazy-load
# when checking it, and we know there are none
instance.fault = None
instance.obj_reset_changes(['fault'])
return faults_by_uuid.keys()
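# Illustrative usage sketch (hypothetical context and uuid values, not part of this module):
#   inst = objects.Instance.get_by_uuid(ctxt, instance_uuid,
#                                       expected_attrs=['metadata', 'system_metadata'])
#   inst.task_state = 'rebooting'
#   inst.save(ctxt, expected_task_state=[None])
# get_by_uuid() joins only the columns returned by _expected_cols(); save() persists the fields
# reported by obj_what_changed() and sends an update notification.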
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
#
from Tkinter import Tk
from tkFileDialog import askopenfilename
from tkSimpleDialog import askstring
from tkMessageBox import askyesno
from zipfile import ZipFile
from datetime import datetime
import os
import shutil
import sys
import time
from Axon.ThreadedComponent import threadedcomponent
from Axon.Ipc import WaitComplete, producerFinished, shutdownMicroprocess
class Decks(threadedcomponent):
# Many of these inboxes and outboxes are temporary until the structure is finalised
Inboxes = {
"inbox" : "Button click events result in messages to this inbox",
"fromEmail" : "Status messages from the e-mail component are received back here",
"control" : "",
}
Outboxes = {
"outbox" : "",
"toTicker" : "Sends text messages out to the Ticker component for display to the user",
"toCanvas" : "Sends drawing instructions out to the Canvas component",
"toSequencer" : "Sends slide navigation messages out to the CheckpointSequencer component",
"toEmail" : "Requests to send e-mails are sent through this outbox",
"signal" : "",
}
def __init__(self, scribblesdir, deckdir, email):
super(Decks, self).__init__()
self.scribblesdir = scribblesdir
self.deckdir = deckdir
self.email = email
def shutdown(self):
"""Return 0 if a shutdown message is received, else return 1."""
if self.dataReady("control"):
msg=self.recv("control")
if isinstance(msg,producerFinished) or isinstance(msg,shutdownMicroprocess):
self.send(producerFinished(self),"signal")
return 0
return 1
def main(self):
while self.shutdown():
while self.dataReady("fromEmail"):
status = self.recv("fromEmail")
if status == "sent":
self.send(". Deck e-mailed successfully","toTicker")
else:
self.send(". Error sending deck by e-mail: " + status,"toTicker")
while self.dataReady("inbox"):
cmd = self.recv("inbox")
if isinstance(cmd,list):
if (cmd[0] == "delete"):
self.deleteslide(cmd[1])
else:
self.handleCommand(cmd)
# yield 1
# self.pause()
# yield 1
time.sleep(0.1)
def fixNumbering(self):
exists = 1
slides = os.listdir(self.scribblesdir)
slides.sort()
for x in slides:
if x == "slide." + str(exists) + ".png":
# This slide exists, skip to next one
pass
else:
# This slide doesn't exist, find the next one up and copy it down
try:
shutil.move(self.scribblesdir + "/" + x,self.scribblesdir + "/slide." + str(exists) + ".png")
except Exception, e:
sys.stderr.write("Failed to renumber slides. There may be an error in the sequence")
sys.stderr.write(str(e))
exists += 1
def handleCommand(self, cmd):
cmd = cmd.upper()
if cmd=="LOADDECK":
self.loaddeck()
elif cmd=="SAVEDECK":
self.savedeck()
elif cmd=="CLEARSCRIBBLES":
self.clearscribbles()
elif cmd== "QUIT":
self.quit()
def loaddeck(self):
root = Tk()
root.withdraw()
filename = askopenfilename(filetypes=[("Zip Archives",".zip")],initialdir=self.deckdir,title="Load Slide Deck",parent=root)
root.destroy()
if filename != "":
root = Tk()
root.withdraw()
password = askstring("Deck Password","Please enter the password for this zip file, or press cancel if you believe there isn't one:", parent=root)
root.destroy()
if filename:
try:
unzipped = ZipFile(filename)
self.clearscribbles()
if password != None:
unzipped.extractall(path=self.scribblesdir,pwd=password)
else:
unzipped.extractall(path=self.scribblesdir,pwd="")
num_pages = 0
for x in os.listdir(self.scribblesdir):
if (os.path.splitext(x)[1] == ".png"):
num_pages += 1
self.send(["first",num_pages], "toSequencer")
self.send(chr(0) + "CLRTKR", "toTicker")
self.send("Deck loaded successfully","toTicker")
except Exception, e:
self.send(chr(0) + "CLRTKR", "toTicker")
self.send("Failed to open the deck specified. You may have entered the password incorrectly","toTicker")
def savedeck(self):
num_pages = 0
for x in os.listdir(self.scribblesdir):
if (os.path.splitext(x)[1] == ".png"):
num_pages += 1
if num_pages > 0:
dt = datetime.now()
filename = dt.strftime("%Y%m%d-%H%M%S")
filename = filename + ".zip"
root = Tk()
root.withdraw()
success = False
if askyesno("Deck Password","Would you like this deck to be password protected?",parent=root):
root.destroy()
root = Tk()
root.withdraw()
password = ""
while password == "":
password = askstring("Deck Password","Please enter a password for the zip file:", parent=root)
if password != None:
# Ensure the user hasn't pressed Cancel - if not, proceed, otherwise don't save
try:
os.system("zip -j -q -P " + password + " " + self.deckdir + "/" + filename + " " + self.scribblesdir + "/*.png")
self.send(chr(0) + "CLRTKR", "toTicker")
self.send("Zip file '" + filename + "' created successfully with password","toTicker")
success = True
except Exception, e:
self.send(chr(0) + "CLRTKR", "toTicker")
self.send("Failed to write to zip file '" + filename + "'","toTicker")
else:
try:
os.system("zip -j -q " + self.deckdir + "/" + filename + " " + self.scribblesdir + "/*.png")
self.send(chr(0) + "CLRTKR", "toTicker")
self.send("Zip file '" + filename + "' created successfully without password","toTicker")
success = True
except Exception, e:
self.send(chr(0) + "CLRTKR", "toTicker")
self.send("Failed to write to zip file '" + filename + "'","toTicker")
root.destroy()
if success == True and self.email == True:
# Ask if the user wants to e-mail a copy to themselves
root = Tk()
root.withdraw()
if askyesno("E-mail Deck","Would you like to send a copy of this deck by e-mail?",parent=root):
root.destroy()
root = Tk()
root.withdraw()
address = ""
while address == "":
address = askstring("E-mail Deck","Please enter an e-mail address. Multiple addresses can be entered if separated by semicolons:", parent=root)
if address != None:
# We have an address - no idea if it's valid or not, but this is where we'll send the message
body = "Your whiteboard deck has been attached\n\nSent via Whiteboard"
self.send([address,"Whiteboard Deck " + filename,body,[self.deckdir + "/" + filename]], "toEmail")
root.destroy()
else:
self.send(chr(0) + "CLRTKR", "toTicker")
self.send("Save failed: No slides appear to exist","toTicker")
def clearscribbles(self):
try:
for x in os.listdir(self.scribblesdir):
if os.path.splitext(x)[1] == ".png":
os.remove(self.scribblesdir + "/" + x)
self.send([["clear"]], "toCanvas")
self.send("reset", "toSequencer")
except Exception, e:
sys.stderr.write("Failed to clear scribbles - couldn't remove " + str(self.scribblesdir + "/" + x))
def deleteslide(self,current):
try:
os.remove(self.scribblesdir + "/slide." + str(current) + ".png")
except Exception, e:
sys.stderr.write("Error deleting slide " + str(current))
self.fixNumbering()
self.send("loadsafe","toSequencer")
def quit(self):
root = Tk()
root.withdraw()
kill = False
if askyesno("Confirm","Unsaved changes will be lost. Are you sure you want to quit?",parent=root):
# perform quit
kill = True
#pygame.quit() # This isn't the right way to do it!
# Also, saving won't work as the program exits before it's happened
root.destroy()
if kill:
print("Exiting")
self.scheduler.stop()
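# Illustrative construction (hypothetical directories, not from the original file):
#   decks = Decks(scribblesdir="./Scribbles", deckdir="./Decks", email=False)
# In practice the component is wired into a Kamaelia system so that its 'toTicker', 'toCanvas'
# and 'toSequencer' outboxes are linked to the corresponding display components.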
|
|
# Copyright (c) 2011 Dave McCoy (dave.mccoy@cospandesign.com)
#
# This file is part of Nysa.
#
# (http://wiki.cospandesign.com/index.php?title=Nysa.org)
#
# Nysa is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
#
# Nysa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Nysa; If not, see <http://www.gnu.org/licenses/>.
# -*- coding: utf-8 -*-
"""Module Factory
Generates verilog modules. A verilog module may be generated by simply
copying the module, by running a generation script, or by a combination
of the two
"""
__author__ = 'dave.mccoy@cospandesign.com (Dave McCoy)'
"""Changes:
06/11/2012
    -Added Documentation and license
-Moved two functions from sapfile to utils
is_module_in_file
find_module_filename
09/18/2013
-Changed license to GPL V3
"""
import os
import glob
import sys
import importlib
from inspect import isclass
from ibuilder_error import ModuleNotFound
from ibuilder_error import ModuleFactoryError
import utils
sys.path.append(os.path.join( os.path.dirname(__file__),
"gen_scripts"))
sys.path.append(os.path.join( os.path.dirname(__file__),
os.pardir,
os.pardir,
"cbuilder",
"scripts"))
from gen_scripts import gen
class ModuleProcessor:
"""Generates a module
    Based on the script template, the modules may be created with a gen script
    or by simply copying a file
"""
def __init__ (self, user_paths = []):
# print "SAPLIB_BASE: " + os.environ["SAPLIB_BASE"]
# print "Path: " + str(sys.path)
self.user_paths = list(set(user_paths))
self.gen_module = None
self.gen = None
self.buf = ""
self.tags = {}
self.verilog_file_list = []
self.verilog_dependency_list = []
return
def write_file(self, location = "", filename=""):
"""write_file
        Search the specified location; if the location doesn't exist then
        create it. Then write out the specified file.
Args:
location: the location where the file is to be written
filename: the name of the output file to write
Returns:
Nothing
Raises:
IOError
"""
home = False
location = utils.resolve_path(location)
if not os.path.exists(location):
utils.create_dir(location)
fname = os.path.join(location, filename)
fileout = open(fname, "w")
fileout.write(self.buf)
fileout.close()
return
def apply_tags(self):
"""apply_tags
        Substitutes the tags with the data specific to this project
Args:
None
Return:
Nothing
Raises:
KeyError
"""
#search through the buf for any tags that match something within
#our tag map
try:
self.buf = self.buf.format(self.tags)
except KeyError as err:
            if ('$' in str(err)):
raise KeyError(str(err))
except ValueError as err:
print "Value Error with the Buffer (shown below): %s" % str(err)
print "Tags:"
for t in self.tags:
print "\t%s: %s" % (t, str(self.tags[t]))
print "Buffer: %s" % self.buf
return
def set_tags(self, tags={}):
"""set_tags
set the tags for this module
Args:
tags: project specific tags
Return:
Nothing
Raises:
Nothing
"""
self.tags = tags
return
def process_file(self, filename, file_dict, directory="", debug=False):
"""process_file
read in a file, modify it (if necessary), then write it to the location
specified by the directory variable
Args:
filename: the name of the file to process
file_dict: dictionary associated with this file
directory: output directory
Return:
Raises:
ModuleFactoryError
IOError
"""
verbose = False
debug = False
if (filename.endswith(".v")):
self.verilog_file_list.append(filename)
if debug:
print "in process file"
print "\t%s" % filename
#maybe load a tags??
#using the location value in the file_dict find the file and
#pull it into a buf
self.buf = ""
file_location = ""
paths = self.user_paths
#There are two types of files
#ones that are copied over from a location
#ones that are generated by scripts
#The file is specified by a location and basically needs to be copied over
if file_dict.has_key("location"):
#print "Location: %s" % file_dict["location"]
#file_location = os.path.join( utils.nysa_base,
loc = file_dict["location"].split("/")
#print "Loc list: %s" % str(loc)
if loc[0] == "${NYSA}":
loc[0] = utils.nysa_base
#print "Loc list: %s" % str(loc)
file_location = "/"
for d in loc:
file_location = os.path.join(file_location, d)
if (debug):
print ("getting file: " + filename + " from location: " + file_location)
found_file = False
try:
filein = open(os.path.join(utils.resolve_path(file_location), filename))
self.buf = filein.read()
filein.close()
found_file = True
except IOError as err:
pass
if not found_file:
if debug:
print "searching for file...",
try:
absfilename = utils.find_rtl_file_location(filename, self.user_paths)
filepath = os.path.dirname(os.path.dirname(absfilename))
paths.insert(0, filepath)
paths = list(set(paths))
filein = open(absfilename)
self.buf = filein.read()
filein.close()
except:
if debug:
print "Failed to find file"
raise ModuleFactoryError("File %s not found searched %s and in the HDL dir (%s)" % (filename, \
file_location, \
utils.nysa_base + os.path.sep + "cbuilder" + os.path.sep + "verilog"))
if verbose:
print "found file!"
print "file content: " + self.buf
#File is generated by a script
elif (not file_dict.has_key("gen_script")):
raise ModuleFactoryError( "File %s does not declare a location or a \
script! Check the template file" % filename)
if verbose:
print "Project name: " + self.tags["PROJECT_NAME"]
#if the generation flag is set in the dictionary
if "gen_script" in file_dict:
if debug:
print "found the generation script"
print "run generation script: " + file_dict["gen_script"]
#open up the new gen module
ms = sys.modules.keys()
gs = ""
for m in ms:
if m.endswith("gen_scripts"):
gs = m
#print "gs: %s" % gs
cl = __import__("%s.gen" % gs, fromlist=[gs])
#cl = importlib.import_module("gen_scripts", "gen")
#if debug:
# print "cl: " + str(cl)
Gen = getattr(gen, "Gen")
if debug:
print "Gen: " + str(Gen)
self.gen_module = __import__("%s.%s" % (gs, file_dict["gen_script"]), fromlist=[gs])
gen_success_flag = False
#find the script and dynamically add it
for name in dir(self.gen_module):
obj = getattr(self.gen_module, name)
# print "object type: " + str(obj)
#XXX: debug section start
if verbose:
print "name: " + name
if isclass(obj):
if verbose:
print "\tobject type: " + str(obj)
print "\tis class"
if issubclass(obj, cl.Gen):
if verbose:
print "\t\tis subclass"
#XXX: debug section end
if isclass(obj) and issubclass(obj, cl.Gen) and obj is not cl.Gen:
self.gen = obj()
if verbose:
print "obj = " + str(self.gen)
self.buf = self.gen.gen_script(tags = self.tags, buf = self.buf, user_paths = self.user_paths)
gen_success_flag = True
if not gen_success_flag:
raise ModuleFactoryError("Failed to execute the generation script %s" %
file_dict["gen_script"])
else:
#no script to execute, just tags
self.apply_tags()
if verbose:
print self.buf
if (len(self.buf) > 0):
result = self.write_file(directory, filename)
if self.has_dependencies(filename):
deps = self.get_list_of_dependencies(filename)
for d in deps:
try:
f = utils.find_module_filename(d, self.user_paths)
if (len(f) == 0):
print "Error: couldn't find dependency filename"
continue
if (f not in self.verilog_dependency_list and
f not in self.verilog_file_list):
if debug:
print "found dependency: " + f
self.verilog_dependency_list.append(f)
except ModuleNotFound as err:
continue
def resolve_dependencies(self, filename, debug = True):
"""resolve_dependencies
given a filename determine if there are any modules it depends on,
recursively search for any files found in order to extrapolate all
dependencies
Args:
filename: The filename to resolve dependencies for
Return:
Nothing
Raises:
ModuleFactoryError
"""
result = True
ldebug = debug
if debug:
print "in resolve dependencies"
local_file_list = []
if debug:
print "working on filename: " + filename
if (self.has_dependencies(filename, debug = ldebug)):
if debug:
print "found dependencies!"
deps = self.get_list_of_dependencies(filename, debug = ldebug)
for d in deps:
try:
dep_filename = utils.find_module_filename(d, self.user_paths, debug = ldebug)
except ModuleNotFound as ex:
print "Dependency Warning: %s" % (str(ex))
print "Module Name: %s" % (d)
print "This warning may be due to:"
print "\tIncluding a simulation only module"
print "\tIncluding a vendor specific module"
print "\tA module that was not found"
continue
if debug:
print "found the filename: " + dep_filename
                #check this file out for dependencies, then append that on to the local list
self.resolve_dependencies(dep_filename, debug = ldebug)
if debug:
print "found all sub dependencies for: " + dep_filename
local_file_list.append(dep_filename)
#go through the local file list and add anything found to the list of dependencies or verilog files
for f in local_file_list:
if (f not in self.verilog_dependency_list) and (f not in self.verilog_file_list):
if debug:
print "found dependency: " + f
self.verilog_dependency_list.append(f)
return
def has_dependencies(self, filename, debug = False):
"""has_dependencies
returns true if the file specified has dependencies
Args:
filename: search for dependencies with this filename
Return:
True: The file has dependencies.
False: The file doesn't have dependencies
Raises:
IOError
"""
if debug:
print "input file: " + filename
#filename needs to be a verilog file
if (filename.partition(".")[2] != "v"):
if debug:
print "File is not a recognized verilog source"
return False
fbuf = ""
        #the name is a verilog file, try and open it
try:
filein = open(filename)
fbuf = filein.read()
filein.close()
except IOError as err:
if debug:
print "the file is not a full path, searching RTL... ",
#didn't find with full path, search for it
try:
#print "self.user_paths: %s" % (self.user_paths)
filepath = utils.find_rtl_file_location(filename, self.user_paths)
filein = open(filepath)
fbuf = filein.read()
filein.close()
except ModuleNotFound as err:
fbuf = ""
except IOError as err_int:
if debug:
print "couldn't find file in the RTL directory"
ModuleFactoryError("Couldn't find file %s in the RTL directory" % filename)
#we have an open file!
if debug:
print "found file!"
#strip out everything we can't use
fbuf = utils.remove_comments(fbuf)
#modules have lines that start with a '.'
str_list = fbuf.splitlines()
for item in str_list:
item = item.strip()
if (item.startswith(".")):
if debug:
print "found a module!"
return True
return False
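    # Illustrative detail (not in the original comments): verilog module instantiations use named
    # port connections that begin with '.', e.g. ".clk (clk),"; the scan above treats any line
    # starting with '.' as evidence that this file instantiates another module.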
def get_list_of_dependencies(self, filename, debug=False):
"""get_list_of_dependencies
return a list of the files that this file depends on
Args:
filename: the name of the file to analyze
Return:
A list of files that specify the dependenies
Raises:
IOError
"""
deps = []
if debug:
print "input file: " + filename
#filename needs to be a verilog file
if (filename.partition(".")[2] != "v"):
if debug:
print "File is not a recognized verilog source"
            return []
fbuf = ""
        #the name is a verilog file, try and open it
try:
filein = open(filename)
fbuf = filein.read()
filein.close()
except IOError as err:
#if debug:
# print "the file is not a full path... searching RTL"
#didn't find with full path, search for it
try:
filepath = utils.find_rtl_file_location(filename, self.user_paths)
filein = open(filepath)
fbuf = filein.read()
filein.close()
except IOError as err_int:
ModuleFactoryError("Couldn't find file %s in the RTL directory" % filename)
#we have an open file!
if debug:
print "found file!"
#strip out everything we can't use
fbuf = utils.remove_comments(fbuf)
include_fbuf = fbuf
#search for `include
while (not len(include_fbuf.partition("`include")[2]) == 0):
ifile_name = include_fbuf.partition("`include")[2]
ifile_name = ifile_name.splitlines()[0]
ifile_name = ifile_name.strip()
ifile_name = ifile_name.strip("\"")
if debug:
print "found an include " + ifile_name + " ",
if (ifile_name not in self.verilog_dependency_list) and (ifile_name not in self.verilog_file_list):
self.verilog_dependency_list.append(ifile_name)
if debug:
print "adding " + ifile_name + " to the dependency list"
else:
if debug:
print "... already in have it"
include_fbuf = include_fbuf.partition("`include")[2]
#remove the ports list and the module name
fbuf = fbuf.partition(")")[2]
#modules have lines that start with a '.'
str_list = fbuf.splitlines()
module_token = ""
done = False
while (not done):
for i in range (0, len(str_list)):
line = str_list[i]
#remove white spaces
line = line.strip()
if (line.startswith(".") and line.endswith(",")):
#if debug:
# print "found a possible module... with token: " + line
module_token = line
break
#check if we reached the last line
if (i >= len(str_list) - 1):
done = True
if (not done):
#found a possible module
                #partition the fbuf
#if debug:
# print "module token " + module_token
module_string = fbuf.partition(module_token)[0]
fbuf = fbuf.partition(module_token)[2]
fbuf = fbuf.partition(";")[2]
str_list = fbuf.splitlines()
#get rid of everything before the possible module
while (len(module_string.partition(";")[2]) > 0):
module_string = module_string.partition(";")[2]
module_string = module_string.partition("(")[0]
module_string = module_string.strip("#")
module_string = module_string.strip()
m_name = module_string.partition(" ")[0]
if debug:
print "module name: " + m_name
                if (m_name not in deps):
if debug:
print "adding it to the deps list"
deps.append(module_string.partition(" ")[0])
#mlist = module_string.splitlines()
#work backwords
#look for the last line that has a '('
#for i in range (0, len(mlist)):
# mstr = mlist[i]
# print "item: " + mlist[i]
# #mstr = mlist[len(mlist) - 1 - i]
# #mstr = mstr.strip()
# if (mstr.__contains__(" ")):
# if debug:
# print "found: " + mstr.partition(" ")[0]
# deps.append(mstr.partition(" ")[0])
# break
return deps
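# Illustrative usage sketch (hypothetical tags, file names and paths, not part of the original):
#   mp = ModuleProcessor(user_paths=["/path/to/extra/rtl"])
#   mp.set_tags({"PROJECT_NAME": "example_project"})
#   mp.process_file("top.v", {"location": "${NYSA}/path/to/rtl"}, directory="output")
#   mp.resolve_dependencies("top.v")
# process_file() copies or generates the file into 'output' and records any verilog dependencies
# it finds in mp.verilog_dependency_list.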
|
|
from datetime import datetime
import json
import time
import sys
from bs4 import BeautifulSoup
import dryscrape
class LinkScrape:
LOGIN_EMAIL = 'EMAIL'
LOGIN_PASSWORD = 'PASSWORD'
BASE_URL = 'https://www.linkedin.com'
sess = None
RESULTS = {}
def __init__(self, email, password, loadprevious):
try:
self.load_results()
except:
print 'No past results loaded'
self.LOGIN_EMAIL = email
self.LOGIN_PASSWORD = password
# set up a web scraping session
self.sess = dryscrape.Session(base_url=self.BASE_URL)
# we don't need images
self.sess.set_attribute('auto_load_images', False)
# visit homepage and log in
self.login()
if not loadprevious:
urls = self.get_all_linkedin_company_urls()
else:
urls = self.load_companies()
self.process_companies(urls)
def find_employee_blanks(self):
failed_companies = self.load_failed_companies()
for company_name, company_insights in self.RESULTS.iteritems():
employees = company_insights['employees']
if not employees:
failed_companies[company_name] = company_insights['linkedin']
self.save_failed_companies(failed_companies)
def get_all_linkedin_company_urls(self):
'''
Iterates through every page in the search results
and builds a list of company names & LinkedIn URLs.
Returns:
dict: Company names mapped to LinkedIn URLs
'''
company_links = {}
# Search for 51-200 employee companies in Retail, Apparel & Fashion,
# in Ireland, USA, UK, Canada
# int_fashion_retail = 'https://www.linkedin.com/vsearch/c?type=companies&orig=\
# FCTD&rsid=1219664211436979526645&pageKey=oz-winner&searc\
# h=Search&f_CCR=us%3A0,gb%3A0,ie%3A0,ca%3A0&f_I=27,19&open\
# Facets=N,CCR,JO,I,CS&f_CS=D'
# Hospitality sector companies in Ireland, with 51-5000 employees
irl_hospitality = 'https://www.linkedin.com/vsearch/c?type=companies&orig\
=FCTD&rsid=1219664211455638914865&pageKey=oz-winner&sea\
rc\%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%2\
0%20%20%20%20%20%20%20h=Search&f_CCR=ie%3A0&open\%20%20%2\
0%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%2\
0%20%20%20Facets=N,CCR,JO,I,CS&openFacets=N,CCR,JO,I,CS&\
f_I=31&f_CS=D,E,F,G'
self.sess.visit(irl_hospitality)
# Iterate over search results pages
while 1:
# Get HTML for this page & extract links
html = self.sess.body()
soup = BeautifulSoup(html, 'lxml')
for i in range(1, 10):
try:
result = soup.find("li", {"class": "mod result \
idx{num} company".format(num=i)})
anchor = result.find("a", {"class": "title"})
link = self.BASE_URL + anchor.get('href')
name = anchor.contents[0]
print 'Added LinkedIn URL for {name}'.format(name=name)
company_links[name] = link
self.save_company_links(company_links)
except Exception as e:
print e
# Get the URL of the next search page, if it exists
next_page_url = soup.find("a", {
"class": "page-link",
"title": "Next Page"})
if next_page_url:
time.sleep(1)
next_page_url = self.BASE_URL + next_page_url.get('href')
self.sess.visit(next_page_url)
else:
return company_links
def get_employees(self, soup):
'''
Checks each company employee to find marketing people.
Args:
(BeautifulSoup) soup: BeautifulSoup object for the company page
Returns:
dict: Marketing employees and their details
'''
employee_details = {}
# Get search results for employees in this company
employees_box = soup.find("div", {"class": "company-density module"})
view_all_link = employees_box.find("a", {"class": "more"}).get("href")
view_all_link = self.BASE_URL + view_all_link
self.sess.visit(view_all_link)
# Iterate over all the employees
while 1:
time.sleep(3)
html = self.sess.body()
soup = BeautifulSoup(html, 'lxml')
for i in range(1, 10):
self.sess.render('employeePage.png')
try:
result = soup.find("li", {"class": "mod result \
idx{num} people".format(num=i)})
if result: # Otherwise no more result items
anchor = result.find("a", {"class": "title"})
link = anchor.get('href')
name = anchor.text
description = result.find("div", {"class":
"description"}).text
if ('marketing' in description.lower() or
'cmo' in description.lower()):
employee_details[name] = {
'link': link,
'description': description
}
print 'Found person: {name}'.format(name=name)
except Exception as e:
print e
# Get the URL of the next search page, if it exists
next_page_url = soup.find("a", {
"class": "page-link",
"title": "Next Page"})
if next_page_url:
next_page_url = self.BASE_URL + next_page_url.get('href')
self.sess.visit(next_page_url)
else:
return employee_details
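# Shape of the dict returned by get_employees() (hypothetical values):
#   {'Jane Doe': {'link': '/in/janedoe', 'description': 'CMO at Example Corp'}}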
def process_companies(self, linkedin_urls):
'''
Given company LinkedIn URLs, get company & employee details.
Iterates through RESULTS and adds new info.
Args:
(dict) linkedin_urls: Company names mapped to linkedin urls
'''
try:
failed_companies = self.load_failed_companies()
except (IOError, ValueError):
failed_companies = {}  # When we can't get details automatically
for company_name, linkedin_url in linkedin_urls.iteritems():
if (company_name not in self.RESULTS or
company_name in failed_companies):
self.sess.visit(linkedin_url) # Visit linkedin page
self.sess.render('companyPage.png')
html = self.sess.body()
soup = BeautifulSoup(html, 'lxml')
try:
# Get list item containing website
website = soup.find("li", {"class": "website"})
website = website.find("a").text
print website
# Get Industry
industry = soup.find("li", {"class": "industry"})
industry = industry.find("p").text
print industry
# Get Size
size = soup.findAll("a", {"class": "density"})
try:
size = size[1].text
except:
size = size[0].text
print size
if 25 <= int(size.replace(',', '')) <= 250:
ts = time.time()
ts = datetime.fromtimestamp(
ts).strftime('%Y-%m-%d %H:%M:%S')
print ts
employees = self.get_employees(soup)
if employees:
company_insights = {
'linkedin': linkedin_url,
'website': website,
'industry': industry,
'size': size,
'employees': employees,
'processed': ts
}
print 'Processed {name}:'.format(name=company_name)
print company_insights
self.RESULTS[company_name] = company_insights
self.save_results()
else:
failed_companies[company_name] = linkedin_url
self.save_failed_companies(failed_companies)
except Exception as e:
print e
failed_companies[company_name] = linkedin_url
self.save_failed_companies(failed_companies)
print 'Cannot get details for company: \
{url}'.format(url=linkedin_url)
time.sleep(3)
else:
print 'Company already processed: {}'.format(company_name)
def login(self):
'''
Assumes the user is not signed in and signs them in.
'''
print 'Logging in...'
self.sess.visit('/')
html = self.sess.body()
soup = BeautifulSoup(html, 'lxml')
email_field = self.sess.at_css('#login-email')
password_field = self.sess.at_css('#login-password')
email_field.set(self.LOGIN_EMAIL)
password_field.set(self.LOGIN_PASSWORD)
# signin = self.sess.at_css('#signin')
signin = soup.find("input", {"value": "Sign in"})
print signin
time.sleep(1) # Give the signin button a second to become active
self.sess.render('loginPage.png')
signin.click()
print 'Logged In'
def load_companies(self):
'''
Company links from Json
'''
with open('company_links.json', 'r') as links_file:
links = json.load(links_file)
print 'Loaded company links.'
return links
def load_results(self):
'''
Results from Json
'''
with open('results.json', 'r') as results_file:
self.RESULTS = json.load(results_file)
print 'Loaded past results.'
def load_failed_companies(self):
'''
Failed from Json
Returns:
dict: Company name mapped to LinkedIn url
'''
with open('failed_links.json', 'r') as failed_file:
failed_companies = json.load(failed_file)
print 'Loaded failed companies.'
return failed_companies
def save_company_links(self, links):
'''
Company links to Json, in case of crash
'''
with open('company_links.json', 'w+') as links_file:
json.dump(links, links_file)
print 'Saved company links.'
def save_failed_companies(self, failed_companies):
'''
Failed company links to Json, for manual review
'''
with open('failed_links.json', 'w+') as failed_file:
json.dump(failed_companies, failed_file)
print 'Saved failed company links.'
def save_results(self):
'''
RESULTS to Json file
'''
with open('results.json', 'w+') as results_file:
json.dump(self.RESULTS, results_file)
print 'Saved Results'
email = str(sys.argv[1])
password = str(sys.argv[2])
try:
load_previous = str(sys.argv[3])
except IndexError:
load_previous = False
scraper = LinkScrape(email, password, load_previous)
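# Example invocation, assuming this file is saved as linkscrape.py (hypothetical
# filename and credentials):
#   python linkscrape.py me@example.com s3cret           # scrape the search results
#   python linkscrape.py me@example.com s3cret previous  # reuse saved company_links.json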
|
|
"""
Some quick and dirty tests for a very small subset of the code.
"""
import pytest
from testtools.assertions import assert_that
from testtools.matchers import MatchesSetwise
from xenserver import tasks
from xenserver.tests.helpers import VM_MEM
from xenserver.tests.matchers import (
ExtractValues, MatchesSetOfLists, MatchesVMNamed, MatchesXenServerVIF,
MatchesXenServerVM)
def apply_task(task, *args, **kw):
"""
Wrapper around <task>.apply(...).get() to make sure the task properly
propagates exceptions, etc.
"""
task.apply(*args, **kw).get()
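# For example, apply_task(tasks.updateVms) runs the task eagerly in-process and
# re-raises any exception stored on the result instead of silently swallowing it.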
@pytest.mark.django_db
class TestCreateVM(object):
"""
Test xenserver.tasks.create_vm task.
"""
def extract_VIFs(self, xenserver, VM_ref, spec):
"""
Get the VIFs for the given VM and match them to a list of (network,
VIF) ref pairs.
"""
assert_that(
xenserver.list_network_VIFs_for_VM(VM_ref),
MatchesSetOfLists(spec))
def extract_VBDs(self, xenserver, VM_ref, spec):
"""
Get the VBDs for the given VM and match them to a list of (SR, VBD)
ref pairs.
"""
assert_that(
xenserver.list_SR_VBDs_for_VM(VM_ref),
MatchesSetOfLists(spec))
def matches_vm(self, local_SR, VIFs, VBDs, **kw):
"""
Build an ExpectedXenServerVM object with some default parameters.
"""
params = {
"name_label": "None.None",
"memory_static_max": str(VM_MEM*1024*1024),
"memory_dynamic_max": str(VM_MEM*1024*1024),
"suspend_SR": local_SR,
}
return MatchesXenServerVM(
VIFs=MatchesSetwise(*VIFs), VBDs=MatchesSetwise(*VBDs), **params)
xapi_versions = pytest.mark.parametrize('xapi_version', [(1, 1), (1, 2)])
@xapi_versions
def test_create_vm_simple(self, xapi_version, xs_helper):
"""
We can create a new VM using mostly default values.
"""
xsh, xs = xs_helper.new_host('xenserver01.local', xapi_version)
templ = xs_helper.db_template("default")
vm = xs_helper.db_xenvm(xs, "foovm", templ, status="Provisioning")
assert vm.xsref == ''
assert xsh.api.VMs == {}
apply_task(tasks.create_vm,
[vm, xs, templ, None, None, None, None, None, None],
{'extra_network_bridges': []})
vm.refresh_from_db()
assert vm.xsref != ''
# Make sure the right VIFs and VBDs were created and extract their
# reference values.
ev = ExtractValues("VIF", "iso_VBD", "local_VBD")
self.extract_VIFs(xsh.api, vm.xsref, [
(xsh.net['eth0'], ev.VIF),
])
self.extract_VBDs(xsh.api, vm.xsref, [
(xsh.sr['iso'], ev.iso_VBD),
(xsh.sr['local'], ev.local_VBD),
])
# The VM data structure should match the values we passed to
# create_vm().
assert xsh.api.VMs.keys() == [vm.xsref]
assert_that(xsh.api.VMs[vm.xsref], self.matches_vm(
xsh.sr['local'], VIFs=[ev.VIF], VBDs=[ev.iso_VBD, ev.local_VBD]))
# The VIF data structures should match the values we passed to
# create_vm().
assert xsh.api.VIFs.keys() == [ev.VIF.value]
assert_that(xsh.api.VIFs[ev.VIF.value], MatchesXenServerVIF(
device="0", VM=vm.xsref, network=xsh.net['eth0']))
# The VM should be started.
assert xsh.api.VM_operations == [(vm.xsref, "start")]
@xapi_versions
def test_create_vm_second_vif(self, xapi_version, xs_helper):
"""
We can create a new VM with a second VIF.
"""
xsh, xs = xs_helper.new_host('xenserver01.local', xapi_version)
templ = xs_helper.db_template("default")
vm = xs_helper.db_xenvm(xs, "foovm", templ, status="Provisioning")
assert vm.xsref == ''
assert xsh.api.VMs == {}
apply_task(tasks.create_vm,
[vm, xs, templ, None, None, None, None, None, None],
{'extra_network_bridges': ['xenbr1']})
vm.refresh_from_db()
assert vm.xsref != ''
# Make sure the right VIFs and VBDs were created and extract their
# reference values.
ev = ExtractValues("pub_VIF", "prv_VIF", "iso_VBD", "local_VBD")
self.extract_VIFs(xsh.api, vm.xsref, [
(xsh.net['eth0'], ev.pub_VIF),
(xsh.net['eth1'], ev.prv_VIF),
])
self.extract_VBDs(xsh.api, vm.xsref, [
(xsh.sr['iso'], ev.iso_VBD),
(xsh.sr['local'], ev.local_VBD),
])
# The VM data structure should match the values we passed to
# create_vm().
assert xsh.api.VMs.keys() == [vm.xsref]
assert_that(xsh.api.VMs[vm.xsref], self.matches_vm(
xsh.sr['local'], VIFs=[ev.pub_VIF, ev.prv_VIF],
VBDs=[ev.iso_VBD, ev.local_VBD]))
# The VIF data structures should match the values we passed to
# create_vm().
assert_that(
xsh.api.VIFs.keys(), MatchesSetwise(ev.pub_VIF, ev.prv_VIF))
assert_that(xsh.api.VIFs[ev.pub_VIF.value], MatchesXenServerVIF(
device="0", VM=vm.xsref, network=xsh.net['eth0']))
assert_that(xsh.api.VIFs[ev.prv_VIF.value], MatchesXenServerVIF(
device="1", VM=vm.xsref, network=xsh.net['eth1']))
# The VM should be started.
assert xsh.api.VM_operations == [(vm.xsref, "start")]
@pytest.mark.django_db
class TestUpdateVms(object):
"""
Test xenserver.tasks.updateVms task.
"""
def test_no_servers(self, xs_helper, task_catcher):
"""
Nothing to do if we have no servers.
"""
us_calls = task_catcher.catch_updateServer()
apply_task(tasks.updateVms)
assert us_calls == []
def test_one_server(self, xs_helper, task_catcher):
"""
A single server will be updated.
"""
xs_helper.new_host('xs01.local')
us_calls = task_catcher.catch_updateServer()
apply_task(tasks.updateVms)
assert us_calls == ['xs01.local']
def test_three_servers(self, xs_helper, task_catcher):
"""
Multiple servers will be updated.
"""
xs_helper.new_host('xs01.local')
xs_helper.new_host('xs02.local')
xs_helper.new_host('xs03.local')
us_calls = task_catcher.catch_updateServer()
apply_task(tasks.updateVms)
assert sorted(us_calls) == ['xs01.local', 'xs02.local', 'xs03.local']
def no_urlopen(url):
raise NotImplementedError('urllib2.urlopen() excised for tests.')
@pytest.mark.django_db
class TestUpdateServer(object):
"""
Test xenserver.tasks.updateServer task.
"""
def test_first_run(self, xs_helper, task_catcher):
"""
The first run of updateServer() after a new host is added will update
the two fields that reflect resource usage.
NOTE: We stub out urllib2.urlopen() so that it doesn't try to talk to
the network. The failure to fetch host metrics is silently ignored.
"""
task_catcher.patch_urlopen(no_urlopen)
_, xs = xs_helper.new_host('xs01.local')
uv_calls = task_catcher.catch_updateVm()
xs01before = xs_helper.get_db_xenserver_dict('xs01.local')
apply_task(tasks.updateServer, [xs])
xs01after = xs_helper.get_db_xenserver_dict('xs01.local')
# Two fields have changed.
assert xs01before.pop('mem_free') != xs01after.pop('mem_free')
assert xs01before.pop('cpu_util') != xs01after.pop('cpu_util')
# All the others are the same.
assert xs01before == xs01after
assert uv_calls == []
def test_one_vm(self, xs_helper, task_catcher):
"""
If a server has a single VM running on it, we schedule a single
updateVm task.
NOTE: We stub out urllib2.urlopen() so that it doesn't try to talk to
the network. The failure to fetch host metrics is silently ignored.
"""
task_catcher.patch_urlopen(no_urlopen)
_, xs = xs_helper.new_host('xs01.local')
vm = xs_helper.new_vm(xs, 'vm01.local')
uv_calls = task_catcher.catch_updateVm()
apply_task(tasks.updateServer, [xs])
assert_that(uv_calls, MatchesSetOfLists([
('xs01.local', vm.xsref, MatchesVMNamed('vm01.local'))]))
def test_two_vms(self, xs_helper, task_catcher):
"""
If a server has two VMs running on it, we schedule an updateVm task for
each.
NOTE: We stub out urllib2.urlopen() so that it doesn't try to talk to
the network. The failure to fetch host metrics is silently ignored.
"""
task_catcher.patch_urlopen(no_urlopen)
_, xs = xs_helper.new_host('xs01.local')
vm01 = xs_helper.new_vm(xs, 'vm01.local')
vm02 = xs_helper.new_vm(xs, 'vm02.local')
uv_calls = task_catcher.catch_updateVm()
apply_task(tasks.updateServer, [xs])
assert_that(uv_calls, MatchesSetOfLists([
('xs01.local', vm01.xsref, MatchesVMNamed('vm01.local')),
('xs01.local', vm02.xsref, MatchesVMNamed('vm02.local')),
]))
@pytest.mark.django_db
class TestUpdateVm(object):
"""
Test xenserver.tasks.updateVm task.
"""
def test_first_run(self, xs_helper, task_catcher):
"""
The first run of updateVm() after a new VM is provisioned will update
the uuid field.
"""
xsh, xs = xs_helper.new_host('xs01.local')
vm = xs_helper.new_vm(xs, 'vm01.local')
vm01before = xs_helper.get_db_xenvm_dict('vm01.local')
vmobj = xsh.get_session().xenapi.VM.get_record(vm.xsref)
apply_task(tasks.updateVm, [xs, vm.xsref, vmobj])
vm01after = xs_helper.get_db_xenvm_dict('vm01.local')
# One field has changed.
assert vm01before.pop('uuid') != vm01after.pop('uuid')
# All the others are the same.
assert vm01before == vm01after
|
|
# -*- coding: utf-8 -*-
from abc import abstractmethod, ABCMeta
import numpy as np
import cv2
import scipy.ndimage as snd
# from skimage.feature import structure_tensor, structure_tensor_eigvals, hog
from sldc import Logger, StandardOutputLogger, SilentLogger
__author__ = "Rubens Ulysse <urubens@uliege.be>"
__version__ = '0.1'
def split_list_to_dict(lst, splitter='__'):
if not isinstance(lst, list):
lst = [lst]
d = dict()
for item in lst:
c, v = item.split(splitter)
if c not in d:
d[c] = [v]
else:
d[c].append(v)
return d
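# For example, split_list_to_dict(['RGB__rgb', 'RGB__hsv', 'L__sobel1']) is
# expected to return {'RGB': ['rgb', 'hsv'], 'L': ['sobel1']}: the part before
# the '__' splitter names the colorspace, the part after it names the feature.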
class Colorspace(object):
# ABCMeta is needed for the @abstractmethod below to be enforced (Python 2 syntax).
__metaclass__ = ABCMeta
def __init__(self):
self.image = None
self.feature_extractors = list()
@abstractmethod
def build(self, image):
pass
class ColorspaceRGB(Colorspace):
def __init__(self, features):
Colorspace.__init__(self)
for feature in features:
if feature in ('rgb', 'RGB'):
self.feature_extractors.append(FeatureExtractor_RGB)
elif feature in ('hsv', 'HSV'):
self.feature_extractors.append(FeatureExtractor_HSV)
elif feature in ('luv', 'Luv', 'LUV'):
self.feature_extractors.append(FeatureExtractor_Luv)
def build(self, image):
if len(image.shape) == 3:
self.image = image
else:
self.image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
return self
class ColorspaceGrayscale(Colorspace):
def __init__(self, features):
Colorspace.__init__(self)
for feature in features:
if feature in ('gray', 'grayscale'):
self.feature_extractors.append(FeatureExtractor_Grayscale)
elif feature in ('gray_norm', 'normalized'):
self.feature_extractors.append(FeatureExtractor_GrayscaleNormalized)
elif feature in ('sobel', 'grad1', 'sobel1'):
self.feature_extractors.append(FeatureExtractor_Sobel1)
elif feature in ('grad2', 'sobel2'):
self.feature_extractors.append(FeatureExtractor_Sobel2)
elif feature in ('gradmagn', 'grad_magn', 'sobel_gradmagn'):
self.feature_extractors.append(FeatureExtractor_GradientMagnitude)
# elif feature in ('HoG', 'hog'):
# self.feature_extractors.append(FeatureExtractor_HoG)
# elif feature in ('gaussian_laplace', 'gauss_laplace'):
# self.feature_extractors.append(FeatureExtractor_LaplacianOfGaussian)
# elif feature in ('structure_tensor_eig', 'ste'):
# self.feature_extractors.append(FeatureExtractor_EigenvaluesStructureTensor)
def build(self, image):
if len(image.shape) == 2:
self.image = image
else:
self.image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
return self
class FeaturesExtractor:
def __init__(self, colorspaces, dtype=np.float16, logger=StandardOutputLogger(Logger.INFO)):
if colorspaces is None:
raise ValueError("Colorspace cannot be None")
self.logger = logger
self.dtype = dtype
self.features = None
self.colorspaces = list()
colorspaces_dict = split_list_to_dict(colorspaces, splitter='__')
for colorspace, features in colorspaces_dict.items():
if colorspace in ('rgb', 'RGB', 'color'):
self.colorspaces.append(ColorspaceRGB(features))
elif colorspace in ('gray', 'grayscale', 'L'):
self.colorspaces.append(ColorspaceGrayscale(features))
def build(self, image):
self.logger.i("[FEATURE EXTRACTOR] Start building features extractor...")
[c.build(image) for c in self.colorspaces]
self.features = np.dstack([f(logger=self.logger).extract(c.image)
for c in self.colorspaces for f in c.feature_extractors])
self.logger.i("[FEATURE_EXTRACTOR] Done.")
return self
@property
def feature_image(self):
return np.asarray(self.features, dtype=self.dtype)
@classmethod
def create_from_parameters(cls, parameters, logger=StandardOutputLogger(Logger.INFO)):
kwargs = parameters if isinstance(parameters, dict) else vars(parameters)
kwargs.update({'logger': logger})
return cls(**kwargs)
class FeatureExtractor(object):
__metaclass__ = ABCMeta
def __init__(self, name=None, logger=SilentLogger()):
self.logger = logger
self.name = name
@abstractmethod
def extract(self, image):
pass
class FeatureExtractor1(FeatureExtractor):
def extract(self, image):
if not len(image.shape) == 2:
raise ValueError("Cannot extract a grayscale feature on this image")
class FeatureExtractor3(FeatureExtractor):
def extract(self, image):
if not len(image.shape) == 3:
raise ValueError("Cannot extract a color feature on this image")
class FeatureExtractor_RGB(FeatureExtractor3):
def extract(self, image):
self.logger.i("[FEATURE EXTRACTOR] Performing RGB extraction.")
super(FeatureExtractor_RGB, self).extract(image)
return image
class FeatureExtractor_HSV(FeatureExtractor3):
def extract(self, image):
self.logger.i("[FEATURE EXTRACTOR] Performing HSV extraction.")
super(FeatureExtractor_HSV, self).extract(image)
return cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
class FeatureExtractor_Luv(FeatureExtractor3):
def extract(self, image):
self.logger.i("[FEATURE EXTRACTOR] Performing Luv extraction.")
super(FeatureExtractor_Luv, self).extract(image)
return cv2.cvtColor(image, cv2.COLOR_RGB2Luv)
class FeatureExtractor_Grayscale(FeatureExtractor1):
def extract(self, image):
self.logger.i("[FEATURE EXTRACTOR] Performing grayscale extraction.")
super(FeatureExtractor_Grayscale, self).extract(image)
return image
class FeatureExtractor_GrayscaleNormalized(FeatureExtractor1):
def extract(self, image):
self.logger.i("[FEATURE EXTRACTOR] Performing normalized grayscale extraction.")
super(FeatureExtractor_GrayscaleNormalized, self).extract(image)
return cv2.equalizeHist(image.astype(np.uint8))
class FeatureExtractor_Sobel1(FeatureExtractor1):
def extract(self, image):
self.logger.i("[FEATURE EXTRACTOR] Performing Sobel at 1st order extraction.")
super(FeatureExtractor_Sobel1, self).extract(image)
sob_x = cv2.Sobel(image, cv2.CV_32F, 1, 0)
sob_y = cv2.Sobel(image, cv2.CV_32F, 0, 1)
alpha = 1 # 0.25
return np.dstack((cv2.convertScaleAbs(sob_x, alpha=alpha),
cv2.convertScaleAbs(sob_y, alpha=alpha)))
class FeatureExtractor_Sobel2(FeatureExtractor1):
def extract(self, image):
self.logger.i("[FEATURE EXTRACTOR] Performing Sobel at 2nd order extraction.")
super(FeatureExtractor_Sobel2, self).extract(image)
sob_x = cv2.Sobel(image, cv2.CV_32F, 2, 0)
sob_y = cv2.Sobel(image, cv2.CV_32F, 0, 2)
alpha = 1 # 0.25
return np.dstack((cv2.convertScaleAbs(sob_x, alpha=alpha),
cv2.convertScaleAbs(sob_y, alpha=alpha)))
class FeatureExtractor_GradientMagnitude(FeatureExtractor1):
def extract(self, image):
self.logger.i("[FEATURE EXTRACTOR] Performing gradient magnitude extraction.")
super(FeatureExtractor_GradientMagnitude, self).extract(image)
sob_x = cv2.Sobel(image, cv2.CV_32F, 1, 0, ksize=3)
sob_y = cv2.Sobel(image, cv2.CV_32F, 0, 1, ksize=3)
magnitude = cv2.magnitude(sob_x, sob_y)
alpha = 1 # 0.9
return cv2.convertScaleAbs(magnitude, alpha=alpha)
# gray = gray.astype(np.float32)
# scales = [0.8 * (2 ** n) for n in range(3)]
# return np.dstack(tuple([snd.filters.gaussian_gradient_magnitude(gray, sigma=s) for s in scales]))
# class FeatureExtractor_HoG(FeatureExtractor1):
# def extract(self, image):
# self.logger.i("[FEATURE EXTRACTOR] Performing histogram of gradient (HOG) extraction.")
# super(FeatureExtractor_HoG, self).extract(image)
# _, h = hog(image, visualise=True)
# return h
#
#
# class FeatureExtractor_LaplacianOfGaussian(FeatureExtractor1):
# def __init__(self, logger=SilentLogger()):
# super(FeatureExtractor_LaplacianOfGaussian, self).__init__(logger=logger)
# self.scales = [0.8 * (2 ** n) for n in range(3)]
#
# def extract(self, image):
# self.logger.i("[FEATURE EXTRACTOR] Performing Laplacian of Gaussian extraction.")
# super(FeatureExtractor_LaplacianOfGaussian, self).extract(image)
# image = image.astype(np.float32)
# return np.dstack(tuple([snd.filters.gaussian_laplace(image, sigma=s) for s in self.scales]))
#
#
# class FeatureExtractor_EigenvaluesStructureTensor(FeatureExtractor1):
# def __init__(self, logger=SilentLogger()):
# super(FeatureExtractor_EigenvaluesStructureTensor, self).__init__(logger=logger)
# self.scales = [0.8 * (2 ** n) for n in range(3)]
#
# def extract(self, image):
# self.logger.i("[FEATURE EXTRACTOR] Performing eigenvalue of structure tensor extraction.")
# super(FeatureExtractor_EigenvaluesStructureTensor, self).extract(image)
# image = image.astype(np.float32)
# image /= image.max()
# eigs = [structure_tensor_eigvals(*structure_tensor(image, sigma=s)) for s in self.scales]
# return np.dstack(tuple([a for eig in eigs for a in eig]))
if __name__ == '__main__':
import matplotlib.pyplot as plt
import sys
import os
import numpy as np
def flip(m, axis):
indexer = [slice(None)] * m.ndim
indexer[axis] = slice(None, None, -1)
return m[tuple(indexer)]
filename = sys.argv[1]
if not os.path.exists(filename):
raise IOError("File " + filename + " does not exist !")
img = cv2.imread(filename, cv2.IMREAD_COLOR)
img = img[250:,250:,...]
colorspaces = ['RGB__rgb', 'RGB__luv', 'RGB__hsv', 'L__normalized',
'L__sobel1', 'L__sobel_gradmagn', 'L__sobel2']
fe = FeaturesExtractor(colorspaces, logger=StandardOutputLogger(Logger.DEBUG))
fe.build(img)
titles = ['Red', 'Green', 'Blue', 'L*', 'u*', 'v*', 'Hue', 'Saturation', 'Value',
'Eq. grayscale', '1st-Sobel x', '1st-Sobel y', 'Sobel grad. magn.',
'2nd-Sobel x', '2nd-Sobel y']
n_rows = 4
n_cols = 4
fig = plt.figure()
a = fig.add_subplot(n_rows, n_cols, 1)
plt.imshow(flip(img, 2))
a.set_title('Original RGB image')
a.set_axis_off()
for i in range(fe.features.shape[2]):
a = fig.add_subplot(n_rows, n_cols, i+2)
plt.imshow(fe.features[:, :, i], cmap='viridis')
a.set_title(titles[i])
a.set_axis_off()
plt.show()
|
|
import os
import sys
import errno
import pathlib
import tempfile
import unittest
import time
import shutil
import subprocess
from inspect import signature
from subprocess import Popen, PIPE
from tempfile import TemporaryDirectory
from multiprocessing.dummy import Pool
from memory_profiler import profile as profile_memory
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from pdf2image import (
convert_from_bytes,
convert_from_path,
pdfinfo_from_bytes,
pdfinfo_from_path,
)
from pdf2image.exceptions import (
PDFInfoNotInstalledError,
PDFPageCountError,
PDFSyntaxError,
PDFPopplerTimeoutError,
)
from functools import wraps
PROFILE_MEMORY = os.environ.get('PROFILE_MEMORY', False)
try:
subprocess.call(
["pdfinfo", "-h"], stdout=open(os.devnull, "w"), stderr=open(os.devnull, "w")
)
POPPLER_INSTALLED = True
except OSError as e:
if e.errno == errno.ENOENT:
POPPLER_INSTALLED = False
else:
# Anything other than "pdfinfo not found" is unexpected; surface it.
raise
def profile(f):
if PROFILE_MEMORY:
@wraps(f)
@profile_memory
def wrapped(*args, **kwargs):
r = f(*args, **kwargs)
return r
return wrapped
else:
@wraps(f)
def wrapped(*args, **kwargs):
r = f(*args, **kwargs)
return r
return wrapped
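# With PROFILE_MEMORY set in the environment (e.g. PROFILE_MEMORY=1), every
# @profile-decorated test below is additionally wrapped by memory_profiler's
# profile decorator; otherwise @profile is a plain pass-through wrapper.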
def get_poppler_path():
return pathlib.Path(
Popen(["which", "pdftoppm"], stdout=PIPE).communicate()[0].strip().decode()
).parent
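# get_poppler_path() shells out to `which pdftoppm` and returns the parent
# directory of the reported binary, e.g. PosixPath('/usr/bin') on a typical
# Linux install (the path shown here is only an assumption).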
class PDFConversionMethods(unittest.TestCase):
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes(self):
start_time = time.time()
with open("./tests/test.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(pdf_file.read())
self.assertTrue(len(images_from_bytes) == 1)
print("test_conversion_from_bytes: {} sec".format(time.time() - start_time))
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path(self):
start_time = time.time()
images_from_path = convert_from_path("./tests/test.pdf")
self.assertTrue(len(images_from_path) == 1)
print("test_conversion_from_path: {} sec".format(time.time() - start_time))
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_using_dir(self):
start_time = time.time()
with TemporaryDirectory() as path:
with open("./tests/test.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(
pdf_file.read(), output_folder=path
)
self.assertTrue(len(images_from_bytes) == 1)
[im.close() for im in images_from_bytes]
print(
"test_conversion_from_bytes_using_dir: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_using_dir(self):
start_time = time.time()
with TemporaryDirectory() as path:
images_from_path = convert_from_path("./tests/test.pdf", output_folder=path)
self.assertTrue(len(images_from_path) == 1)
[im.close() for im in images_from_path]
print(
"test_conversion_from_path_using_dir: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_14(self):
start_time = time.time()
with open("./tests/test_14.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(pdf_file.read())
self.assertTrue(len(images_from_bytes) == 14)
print(
"test_conversion_from_bytes_14: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_14(self):
start_time = time.time()
images_from_path = convert_from_path("./tests/test_14.pdf")
self.assertTrue(len(images_from_path) == 14)
print(
"test_conversion_from_path_14: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_using_dir_14(self):
start_time = time.time()
with TemporaryDirectory() as path:
with open("./tests/test_14.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(
pdf_file.read(), output_folder=path
)
self.assertTrue(len(images_from_bytes) == 14)
[im.close() for im in images_from_bytes]
print(
"test_conversion_from_bytes_using_dir_14: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_using_dir_14(self):
start_time = time.time()
with TemporaryDirectory() as path:
images_from_path = convert_from_path(
"./tests/test_14.pdf", output_folder=path
)
self.assertTrue(len(images_from_path) == 14)
[im.close() for im in images_from_path]
print(
"test_conversion_from_path_using_dir_14: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
@unittest.skipIf(
"CIRCLECI" in os.environ and os.environ["CIRCLECI"] == "true",
"Skipping this test on CircleCI.",
)
def test_conversion_from_bytes_241(self): # pragma: no cover
start_time = time.time()
with open("./tests/test_241.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(pdf_file.read())
self.assertTrue(len(images_from_bytes) == 241)
print(
"test_conversion_from_bytes_241: {} sec".format(
(time.time() - start_time) / 241.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
@unittest.skipIf(
"CIRCLECI" in os.environ and os.environ["CIRCLECI"] == "true",
"Skipping this test on CircleCI.",
)
def test_conversion_from_path_241(self): # pragma: no cover
start_time = time.time()
images_from_path = convert_from_path("./tests/test_241.pdf")
self.assertTrue(len(images_from_path) == 241)
print(
"test_conversion_from_path_241: {} sec".format(
(time.time() - start_time) / 241.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
@unittest.skipIf(
"CIRCLECI" in os.environ and os.environ["CIRCLECI"] == "true",
"Skipping this test on CircleCI.",
)
def test_conversion_from_bytes_using_dir_241(self): # pragma: no cover
start_time = time.time()
with TemporaryDirectory() as path:
with open("./tests/test_241.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(
pdf_file.read(), output_folder=path
)
self.assertTrue(len(images_from_bytes) == 241)
[im.close() for im in images_from_bytes]
print(
"test_conversion_from_bytes_using_dir_241: {} sec".format(
(time.time() - start_time) / 241.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
@unittest.skipIf(
"CIRCLECI" in os.environ and os.environ["CIRCLECI"] == "true",
"Skipping this test on CircleCI.",
)
def test_conversion_from_path_using_dir_241(self): # pragma: no cover
start_time = time.time()
with TemporaryDirectory() as path:
images_from_path = convert_from_path(
"./tests/test_241.pdf", output_folder=path
)
self.assertTrue(len(images_from_path) == 241)
[im.close() for im in images_from_path]
print(
"test_conversion_from_path_using_dir_241: {} sec".format(
(time.time() - start_time) / 241.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_empty_if_not_pdf(self):
start_time = time.time()
with self.assertRaises(Exception):
convert_from_path("./tests/test.jpg")
print("test_empty_if_not_pdf: {} sec".format(time.time() - start_time))
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_empty_if_file_not_found(self):
start_time = time.time()
with self.assertRaises(Exception):
convert_from_path("./tests/totally_a_real_file_in_folder.xyz")
print("test_empty_if_file_not_found: {} sec".format(time.time() - start_time))
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_empty_if_corrupted_pdf(self):
start_time = time.time()
with self.assertRaises(Exception):
convert_from_path("./tests/test_corrupted.pdf")
print("test_empty_if_corrupted_pdf: {} sec".format(time.time() - start_time))
## Test first page
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_14_first_page_12(self):
start_time = time.time()
with open("./tests/test_14.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(pdf_file.read(), first_page=12)
self.assertTrue(len(images_from_bytes) == 3)
print(
"test_conversion_from_bytes_14_last_page_12: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_14_first_page_12(self):
start_time = time.time()
images_from_path = convert_from_path("./tests/test_14.pdf", first_page=12)
self.assertTrue(len(images_from_path) == 3)
print(
"test_conversion_from_path_14_first_page_12: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_using_dir_14_first_page_12(self):
start_time = time.time()
with TemporaryDirectory() as path:
with open("./tests/test_14.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(
pdf_file.read(), output_folder=path, first_page=12
)
self.assertTrue(len(images_from_bytes) == 3)
[im.close() for im in images_from_bytes]
print(
"test_conversion_from_bytes_using_dir_14_first_page_12: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_using_dir_14_first_page_12(self):
start_time = time.time()
with TemporaryDirectory() as path:
images_from_path = convert_from_path(
"./tests/test_14.pdf", output_folder=path, first_page=12
)
self.assertTrue(len(images_from_path) == 3)
[im.close() for im in images_from_path]
print(
"test_conversion_from_path_using_dir_14_first_page_12: {} sec".format(
(time.time() - start_time) / 14.0
)
)
## Test last page
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_14_last_page_12(self):
start_time = time.time()
with open("./tests/test_14.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(pdf_file.read(), last_page=12)
self.assertTrue(len(images_from_bytes) == 12)
print(
"test_conversion_from_bytes_14_last_page_12: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_14_last_page_12(self):
start_time = time.time()
images_from_path = convert_from_path("./tests/test_14.pdf", last_page=12)
self.assertTrue(len(images_from_path) == 12)
print(
"test_conversion_from_path_14_last_page_12: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_using_dir_14_last_page_12(self):
start_time = time.time()
with TemporaryDirectory() as path:
with open("./tests/test_14.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(
pdf_file.read(), output_folder=path, last_page=12
)
self.assertTrue(len(images_from_bytes) == 12)
[im.close() for im in images_from_bytes]
print(
"test_conversion_from_bytes_using_dir_14_last_page_12: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_using_dir_14_last_page_12(self):
start_time = time.time()
with TemporaryDirectory() as path:
images_from_path = convert_from_path(
"./tests/test_14.pdf", output_folder=path, last_page=12
)
self.assertTrue(len(images_from_path) == 12)
[im.close() for im in images_from_path]
print(
"test_conversion_from_path_using_dir_14_last_page_12: {} sec".format(
(time.time() - start_time) / 14.0
)
)
## Test first and last page
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_14_first_page_2_last_page_12(self):
start_time = time.time()
with open("./tests/test_14.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(
pdf_file.read(), first_page=2, last_page=12
)
self.assertTrue(len(images_from_bytes) == 11)
print(
"test_conversion_from_bytes_14_first_page_2_last_page_12: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_14_first_page_2_last_page_12(self):
start_time = time.time()
images_from_path = convert_from_path(
"./tests/test_14.pdf", first_page=2, last_page=12
)
self.assertTrue(len(images_from_path) == 11)
print(
"test_conversion_from_path_14_first_page_2_last_page_12: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_using_dir_14_first_page_2_last_page_12(self):
start_time = time.time()
with TemporaryDirectory() as path:
with open("./tests/test_14.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(
pdf_file.read(), output_folder=path, first_page=2, last_page=12
)
self.assertTrue(len(images_from_bytes) == 11)
[im.close() for im in images_from_bytes]
print(
"test_conversion_from_bytes_using_dir_14_first_page_2_last_page_12: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_using_dir_14_first_page_2_last_page_12(self):
start_time = time.time()
with TemporaryDirectory() as path:
images_from_path = convert_from_path(
"./tests/test_14.pdf", output_folder=path, first_page=2, last_page=12
)
self.assertTrue(len(images_from_path) == 11)
[im.close() for im in images_from_path]
print(
"test_conversion_from_path_using_dir_14_first_page_2_last_page_12: {} sec".format(
(time.time() - start_time) / 14.0
)
)
## Test output as jpeg
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_to_jpeg_from_bytes(self):
start_time = time.time()
with open("./tests/test.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(pdf_file.read(), fmt="jpg")
self.assertTrue(images_from_bytes[0].format == "JPEG")
print(
"test_conversion_to_jpeg_from_bytes_14: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_to_jpeg_from_path_using_dir(self):
start_time = time.time()
with TemporaryDirectory() as path:
images_from_path = convert_from_path(
"./tests/test.pdf", output_folder=path, fmt="jpeg"
)
self.assertTrue(images_from_path[0].format == "JPEG")
[im.close() for im in images_from_path]
print(
"test_conversion_to_jpeg_from_path_using_dir_14: {} sec".format(
(time.time() - start_time) / 14.0
)
)
## Test output as png
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_to_png_from_bytes(self):
start_time = time.time()
with open("./tests/test.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(pdf_file.read(), fmt="png")
self.assertTrue(images_from_bytes[0].format == "PNG")
print(
"test_conversion_to_png_from_bytes_14: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_to_png_from_path_using_dir(self):
start_time = time.time()
with TemporaryDirectory() as path:
images_from_path = convert_from_path(
"./tests/test.pdf", output_folder=path, fmt="png"
)
self.assertTrue(images_from_path[0].format == "PNG")
[im.close() for im in images_from_path]
print(
"test_conversion_to_png_from_path_using_dir_14: {} sec".format(
(time.time() - start_time) / 14.0
)
)
## Test output with not-empty output_folder
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_non_empty_output_folder(self):
start_time = time.time()
images_from_path = convert_from_path(
"./tests/test.pdf", output_folder="./tests/"
)
self.assertTrue(len(images_from_path) == 1)
[im.close() for im in images_from_path]
[os.remove(im.filename) for im in images_from_path]
print(
"test_non_empty_output_folder: {} sec".format(
time.time() - start_time
)
)
## Test format that starts with a dot
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_format_that_starts_with_a_dot(self):
start_time = time.time()
with TemporaryDirectory() as path:
with open("./tests/test.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(
pdf_file.read(), output_folder=path, fmt=".jpg"
)
self.assertTrue(len(images_from_bytes) == 1)
[im.close() for im in images_from_bytes]
print(
"test_format_that_starts_with_a_dot: {} sec".format(
time.time() - start_time
)
)
## Test locked PDF
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_locked_pdf_with_userpw_only(self):
start_time = time.time()
with TemporaryDirectory() as path:
with open("./tests/test_locked_user_only.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(
pdf_file.read(), output_folder=path, fmt=".jpg", userpw="pdf2image"
)
self.assertTrue(len(images_from_bytes) == 1)
[im.close() for im in images_from_bytes]
print(
"test_locked_pdf_with_userpw_only: {} sec".format(time.time() - start_time)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_not_locked_pdf(self):
start_time = time.time()
with TemporaryDirectory() as path:
with open("./tests/test.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(
pdf_file.read(), output_folder=path, fmt=".jpg", userpw="pdf2image"
)
self.assertTrue(len(images_from_bytes) == 1)
[im.close() for im in images_from_bytes]
print(
"test_locked_pdf_with_userpw_only: {} sec".format(time.time() - start_time)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_locked_pdf_with_ownerpw_only(self):
start_time = time.time()
with TemporaryDirectory() as path:
with open("./tests/test_locked_owner_only.pdf", "rb") as pdf_file:
# No need to pass a ownerpw because the absence of userpw means we can read it anyway
images_from_bytes = convert_from_bytes(
pdf_file.read(), output_folder=path, fmt=".jpg"
)
self.assertTrue(len(images_from_bytes) == 1)
[im.close() for im in images_from_bytes]
print(
"test_locked_pdf_with_ownerpw_only: {} sec".format(time.time() - start_time)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_locked_pdf_with_ownerpw_and_userpw(self):
start_time = time.time()
with TemporaryDirectory() as path:
with open("./tests/test_locked_both.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(
pdf_file.read(), output_folder=path, fmt=".jpg", userpw="pdf2image"
)
self.assertTrue(len(images_from_bytes) == 1)
[im.close() for im in images_from_bytes]
print(
"test_locked_pdf_with_ownerpw_and_userpw: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_locked_pdf_with_ownerpw_and_userpw_forgotten(self):
start_time = time.time()
with TemporaryDirectory() as path:
with open("./tests/test_locked_both_user_forgotten.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(
pdf_file.read(), output_folder=path, fmt=".jpg", ownerpw="pdf2image"
)
self.assertTrue(len(images_from_bytes) == 1)
[im.close() for im in images_from_bytes]
print(
"test_locked_pdf_with_ownerpw_and_userpw_forgotten: {} sec".format(
time.time() - start_time
)
)
## Tests cropbox
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_using_cropbox(self):
start_time = time.time()
with open("./tests/test.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(pdf_file.read(), use_cropbox=True)
self.assertTrue(len(images_from_bytes) == 1)
print(
"test_conversion_from_bytes_using_cropbox: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_using_cropbox(self):
start_time = time.time()
images_from_path = convert_from_path("./tests/test.pdf", use_cropbox=True)
self.assertTrue(len(images_from_path) == 1)
print(
"test_conversion_from_path_using_cropbox: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_using_dir_and_cropbox(self):
start_time = time.time()
with TemporaryDirectory() as path:
with open("./tests/test.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(
pdf_file.read(), output_folder=path, use_cropbox=True
)
self.assertTrue(len(images_from_bytes) == 1)
[im.close() for im in images_from_bytes]
print(
"test_conversion_from_bytes_using_dir_and_cropbox: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_using_dir_and_cropbox(self):
start_time = time.time()
with TemporaryDirectory() as path:
images_from_path = convert_from_path(
"./tests/test.pdf", output_folder=path, use_cropbox=True
)
self.assertTrue(len(images_from_path) == 1)
[im.close() for im in images_from_path]
print(
"test_conversion_from_path_using_dir_and_cropbox: {} sec".format(
time.time() - start_time
)
)
## Tests multithreading
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_14_with_4_threads(self):
start_time = time.time()
with open("./tests/test_14.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(pdf_file.read(), thread_count=4)
self.assertTrue(len(images_from_bytes) == 14)
print(
"test_conversion_from_bytes_14_with_4_thread: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_14_with_4_threads(self):
start_time = time.time()
images_from_path = convert_from_path("./tests/test_14.pdf", thread_count=4)
self.assertTrue(len(images_from_path) == 14)
print(
"test_conversion_from_path_14_with_4_thread: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_14_with_15_threads(self):
start_time = time.time()
with open("./tests/test_14.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(pdf_file.read(), thread_count=15)
self.assertTrue(len(images_from_bytes) == 14)
print(
"test_conversion_from_bytes_14_with_15_thread: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_14_with_0_threads(self):
start_time = time.time()
images_from_path = convert_from_path("./tests/test_14.pdf", thread_count=0)
self.assertTrue(len(images_from_path) == 14)
print(
"test_conversion_from_path_14_with_4_thread: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_using_dir_14_with_4_threads(self):
start_time = time.time()
with TemporaryDirectory() as path:
with open("./tests/test_14.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(
pdf_file.read(), output_folder=path, thread_count=4
)
self.assertTrue(len(images_from_bytes) == 14)
[im.close() for im in images_from_bytes]
print(
"test_conversion_from_bytes_using_dir_14_with_4_thread: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_using_dir_14_with_4_threads(self):
start_time = time.time()
with TemporaryDirectory() as path:
images_from_path = convert_from_path(
"./tests/test_14.pdf", output_folder=path, thread_count=4
)
self.assertTrue(len(images_from_path) == 14)
[im.close() for im in images_from_path]
print(
"test_conversion_from_path_using_dir_14_with_4_thread: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(
"CIRCLECI" in os.environ and os.environ["CIRCLECI"] == "true",
"Skipping this test on CircleCI.",
)
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_241_with_4_threads(self): # pragma: no cover
start_time = time.time()
with open("./tests/test_241.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(pdf_file.read(), thread_count=4)
self.assertTrue(len(images_from_bytes) == 241)
print(
"test_conversion_from_bytes_241_with_4_thread: {} sec".format(
(time.time() - start_time) / 241.0
)
)
@profile
@unittest.skipIf(
"CIRCLECI" in os.environ and os.environ["CIRCLECI"] == "true",
"Skipping this test on CircleCI.",
)
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_241_with_4_threads(self): # pragma: no cover
start_time = time.time()
images_from_path = convert_from_path("./tests/test_241.pdf", thread_count=4)
self.assertTrue(len(images_from_path) == 241)
print(
"test_conversion_from_path_241_with_4_thread: {} sec".format(
(time.time() - start_time) / 241.0
)
)
@profile
@unittest.skipIf(
"CIRCLECI" in os.environ and os.environ["CIRCLECI"] == "true",
"Skipping this test on CircleCI.",
)
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_using_dir_241_with_4_threads(
self,
): # pragma: no cover
start_time = time.time()
with TemporaryDirectory() as path:
with open("./tests/test_241.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(
pdf_file.read(), output_folder=path, thread_count=4
)
self.assertTrue(len(images_from_bytes) == 241)
[im.close() for im in images_from_bytes]
print(
"test_conversion_from_bytes_using_dir_241_with_4_thread: {} sec".format(
(time.time() - start_time) / 241.0
)
)
@profile
@unittest.skipIf(
"CIRCLECI" in os.environ and os.environ["CIRCLECI"] == "true",
"Skipping this test on CircleCI.",
)
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_using_dir_241_with_4_threads(
self,
): # pragma: no cover
start_time = time.time()
with TemporaryDirectory() as path:
images_from_path = convert_from_path(
"./tests/test_241.pdf", output_folder=path, thread_count=4
)
self.assertTrue(len(images_from_path) == 241)
[im.close() for im in images_from_path]
print(
"test_conversion_from_path_using_dir_241_with_4_thread: {} sec".format(
(time.time() - start_time) / 241.0
)
)
# Testing custom exceptions
@unittest.skipIf(POPPLER_INSTALLED, "Poppler is installed, skipping.")
def test_pdfinfo_not_installed_throws(self):
start_time = time.time()
try:
images_from_path = convert_from_path("./tests/test_14.pdf")
raise Exception("This should not happen")
except PDFInfoNotInstalledError as ex:
pass
print(
"test_pdfinfo_not_installed_throws: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_missingfonterror_throws(self):
start_time = time.time()
try:
images_from_path = convert_from_path("./tests/test_strict.pdf", strict=True)
raise Exception("This should not happen")
except PDFSyntaxError as ex:
pass
print("test_syntaxerror_throws: {} sec".format(time.time() - start_time))
# Test transparent
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_using_transparent(self):
start_time = time.time()
with open("./tests/test.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(
pdf_file.read(), transparent=True, fmt="png"
)
self.assertTrue(len(images_from_bytes) == 1)
print(
"test_conversion_from_bytes_using_transparent: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_using_transparent(self):
start_time = time.time()
images_from_path = convert_from_path(
"./tests/test.pdf", transparent=True, fmt="png"
)
self.assertTrue(len(images_from_path) == 1)
print(
"test_conversion_from_path_using_transparent: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_using_dir_and_transparent(self):
start_time = time.time()
with TemporaryDirectory() as path:
with open("./tests/test.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(
pdf_file.read(), output_folder=path, transparent=True, fmt="png"
)
self.assertTrue(len(images_from_bytes) == 1)
[im.close() for im in images_from_bytes]
print(
"test_conversion_from_bytes_using_dir_and_transparent: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_using_dir_and_transparent(self):
start_time = time.time()
with TemporaryDirectory() as path:
images_from_path = convert_from_path(
"./tests/test.pdf", output_folder=path, transparent=True, fmt="png"
)
self.assertTrue(len(images_from_path) == 1)
[im.close() for im in images_from_path]
print(
"test_conversion_from_path_using_dir_and_transparent: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_transparent_without_png(self):
start_time = time.time()
images_from_path = convert_from_path("./tests/test.pdf", transparent=True)
self.assertTrue(len(images_from_path) == 1)
[im.close() for im in images_from_path]
print(
"test_conversion_from_path_using_transparent_without_png: {} sec".format(
time.time() - start_time
)
)
## Test output as TIFF
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_to_tiff_from_bytes(self):
start_time = time.time()
with open("./tests/test.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(pdf_file.read(), fmt="tiff")
self.assertTrue(images_from_bytes[0].format == "TIFF")
print(
"test_conversion_to_tiff_from_bytes_14: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_to_tiff_from_path_using_dir(self):
start_time = time.time()
with TemporaryDirectory() as path:
images_from_path = convert_from_path(
"./tests/test.pdf", output_folder=path, fmt="tiff"
)
self.assertTrue(images_from_path[0].format == "TIFF")
[im.close() for im in images_from_path]
print(
"test_conversion_to_tiff_from_path_using_dir_14: {} sec".format(
(time.time() - start_time) / 14.0
)
)
## Test hanging file handles
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
@unittest.skipIf(not os.name == "posix", "This test only works on posix systems")
def test_close_tempfile_after_conversion(self):
start_time = time.time()
with open("./tests/test.pdf", "rb") as pdf_file:
fd_count_before = len(
subprocess.check_output(
["ls", "-l", "/proc/" + str(os.getpid()) + "/fd"]
)
.decode("utf8")
.split("\n")
)
pdf_data = pdf_file.read()
images_from_bytes = []
for i in range(50):
images_from_bytes.extend(convert_from_bytes(pdf_data))
# Closing the images
[im.close() for im in images_from_bytes]
pid = os.getpid()
fd_count_after = len(
subprocess.check_output(
["ls", "-l", "/proc/" + str(os.getpid()) + "/fd"]
)
.decode("utf8")
.split("\n")
)
# Add an error margin
self.assertTrue(abs(fd_count_before - fd_count_after) <= 3)
print(
"test_close_tempfile_after_conversion: {} sec".format(
time.time() - start_time
)
)
## Test poppler_path
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
@unittest.skipIf(not os.name == "posix", "This test only works on posix systems")
def test_use_poppler_path(self):
os.mkdir("./bin")
shutil.copy("/usr/bin/pdftoppm", "./bin")
shutil.copy("/usr/bin/pdfinfo", "./bin")
start_time = time.time()
try:
images_from_path = convert_from_path(
"./tests/test.pdf", poppler_path="./bin"
)
finally:
shutil.rmtree("./bin")
self.assertTrue(len(images_from_path) == 1)
[im.close() for im in images_from_path]
print(
"test_conversion_from_path_using_poppler_path: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
@unittest.skipIf(not os.name == "posix", "This test only works on posix systems")
def test_use_poppler_path_with_trailing_slash(self):
os.mkdir("./bin")
shutil.copy("/usr/bin/pdftoppm", "./bin")
shutil.copy("/usr/bin/pdfinfo", "./bin")
start_time = time.time()
try:
images_from_path = convert_from_path(
"./tests/test.pdf", poppler_path="./bin/"
)
finally:
shutil.rmtree("./bin")
self.assertTrue(len(images_from_path) == 1)
[im.close() for im in images_from_path]
print(
"test_conversion_from_path_using_poppler_path_with_trailing_slash: {} sec".format(
time.time() - start_time
)
)
## Test first page greater or equal to last_page
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_14_first_page_1_last_page_1(self):
start_time = time.time()
images_from_path = convert_from_path(
"./tests/test_14.pdf", first_page=1, last_page=1
)
self.assertTrue(len(images_from_path) == 1)
print(
"test_conversion_from_path_14_first_page_1_last_page_1: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_14_first_page_12_last_page_1(self):
start_time = time.time()
images_from_path = convert_from_path(
"./tests/test_14.pdf", first_page=12, last_page=1
)
self.assertTrue(len(images_from_path) == 0)
print(
"test_conversion_from_path_14_first_page_12_last_page_1: {} sec".format(
(time.time() - start_time) / 14.0
)
)
## Test singlefile
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_using_dir_single_file(self):
start_time = time.time()
with TemporaryDirectory() as path:
with open("./tests/test.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(
pdf_file.read(),
output_folder=path,
output_file="test",
single_file=True,
)
self.assertTrue(len(images_from_bytes) == 1)
self.assertTrue(
images_from_bytes[0].filename == os.path.join(path, "test.ppm")
)
[im.close() for im in images_from_bytes]
print(
"test_conversion_from_bytes_using_dir_single_file: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_using_dir_single_file(self):
start_time = time.time()
with TemporaryDirectory() as path:
images_from_path = convert_from_path(
"./tests/test.pdf",
output_folder=path,
output_file="test",
single_file=True,
)
self.assertTrue(len(images_from_path) == 1)
self.assertTrue(
images_from_path[0].filename == os.path.join(path, "test.ppm")
)
[im.close() for im in images_from_path]
print(
"test_conversion_from_path_using_dir_single_file: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_using_dir_14_single_file(self):
start_time = time.time()
with TemporaryDirectory() as path:
images_from_path = convert_from_path(
"./tests/test_14.pdf",
output_folder=path,
output_file="test",
single_file=True,
)
self.assertTrue(len(images_from_path) == 1)
self.assertTrue(
images_from_path[0].filename == os.path.join(path, "test.ppm")
)
[im.close() for im in images_from_path]
print(
"test_conversion_from_path_using_dir_14_single_file: {} sec".format(
(time.time() - start_time) / 14.0
)
)
## Test file with same name in directory
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_using_dir_with_containing_file_with_same_name(self):
start_time = time.time()
with TemporaryDirectory() as path:
shutil.copyfile("./tests/test.pdf", os.path.join(path, "test.pdf"))
images_from_path = convert_from_path(
"./tests/test.pdf", output_folder=path, output_file="test"
)
self.assertTrue(len(images_from_path) == 1)
self.assertTrue(
images_from_path[0].filename == os.path.join(path, "test0001-1.ppm")
)
[im.close() for im in images_from_path]
print(
"test_conversion_from_path_using_dir_single_file: {} sec".format(
time.time() - start_time
)
)
## Test grayscale option
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_to_grayscale_from_bytes(self):
start_time = time.time()
with open("./tests/test_14.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(pdf_file.read(), grayscale=True)
self.assertTrue(images_from_bytes[0].mode == "L")
print(
"test_conversion_to_grayscale_from_bytes_14: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_to_grayscale_from_path(self):
start_time = time.time()
images_from_path = convert_from_path("./tests/test_14.pdf", grayscale=True)
self.assertTrue(images_from_path[0].mode == "L")
[im.close() for im in images_from_path]
print(
"test_conversion_to_grayscale_from_path_14: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_to_grayscale_from_path_using_dir(self):
start_time = time.time()
with TemporaryDirectory() as path:
images_from_path = convert_from_path(
"./tests/test_14.pdf", output_folder=path, grayscale=True
)
self.assertTrue(images_from_path[0].mode == "L")
[im.close() for im in images_from_path]
print(
"test_conversion_to_grayscale_from_path_using_dir_14: {} sec".format(
(time.time() - start_time) / 14.0
)
)
## Test pathlib support
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_pathlib_path_using_dir(self):
start_time = time.time()
with TemporaryDirectory() as path:
images_from_path = convert_from_path(
pathlib.Path("./tests/test.pdf"),
output_folder=pathlib.Path(path),
poppler_path=get_poppler_path(),
)
self.assertTrue(len(images_from_path) == 1)
[im.close() for im in images_from_path]
print(
"test_conversion_from_pathlib_path_using_dir: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_pathlib_path_14(self):
start_time = time.time()
images_from_path = convert_from_path(pathlib.Path("./tests/test_14.pdf"))
self.assertTrue(len(images_from_path) == 14)
print(
"test_conversion_from_pathlib_path_14: {} sec".format(
(time.time() - start_time) / 14.0
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_pathlib_path_using_dir_14(self):
start_time = time.time()
with TemporaryDirectory() as path:
images_from_path = convert_from_path(
pathlib.Path("./tests/test_14.pdf"),
output_folder=pathlib.Path(path),
poppler_path=get_poppler_path(),
)
self.assertTrue(len(images_from_path) == 14)
[im.close() for im in images_from_path]
print(
"test_conversion_from_pathlib_path_using_dir_14: {} sec".format(
(time.time() - start_time) / 14.0
)
)
## Test jpegopt parameter
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_with_quality(self):
start_time = time.time()
images_from_path = convert_from_path(
"./tests/test.pdf", fmt="jpeg", jpegopt={"quality": 100}
)
self.assertTrue(len(images_from_path) == 1)
print(
"test_conversion_from_path_with_quality: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_with_quality(self):
start_time = time.time()
with open("./tests/test.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(
pdf_file.read(), fmt="jpg", jpegopt={"quality": 100}
)
self.assertTrue(len(images_from_bytes) == 1)
print(
"test_conversion_from_bytes_with_quality: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_with_quality_and_progressive(self):
start_time = time.time()
images_from_path = convert_from_path(
"./tests/test.pdf",
fmt="jpeg",
jpegopt={"quality": 100, "progressive": True},
)
self.assertTrue(len(images_from_path) == 1)
print(
"test_conversion_from_path_with_quality_and_progressive: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_with_quality_and_not_progressive(self):
start_time = time.time()
images_from_path = convert_from_path(
"./tests/test.pdf",
fmt="jpeg",
jpegopt={"quality": 100, "progressive": False},
)
self.assertTrue(len(images_from_path) == 1)
print(
"test_conversion_from_path_with_quality_and_progressive: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_with_quality_and_progressive(self):
start_time = time.time()
with open("./tests/test.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(
pdf_file.read(),
fmt="jpg",
jpegopt={"quality": 100, "progressive": True},
)
self.assertTrue(len(images_from_bytes) == 1)
print(
"test_conversion_from_bytes_with_quality_and_progressive: {} sec".format(
time.time() - start_time
)
)
@profile
    @unittest.skipIf(POPPLER_INSTALLED, "Poppler is installed!")
    def test_conversion_from_bytes_with_quality_and_poppler_not_installed(self):
start_time = time.time()
with open("./tests/test.pdf", "rb") as pdf_file:
try:
images_from_bytes = convert_from_bytes(
pdf_file.read(), fmt="jpg", jpegopt={"quality": 100}
)
except PDFInfoNotInstalledError:
pass
print(
"test_conversion_from_bytes_with_quality_and_poppler_not_installed: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_with_quality_and_progressive_and_optimize(self):
start_time = time.time()
images_from_path = convert_from_path(
"./tests/test.pdf",
fmt="jpeg",
jpegopt={"quality": 100, "progressive": True, "optimize": True},
)
self.assertTrue(len(images_from_path) == 1)
print(
"test_conversion_from_path_with_quality_and_progressive_and_optimize: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_with_quality_and_progressive_and_optimize(self):
start_time = time.time()
with open("./tests/test.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(
pdf_file.read(),
fmt="jpg",
jpegopt={"quality": 100, "progressive": True, "optimize": True},
)
self.assertTrue(len(images_from_bytes) == 1)
print(
"test_conversion_from_bytes_with_quality_and_progressive_and_optimize: {} sec".format(
time.time() - start_time
)
)
## Test size parameter
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_with_int_size(self):
start_time = time.time()
images_from_path = convert_from_path("./tests/test.pdf", size=400)
self.assertTrue(images_from_path[0].size[1] == 400)
self.assertTrue(len(images_from_path) == 1)
print(
"test_conversion_from_path_with_int_size: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_with_1d_tuple_size(self):
start_time = time.time()
images_from_path = convert_from_path("./tests/test.pdf", size=(400,))
self.assertTrue(images_from_path[0].size[1] == 400)
self.assertTrue(len(images_from_path) == 1)
print(
"test_conversion_from_path_with_1d_tuple_size: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_with_2d_tuple_size(self):
start_time = time.time()
images_from_path = convert_from_path("./tests/test.pdf", size=(400, 400))
self.assertTrue(images_from_path[0].size == (400, 400))
self.assertTrue(len(images_from_path) == 1)
print(
"test_conversion_from_path_with_2d_tuple_size: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_with_invalid_size(self):
start_time = time.time()
try:
images_from_path = convert_from_path("./tests/test.pdf", size="bad value")
raise Exception("This should not happen")
except ValueError:
pass
print(
"test_conversion_from_path_with_invalid_size: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_with_2d_tuple_size_with_None_width(self):
start_time = time.time()
images_from_path = convert_from_path("./tests/test.pdf", size=(None, 400))
self.assertTrue(images_from_path[0].size[0] == 310)
self.assertTrue(images_from_path[0].size[1] == 400)
self.assertTrue(len(images_from_path) == 1)
print(
"test_conversion_from_path_with_2d_tuple_size_with_None_width: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_with_2d_tuple_size_with_None_height(self):
start_time = time.time()
images_from_path = convert_from_path("./tests/test.pdf", size=(400, None))
self.assertTrue(images_from_path[0].size[0] == 400)
self.assertTrue(images_from_path[0].size[1] == 518)
self.assertTrue(len(images_from_path) == 1)
print(
"test_conversion_from_path_with_2d_tuple_size_with_None_height: {} sec".format(
time.time() - start_time
)
)
## Test hide annotations parameter
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_with_hide_annotations(self):
images_from_path = convert_from_path("./tests/test_annotations.pdf", hide_annotations=True)
start_time = time.time()
self.assertTrue(len(images_from_path) == 1)
print(
"test_conversion_from_path_with_hide_annotations: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_with_hide_annotations(self):
start_time = time.time()
with open("./tests/test_annotations.pdf", "rb") as pdf_file:
images_from_bytes = convert_from_bytes(
pdf_file.read(),
hide_annotations=True,
)
self.assertTrue(len(images_from_bytes) == 1)
print(
"test_conversion_from_bytes_with_hide_annotations: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_with_hide_annotations_with_invalid_arg_combination(self):
start_time = time.time()
try:
images_from_path = convert_from_path(
"./tests/test_annotations.pdf",
hide_annotations=True,
use_pdftocairo=True,
)
raise Exception("This should not happen")
except NotImplementedError:
pass
print(
"test_conversion_from_path_with_hide_annotations_with_invalid_arg_combination: {} sec".format(
time.time() - start_time
)
)
## Test pdfinfo
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_pdfinfo_from_path(self):
start_time = time.time()
info = pdfinfo_from_path("./tests/test.pdf")
self.assertTrue(info.get("Pages", 0) == 1)
print("test_pdfinfo_from_path: {} sec".format(time.time() - start_time))
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_pdfinfo_from_bytes(self):
start_time = time.time()
with open("./tests/test.pdf", "rb") as fh:
info = pdfinfo_from_bytes(fh.read())
self.assertTrue(info.get("Pages", 0) == 1)
print("test_pdfinfo_from_bytes: {} sec".format(time.time() - start_time))
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_pdfinfo_from_path_241(self):
start_time = time.time()
info = pdfinfo_from_path("./tests/test_241.pdf")
self.assertTrue(info.get("Pages", 0) == 241)
print("test_pdfinfo_from_path_241: {} sec".format(time.time() - start_time))
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_pdfinfo_from_bytes_241(self):
start_time = time.time()
with open("./tests/test_241.pdf", "rb") as fh:
info = pdfinfo_from_bytes(fh.read())
self.assertTrue(info.get("Pages", 0) == 241)
print("test_pdfinfo_from_bytes_241: {} sec".format(time.time() - start_time))
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_pdfinfo_from_path_invalid(self):
start_time = time.time()
try:
info = pdfinfo_from_path("./tests/test.jpg")
raise Exception("This should not happen")
except PDFPageCountError:
pass
print("test_pdfinfo_from_path_241: {} sec".format(time.time() - start_time))
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_pdfinfo_from_bytes_invalid(self):
start_time = time.time()
try:
with open("./tests/test.jpg", "rb") as fh:
info = pdfinfo_from_bytes(fh.read())
raise Exception("This should not happen")
except PDFPageCountError:
pass
print("test_pdfinfo_from_path_241: {} sec".format(time.time() - start_time))
# Test conversion with paths_only
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_using_dir_paths_only(self):
start_time = time.time()
with TemporaryDirectory() as path:
images_from_path = convert_from_path(
"./tests/test.pdf", output_folder=path, paths_only=True
)
self.assertTrue(len(images_from_path) == 1)
self.assertTrue(type(images_from_path[0]) == str)
print(
"test_conversion_from_path_using_dir: {} sec".format(
time.time() - start_time
)
)
# Test for issue #125
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed")
def test_multithread_conversion(self):
start_time = time.time()
files = ["./tests/test.pdf",] * 50
with Pool(10) as p:
res = p.map(convert_from_path, files)
self.assertTrue(len(res) == 50)
print("test_multithread_conversion: {} sec".format(time.time() - start_time))
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_path_with_use_pdftocairo(self):
start_time = time.time()
images_from_path = convert_from_path("./tests/test.pdf", use_pdftocairo=True)
self.assertTrue(len(images_from_path) == 1)
print(
"test_conversion_from_path_with_use_pdftocairo: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_conversion_from_bytes_with_use_pdftocairo(self):
start_time = time.time()
with open("./tests/test.pdf", "rb") as fh:
images_from_bytes = convert_from_bytes(fh.read(), use_pdftocairo=True)
self.assertTrue(len(images_from_bytes) == 1)
print(
"test_conversion_from_bytes_with_use_pdftocairo: {} sec".format(
time.time() - start_time
)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_pdfinfo_rawdates(self):
start_time = time.time()
info = pdfinfo_from_path("./tests/test.pdf", rawdates=True)
self.assertTrue("D:" in info["CreationDate"])
print(
"test_pdfinfo_rawdates: {} sec".format(time.time() - start_time)
)
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_pdfinfo_locked_pdf_with_userpw_only(self):
start_time = time.time()
with TemporaryDirectory() as path:
with open("./tests/test_locked_user_only.pdf", "rb") as pdf_file:
info = pdfinfo_from_bytes(
pdf_file.read(), userpw="pdf2image"
)
self.assertTrue("CreationDate" in info)
print(
"test_pdfinfo_locked_pdf_with_userpw_only: {} sec".format(time.time() - start_time)
)
@profile
def test_convert_from_functions_same_number_of_parameters(self):
start_time = time.time()
self.assertEqual(
len(signature(convert_from_path).parameters),
len(signature(convert_from_bytes).parameters),
)
print("test_convert_from_functions_same_number_of_parameters: {} sec".format(time.time() - start_time))
@profile
def test_pdfinfo_functions_same_number_of_parameters(self):
start_time = time.time()
self.assertEqual(
len(signature(pdfinfo_from_path).parameters),
len(signature(pdfinfo_from_bytes).parameters),
)
print("test_pdfinfo_functions_same_number_of_parameters: {} sec".format(time.time() - start_time))
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_timeout_pdfinfo_from_path_241(self):
start_time = time.time()
with self.assertRaises(PDFPopplerTimeoutError):
info = pdfinfo_from_path("./tests/test_241.pdf", timeout=0.00001)
print("test_timeout_pdfinfo_from_path_241: {} sec".format(time.time() - start_time))
@profile
@unittest.skipIf(not POPPLER_INSTALLED, "Poppler is not installed!")
def test_timeout_convert_from_path_241(self):
start_time = time.time()
with self.assertRaises(PDFPopplerTimeoutError):
imgs = convert_from_path("./tests/test_241.pdf", timeout=1)
print("test_timeout_convert_from_path_241: {} sec".format(time.time() - start_time))
if __name__ == "__main__":
unittest.main()
|
|
# -*- test-case-name: twisted.words.test.test_domish -*-
#
# Copyright (c) 2001-2007 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
DOM-like XML processing support.
This module provides support for parsing XML into DOM-like object structures
and serializing such structures to an XML string representation, optimized
for use in streaming XML applications.
"""
import types
from zope.interface import implements, Interface, Attribute
def _splitPrefix(name):
""" Internal method for splitting a prefixed Element name into its
respective parts """
ntok = name.split(":", 1)
if len(ntok) == 2:
return ntok
else:
return (None, ntok[0])
# Global map of prefixes that always get injected
# into the serializer's prefix map (note, that doesn't
# mean they're always _USED_)
G_PREFIXES = { "http://www.w3.org/XML/1998/namespace":"xml" }
class _ListSerializer:
""" Internal class which serializes an Element tree into a buffer """
def __init__(self, prefixes=None, prefixesInScope=None):
self.writelist = []
self.prefixes = {}
if prefixes:
self.prefixes.update(prefixes)
self.prefixes.update(G_PREFIXES)
self.prefixStack = [G_PREFIXES.values()] + (prefixesInScope or [])
self.prefixCounter = 0
def getValue(self):
return u"".join(self.writelist)
def getPrefix(self, uri):
if not self.prefixes.has_key(uri):
self.prefixes[uri] = "xn%d" % (self.prefixCounter)
self.prefixCounter = self.prefixCounter + 1
return self.prefixes[uri]
def prefixInScope(self, prefix):
stack = self.prefixStack
for i in range(-1, (len(self.prefixStack)+1) * -1, -1):
if prefix in stack[i]:
return True
return False
def serialize(self, elem, closeElement=1, defaultUri=''):
# Optimization shortcuts
write = self.writelist.append
# Shortcut, check to see if elem is actually a chunk o' serialized XML
if isinstance(elem, SerializedXML):
write(elem)
return
# Shortcut, check to see if elem is actually a string (aka Cdata)
if isinstance(elem, types.StringTypes):
write(escapeToXml(elem))
return
# Further optimizations
parent = elem.parent
name = elem.name
uri = elem.uri
defaultUri, currentDefaultUri = elem.defaultUri, defaultUri
for p, u in elem.localPrefixes.iteritems():
self.prefixes[u] = p
self.prefixStack.append(elem.localPrefixes.keys())
# Inherit the default namespace
if defaultUri is None:
defaultUri = currentDefaultUri
if uri is None:
uri = defaultUri
prefix = None
if uri != defaultUri or uri in self.prefixes:
prefix = self.getPrefix(uri)
inScope = self.prefixInScope(prefix)
# Create the starttag
if not prefix:
write("<%s" % (name))
else:
write("<%s:%s" % (prefix, name))
if not inScope:
write(" xmlns:%s='%s'" % (prefix, uri))
self.prefixStack[-1].append(prefix)
inScope = True
if defaultUri != currentDefaultUri and \
(uri != defaultUri or not prefix or not inScope):
write(" xmlns='%s'" % (defaultUri))
for p, u in elem.localPrefixes.iteritems():
write(" xmlns:%s='%s'" % (p, u))
# Serialize attributes
for k,v in elem.attributes.items():
# If the attribute name is a tuple, it's a qualified attribute
if isinstance(k, types.TupleType):
attr_uri, attr_name = k
attr_prefix = self.getPrefix(attr_uri)
if not self.prefixInScope(attr_prefix):
write(" xmlns:%s='%s'" % (attr_prefix, attr_uri))
self.prefixStack[-1].append(attr_prefix)
write(" %s:%s='%s'" % (attr_prefix, attr_name,
escapeToXml(v, 1)))
else:
write((" %s='%s'" % ( k, escapeToXml(v, 1))))
# Shortcut out if this is only going to return
# the element (i.e. no children)
if closeElement == 0:
write(">")
return
# Serialize children
if len(elem.children) > 0:
write(">")
for c in elem.children:
self.serialize(c, defaultUri=defaultUri)
# Add closing tag
if not prefix:
write("</%s>" % (name))
else:
write("</%s:%s>" % (prefix, name))
else:
write("/>")
self.prefixStack.pop()
SerializerClass = _ListSerializer
def escapeToXml(text, isattrib = 0):
""" Escape text to proper XML form, per section 2.3 in the XML specification.
@type text: L{str}
@param text: Text to escape
@type isattrib: L{bool}
@param isattrib: Triggers escaping of characters necessary for use as
attribute values
"""
text = text.replace("&", "&")
text = text.replace("<", "<")
text = text.replace(">", ">")
if isattrib == 1:
text = text.replace("'", "'")
text = text.replace("\"", """)
return text
def unescapeFromXml(text):
text = text.replace("<", "<")
text = text.replace(">", ">")
text = text.replace("'", "'")
text = text.replace(""", "\"")
text = text.replace("&", "&")
return text
def generateOnlyInterface(list, int):
""" Filters items in a list by class
"""
for n in list:
if int.providedBy(n):
yield n
def generateElementsQNamed(list, name, uri):
""" Filters Element items in a list with matching name and URI. """
for n in list:
if IElement.providedBy(n) and n.name == name and n.uri == uri:
yield n
def generateElementsNamed(list, name):
""" Filters Element items in a list with matching name, regardless of URI.
"""
for n in list:
if IElement.providedBy(n) and n.name == name:
yield n
class SerializedXML(unicode):
""" Marker class for pre-serialized XML in the DOM. """
pass
class Namespace:
""" Convenience object for tracking namespace declarations. """
def __init__(self, uri):
self._uri = uri
def __getattr__(self, n):
return (self._uri, n)
def __getitem__(self, n):
return (self._uri, n)
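# Illustrative sketch (not part of the original module): a Namespace object
# simply turns attribute or item access into the (uri, name) tuples used as
# qualified names throughout this module.
#
#     CLIENT_NS = Namespace('jabber:client')
#     CLIENT_NS.message      # -> ('jabber:client', 'message')
#     CLIENT_NS['body']      # -> ('jabber:client', 'body')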
class IElement(Interface):
"""
Interface to XML element nodes.
See L{Element} for a detailed example of its general use.
Warning: this Interface is not yet complete!
"""
uri = Attribute(""" Element's namespace URI """)
name = Attribute(""" Element's local name """)
defaultUri = Attribute(""" Default namespace URI of child elements """)
attributes = Attribute(""" Dictionary of element attributes """)
children = Attribute(""" List of child nodes """)
parent = Attribute(""" Reference to element's parent element """)
localPrefixes = Attribute(""" Dictionary of local prefixes """)
def toXml(prefixes=None, closeElement=1, defaultUri='',
prefixesInScope=None):
""" Serializes object to a (partial) XML document
@param prefixes: dictionary that maps namespace URIs to suggested
prefix names.
@type prefixes: L{dict}
@param closeElement: flag that determines whether to include the
closing tag of the element in the serialized
string. A value of C{0} only generates the
element's start tag. A value of C{1} yields a
complete serialization.
@type closeElement: L{int}
@param defaultUri: Initial default namespace URI. This is most useful
for partial rendering, where the logical parent
element (of which the starttag was already
serialized) declares a default namespace that should
be inherited.
@type defaultUri: L{str}
@param prefixesInScope: list of prefixes that are assumed to be
declared by ancestors.
@type prefixesInScope: L{list}
@return: (partial) serialized XML
@rtype: L{unicode}
"""
def addElement(name, defaultUri = None, content = None):
""" Create an element and add as child.
The new element is added to this element as a child, and will have
this element as its parent.
@param name: element name. This can be either a L{unicode} object that
contains the local name, or a tuple of (uri, local_name)
for a fully qualified name. In the former case,
the namespace URI is inherited from this element.
@type name: L{unicode} or L{tuple} of (L{unicode}, L{unicode})
@param defaultUri: default namespace URI for child elements. If
C{None}, this is inherited from this element.
@type defaultUri: L{unicode}
@param content: text contained by the new element.
@type content: L{unicode}
@return: the created element
@rtype: object providing L{IElement}
"""
def addChild(node):
""" Adds a node as child of this element.
        The C{node} will be added to the list of children of this element, and
will have this element set as its parent when C{node} provides
L{IElement}.
@param node: the child node.
@type node: L{unicode} or object implementing L{IElement}
"""
class Element(object):
""" Represents an XML element node.
An Element contains a series of attributes (name/value pairs), content
(character data), and other child Element objects. When building a document
with markup (such as HTML or XML), use this object as the starting point.
Element objects fully support XML Namespaces. The fully qualified name of
the XML Element it represents is stored in the C{uri} and C{name}
attributes, where C{uri} holds the namespace URI. There is also a default
namespace, for child elements. This is stored in the C{defaultUri}
attribute. Note that C{''} means the empty namespace.
Serialization of Elements through C{toXml()} will use these attributes
for generating proper serialized XML. When both C{uri} and C{defaultUri}
    are not None in the Element and all of its descendants, serialization
proceeds as expected:
>>> from twisted.words.xish import domish
>>> root = domish.Element(('myns', 'root'))
>>> root.addElement('child', content='test')
<twisted.words.xish.domish.Element object at 0x83002ac>
>>> root.toXml()
u"<root xmlns='myns'><child>test</child></root>"
For partial serialization, needed for streaming XML, a special value for
namespace URIs can be used: C{None}.
Using C{None} as the value for C{uri} means: this element is in whatever
namespace inherited by the closest logical ancestor when the complete XML
document has been serialized. The serialized start tag will have a
non-prefixed name, and no xmlns declaration will be generated.
Similarly, C{None} for C{defaultUri} means: the default namespace for my
child elements is inherited from the logical ancestors of this element,
when the complete XML document has been serialized.
To illustrate, an example from a Jabber stream. Assume the start tag of the
root element of the stream has already been serialized, along with several
complete child elements, and sent off, looking like this::
<stream:stream xmlns:stream='http://etherx.jabber.org/streams'
xmlns='jabber:client' to='example.com'>
...
Now suppose we want to send a complete element represented by an
object C{message} created like:
>>> message = domish.Element((None, 'message'))
>>> message['to'] = 'user@example.com'
>>> message.addElement('body', content='Hi!')
<twisted.words.xish.domish.Element object at 0x8276e8c>
>>> message.toXml()
u"<message to='user@example.com'><body>Hi!</body></message>"
    As you can see, this XML snippet has no xmlns declaration. When sent
off, it inherits the C{jabber:client} namespace from the root element.
Note that this renders the same as using C{''} instead of C{None}:
>>> presence = domish.Element(('', 'presence'))
>>> presence.toXml()
u"<presence/>"
However, if this object has a parent defined, the difference becomes
clear:
>>> child = message.addElement(('http://example.com/', 'envelope'))
>>> child.addChild(presence)
<twisted.words.xish.domish.Element object at 0x8276fac>
>>> message.toXml()
u"<message to='user@example.com'><body>Hi!</body><envelope xmlns='http://example.com/'><presence xmlns=''/></envelope></message>"
    As you can see, the <presence/> element is now in the empty namespace, not
in the default namespace of the parent or the streams'.
@type uri: L{unicode} or None
@ivar uri: URI of this Element's name
@type name: L{unicode}
@ivar name: Name of this Element
@type defaultUri: L{unicode} or None
@ivar defaultUri: URI this Element exists within
@type children: L{list}
@ivar children: List of child Elements and content
@type parent: L{Element}
@ivar parent: Reference to the parent Element, if any.
@type attributes: L{dict}
@ivar attributes: Dictionary of attributes associated with this Element.
@type localPrefixes: L{dict}
@ivar localPrefixes: Dictionary of namespace declarations on this
element. The key is the prefix to bind the
namespace uri to.
"""
implements(IElement)
_idCounter = 0
def __init__(self, qname, defaultUri=None, attribs=None,
localPrefixes=None):
"""
@param qname: Tuple of (uri, name)
@param defaultUri: The default URI of the element; defaults to the URI
specified in L{qname}
@param attribs: Dictionary of attributes
@param localPrefixes: Dictionary of namespace declarations on this
element. The key is the prefix to bind the
namespace uri to.
"""
self.localPrefixes = localPrefixes or {}
self.uri, self.name = qname
if defaultUri is None and \
self.uri not in self.localPrefixes.itervalues():
self.defaultUri = self.uri
else:
self.defaultUri = defaultUri
self.attributes = attribs or {}
self.children = []
self.parent = None
def __getattr__(self, key):
# Check child list for first Element with a name matching the key
for n in self.children:
if IElement.providedBy(n) and n.name == key:
return n
# Tweak the behaviour so that it's more friendly about not
# finding elements -- we need to document this somewhere :)
if key.startswith('_'):
raise AttributeError(key)
else:
return None
def __getitem__(self, key):
return self.attributes[self._dqa(key)]
def __delitem__(self, key):
del self.attributes[self._dqa(key)];
def __setitem__(self, key, value):
self.attributes[self._dqa(key)] = value
def __str__(self):
""" Retrieve the first CData (content) node
"""
for n in self.children:
if isinstance(n, types.StringTypes): return n
return ""
def _dqa(self, attr):
""" Dequalify an attribute key as needed """
if isinstance(attr, types.TupleType) and not attr[0]:
return attr[1]
else:
return attr
def getAttribute(self, attribname, default = None):
""" Retrieve the value of attribname, if it exists """
return self.attributes.get(attribname, default)
def hasAttribute(self, attrib):
""" Determine if the specified attribute exists """
return self.attributes.has_key(self._dqa(attrib))
def compareAttribute(self, attrib, value):
""" Safely compare the value of an attribute against a provided value.
C{None}-safe.
"""
return self.attributes.get(self._dqa(attrib), None) == value
def swapAttributeValues(self, left, right):
""" Swap the values of two attribute. """
d = self.attributes
l = d[left]
d[left] = d[right]
d[right] = l
def addChild(self, node):
""" Add a child to this Element. """
if IElement.providedBy(node):
node.parent = self
self.children.append(node)
return self.children[-1]
def addContent(self, text):
""" Add some text data to this Element. """
c = self.children
if len(c) > 0 and isinstance(c[-1], types.StringTypes):
c[-1] = c[-1] + text
else:
c.append(text)
return c[-1]
def addElement(self, name, defaultUri = None, content = None):
result = None
if isinstance(name, type(())):
if defaultUri is None:
defaultUri = name[0]
self.children.append(Element(name, defaultUri))
else:
if defaultUri is None:
defaultUri = self.defaultUri
self.children.append(Element((defaultUri, name), defaultUri))
result = self.children[-1]
result.parent = self
if content:
result.children.append(content)
return result
def addRawXml(self, rawxmlstring):
""" Add a pre-serialized chunk o' XML as a child of this Element. """
self.children.append(SerializedXML(rawxmlstring))
def addUniqueId(self):
""" Add a unique (across a given Python session) id attribute to this
Element.
"""
self.attributes["id"] = "H_%d" % Element._idCounter
Element._idCounter = Element._idCounter + 1
def elements(self, uri=None, name=None):
"""
Iterate across all children of this Element that are Elements.
Returns a generator over the child elements. If both the C{uri} and
C{name} parameters are set, the returned generator will only yield
on elements matching the qualified name.
@param uri: Optional element URI.
@type uri: C{unicode}
@param name: Optional element name.
@type name: C{unicode}
@return: Iterator that yields objects implementing L{IElement}.
"""
if name is None:
return generateOnlyInterface(self.children, IElement)
else:
return generateElementsQNamed(self.children, name, uri)
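    # Illustrative sketch (not part of the original class): assuming `msg` is
    # an Element whose children live in the 'jabber:client' namespace, its
    # <body/> children can be iterated as qualified elements:
    #
    #     for body in msg.elements(uri='jabber:client', name='body'):
    #         text = str(body)   # __str__ returns the first CData child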
def toXml(self, prefixes=None, closeElement=1, defaultUri='',
prefixesInScope=None):
""" Serialize this Element and all children to a string. """
s = SerializerClass(prefixes=prefixes, prefixesInScope=prefixesInScope)
s.serialize(self, closeElement=closeElement, defaultUri=defaultUri)
return s.getValue()
def firstChildElement(self):
for c in self.children:
if IElement.providedBy(c):
return c
return None
class ParserError(Exception):
""" Exception thrown when a parsing error occurs """
pass
def elementStream():
""" Preferred method to construct an ElementStream
Uses Expat-based stream if available, and falls back to Sux if necessary.
"""
try:
es = ExpatElementStream()
return es
except ImportError:
if SuxElementStream is None:
raise Exception("No parsers available :(")
es = SuxElementStream()
return es
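# Illustrative sketch (not part of the original module): both stream
# implementations below expose the same callback attributes, so typical use
# looks roughly like this (the handler functions are placeholders):
#
#     es = elementStream()
#     es.DocumentStartEvent = onDocumentStart   # called with the root Element
#     es.ElementEvent = onElement               # called with each completed child Element
#     es.DocumentEndEvent = onDocumentEnd       # called when the root element closes
#     es.parse("<stream xmlns='jabber:client'><message/></stream>")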
try:
from twisted.web import sux
except:
SuxElementStream = None
else:
class SuxElementStream(sux.XMLParser):
def __init__(self):
self.connectionMade()
self.DocumentStartEvent = None
self.ElementEvent = None
self.DocumentEndEvent = None
self.currElem = None
self.rootElem = None
self.documentStarted = False
self.defaultNsStack = []
self.prefixStack = []
def parse(self, buffer):
try:
self.dataReceived(buffer)
except sux.ParseError, e:
raise ParserError, str(e)
def findUri(self, prefix):
# Walk prefix stack backwards, looking for the uri
# matching the specified prefix
stack = self.prefixStack
for i in range(-1, (len(self.prefixStack)+1) * -1, -1):
if prefix in stack[i]:
return stack[i][prefix]
return None
def gotTagStart(self, name, attributes):
defaultUri = None
localPrefixes = {}
attribs = {}
uri = None
# Pass 1 - Identify namespace decls
for k, v in attributes.items():
if k.startswith("xmlns"):
x, p = _splitPrefix(k)
if (x is None): # I.e. default declaration
defaultUri = v
else:
localPrefixes[p] = v
del attributes[k]
# Push namespace decls onto prefix stack
self.prefixStack.append(localPrefixes)
# Determine default namespace for this element; if there
# is one
if defaultUri is None:
if len(self.defaultNsStack) > 0:
defaultUri = self.defaultNsStack[-1]
else:
defaultUri = ''
# Fix up name
prefix, name = _splitPrefix(name)
if prefix is None: # This element is in the default namespace
uri = defaultUri
else:
# Find the URI for the prefix
uri = self.findUri(prefix)
# Pass 2 - Fix up and escape attributes
for k, v in attributes.items():
p, n = _splitPrefix(k)
if p is None:
attribs[n] = v
else:
                    attribs[(self.findUri(p), n)] = unescapeFromXml(v)
# Construct the actual Element object
e = Element((uri, name), defaultUri, attribs, localPrefixes)
# Save current default namespace
self.defaultNsStack.append(defaultUri)
# Document already started
if self.documentStarted:
# Starting a new packet
if self.currElem is None:
self.currElem = e
# Adding to existing element
else:
self.currElem = self.currElem.addChild(e)
# New document
else:
self.rootElem = e
self.documentStarted = True
self.DocumentStartEvent(e)
def gotText(self, data):
if self.currElem != None:
self.currElem.addContent(data)
def gotCData(self, data):
if self.currElem != None:
self.currElem.addContent(data)
def gotComment(self, data):
# Ignore comments for the moment
pass
entities = { "amp" : "&",
"lt" : "<",
"gt" : ">",
"apos": "'",
"quot": "\"" }
def gotEntityReference(self, entityRef):
# If this is an entity we know about, add it as content
# to the current element
if entityRef in SuxElementStream.entities:
self.currElem.addContent(SuxElementStream.entities[entityRef])
def gotTagEnd(self, name):
# Ensure the document hasn't already ended
if self.rootElem is None:
# XXX: Write more legible explanation
raise ParserError, "Element closed after end of document."
# Fix up name
prefix, name = _splitPrefix(name)
if prefix is None:
uri = self.defaultNsStack[-1]
else:
uri = self.findUri(prefix)
# End of document
if self.currElem is None:
# Ensure element name and uri matches
if self.rootElem.name != name or self.rootElem.uri != uri:
raise ParserError, "Mismatched root elements"
self.DocumentEndEvent()
self.rootElem = None
# Other elements
else:
# Ensure the tag being closed matches the name of the current
# element
if self.currElem.name != name or self.currElem.uri != uri:
# XXX: Write more legible explanation
raise ParserError, "Malformed element close"
# Pop prefix and default NS stack
self.prefixStack.pop()
self.defaultNsStack.pop()
                # Check for null parent of the current elem;
                # that's the top of the stack
if self.currElem.parent is None:
self.currElem.parent = self.rootElem
self.ElementEvent(self.currElem)
self.currElem = None
# Anything else is just some element wrapping up
else:
self.currElem = self.currElem.parent
class ExpatElementStream:
def __init__(self):
import pyexpat
self.DocumentStartEvent = None
self.ElementEvent = None
self.DocumentEndEvent = None
self.error = pyexpat.error
self.parser = pyexpat.ParserCreate("UTF-8", " ")
self.parser.StartElementHandler = self._onStartElement
self.parser.EndElementHandler = self._onEndElement
self.parser.CharacterDataHandler = self._onCdata
self.parser.StartNamespaceDeclHandler = self._onStartNamespace
self.parser.EndNamespaceDeclHandler = self._onEndNamespace
self.currElem = None
self.defaultNsStack = ['']
self.documentStarted = 0
self.localPrefixes = {}
def parse(self, buffer):
try:
self.parser.Parse(buffer)
except self.error, e:
raise ParserError, str(e)
def _onStartElement(self, name, attrs):
# Generate a qname tuple from the provided name
qname = name.split(" ")
if len(qname) == 1:
qname = ('', name)
# Process attributes
for k, v in attrs.items():
if k.find(" ") != -1:
aqname = k.split(" ")
attrs[(aqname[0], aqname[1])] = v
del attrs[k]
# Construct the new element
e = Element(qname, self.defaultNsStack[-1], attrs, self.localPrefixes)
self.localPrefixes = {}
# Document already started
if self.documentStarted == 1:
if self.currElem != None:
self.currElem.children.append(e)
e.parent = self.currElem
self.currElem = e
# New document
else:
self.documentStarted = 1
self.DocumentStartEvent(e)
def _onEndElement(self, _):
# Check for null current elem; end of doc
if self.currElem is None:
self.DocumentEndEvent()
# Check for parent that is None; that's
# the top of the stack
elif self.currElem.parent is None:
self.ElementEvent(self.currElem)
self.currElem = None
# Anything else is just some element in the current
# packet wrapping up
else:
self.currElem = self.currElem.parent
def _onCdata(self, data):
if self.currElem != None:
self.currElem.addContent(data)
def _onStartNamespace(self, prefix, uri):
# If this is the default namespace, put
# it on the stack
if prefix is None:
self.defaultNsStack.append(uri)
else:
self.localPrefixes[prefix] = uri
def _onEndNamespace(self, prefix):
# Remove last element on the stack
if prefix is None:
self.defaultNsStack.pop()
## class FileParser(ElementStream):
## def __init__(self):
## ElementStream.__init__(self)
## self.DocumentStartEvent = self.docStart
## self.ElementEvent = self.elem
## self.DocumentEndEvent = self.docEnd
## self.done = 0
## def docStart(self, elem):
## self.document = elem
## def elem(self, elem):
## self.document.addChild(elem)
## def docEnd(self):
## self.done = 1
## def parse(self, filename):
## for l in open(filename).readlines():
## self.parser.Parse(l)
## assert self.done == 1
## return self.document
## def parseFile(filename):
## return FileParser().parse(filename)
|
|
"""Implements directory watcher using ReadDirectoryChangesW.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
import errno
import logging
import os
# E0401: Unable to import Windows-only packages
import winerror # pylint: disable=E0401
import pywintypes # pylint: disable=E0401
import win32con # pylint: disable=E0401
import win32event # pylint: disable=E0401
import win32file # pylint: disable=E0401
from . import dirwatch_base
_BUFFER_SIZE = 32768
_INVALID_HANDLE_VALUE = -1
_FILE_LIST_DIRECTORY = 0x0001
_ACTION_NAMES = {1: 'CREATE',
2: 'DELETE',
3: 'MODIFY',
4: 'MOVEFROM',
5: 'MOVETO'}
_EVENTS = {'CREATE': dirwatch_base.DirWatcherEvent.CREATED,
'DELETE': dirwatch_base.DirWatcherEvent.DELETED,
'MODIFY': dirwatch_base.DirWatcherEvent.MODIFIED,
'MOVEFROM': dirwatch_base.DirWatcherEvent.DELETED,
'MOVETO': dirwatch_base.DirWatcherEvent.CREATED}
_LOGGER = logging.getLogger(__name__)
class WindowsDirInfo(object):
"""Windows directory watcher info."""
__slots__ = (
'id',
'path',
'overlapped',
'buffer',
'file'
)
def __init__(self, path):
self.path = path
self.overlapped = pywintypes.OVERLAPPED()
self.overlapped.hEvent = win32event.CreateEvent(None, 1, 0, None)
self.buffer = win32file.AllocateReadBuffer(_BUFFER_SIZE)
self.file = win32file.CreateFile(
self.path,
_FILE_LIST_DIRECTORY,
(
win32con.FILE_SHARE_READ |
win32con.FILE_SHARE_WRITE |
win32con.FILE_SHARE_DELETE
),
None,
win32con.OPEN_EXISTING,
(
win32con.FILE_FLAG_BACKUP_SEMANTICS |
win32con.FILE_FLAG_OVERLAPPED
),
None
)
self.id = self.overlapped.hEvent.handle
def close(self):
"""Closes the directory info."""
self.file.close()
self.overlapped.hEvent.close()
class WindowsDirWatcher(dirwatch_base.DirWatcher):
"""Windows directory watcher implementation."""
__slots__ = (
'_dir_infos',
'_changed'
)
def __init__(self, watch_dir=None):
self._dir_infos = {}
self._changed = collections.deque()
super(WindowsDirWatcher, self).__init__(watch_dir)
@staticmethod
def _read_dir(info):
"""Reads the directory for changes async.
"""
try:
win32file.ReadDirectoryChangesW(
info.file,
info.buffer,
False,
(
win32con.FILE_NOTIFY_CHANGE_FILE_NAME |
win32con.FILE_NOTIFY_CHANGE_DIR_NAME |
win32con.FILE_NOTIFY_CHANGE_ATTRIBUTES |
win32con.FILE_NOTIFY_CHANGE_SIZE |
win32con.FILE_NOTIFY_CHANGE_LAST_WRITE |
win32con.FILE_NOTIFY_CHANGE_SECURITY
),
info.overlapped
)
return True
except pywintypes.error as exc:
_LOGGER.warning('Failed to read directory \'%s\': %s',
info.path, exc)
return False
def _add_dir(self, watch_dir):
"""Add `directory` to the list of watched directories.
:param watch_dir: watch directory real path
:returns: watch id
"""
info = WindowsDirInfo(watch_dir)
if info.file == _INVALID_HANDLE_VALUE or not self._read_dir(info):
info.close()
raise OSError(errno.ENOENT, 'No such file or directory',
watch_dir)
self._dir_infos[info.id] = info
return info.id
def _remove_dir(self, watch_id):
"""Remove `directory` from the list of watched directories.
:param watch_id: watch id
"""
        info = self._dir_infos.get(watch_id)
if info is not None:
info.close()
del self._dir_infos[watch_id]
def _wait_for_events(self, timeout):
"""Wait for directory change event for up to ``timeout`` seconds.
:param timeout:
Time in milliseconds to wait for an event (-1 means forever)
:returns:
``True`` if events were received, ``False`` otherwise.
"""
ids = [k for k in self._dir_infos]
rc = win32event.WaitForMultipleObjects(ids, 0, timeout)
if rc == win32event.WAIT_TIMEOUT:
return False
elif rc == win32event.WAIT_FAILED:
_LOGGER.error('Wait on %r failed', ids)
return False
idx = rc - win32event.WAIT_OBJECT_0
info = self._dir_infos.get(ids[idx])
if info is not None:
self._changed.append(info)
return True
return False
def _preempt_watch(self, info, result):
"""Deletes the watch preemptively.
"""
result.append((dirwatch_base.DirWatcherEvent.DELETED, info.path))
info.close()
del self._dir_infos[info.id]
def _read_events(self):
"""Reads the events from the system and formats as ``DirWatcherEvent``.
:returns: List of ``(DirWatcherEvent, <path>)``
"""
result = []
while self._changed:
info = self._changed.popleft()
try:
size = win32file.GetOverlappedResult(info.file,
info.overlapped,
False)
except win32file.error as exc:
win32event.ResetEvent(info.overlapped.hEvent)
_LOGGER.warning(
'Failed to get directory changes for \'%s\': %s',
info.path, exc)
if exc.winerror == winerror.ERROR_ACCESS_DENIED:
self._preempt_watch(info, result)
continue
notifications = win32file.FILE_NOTIFY_INFORMATION(info.buffer,
size)
for action, path in notifications:
action_name = _ACTION_NAMES.get(action)
path = os.path.join(info.path, path)
if action_name is None:
_LOGGER.error('Received unknown action (%s, \'%s\')',
action, path)
continue
_LOGGER.debug('Received event (%s, \'%s\')', action_name, path)
event = _EVENTS.get(action_name)
if event is not None:
result.append((event, path))
win32event.ResetEvent(info.overlapped.hEvent)
if not self._read_dir(info):
self._preempt_watch(info, result)
return result
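# Illustrative sketch (not part of the original module), using only the
# methods defined above. In practice these calls are made through the
# dirwatch_base.DirWatcher interface rather than invoked directly:
#
#     watcher = WindowsDirWatcher()
#     watch_id = watcher._add_dir('C:\\some\\directory')
#     if watcher._wait_for_events(1000):          # wait up to one second
#         for event, path in watcher._read_events():
#             _LOGGER.info('%s %s', event, path)
#     watcher._remove_dir(watch_id)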
|
|
import sys
from copy import copy
from numpy import *
from scipy.signal import medfilt as MF
from numpy.random import normal, seed
from statsmodels.robust import mad
from .core import *
from .lpf import *
from .extcore import *
from .lpfsd import LPFSD
from george.kernels import ConstantKernel, Matern32Kernel
class LPFSR(LPFSD):
def __init__(self, passband, lctype='target', use_ldtk=False, n_threads=1, night=2, pipeline='gc'):
super().__init__(passband, lctype, use_ldtk, n_threads, night, pipeline)
self.lnlikelihood = self.lnlikelihood_wn
self.noise = 'white'
self.fluxes = asarray(self.fluxes)
self.fluxes_m = self.fluxes.mean(0)
self.fluxes /= self.fluxes_m
self.wn_estimates = array([sqrt(2) * mad(diff(f)) for f in self.fluxes])
self.times = self.times[0]
self.ctimes = self.times-self.times.mean()
# Setup priors
# ------------
# System parameters
# -----------------
self.priors = [NP(125.417380, 8e-5, 'tc'), # 0 - Transit centre
NP(3.06785547, 4e-7, 'p'), # 1 - Period
NP(4.17200000, 3e-2, 'rho'), # 2 - Stellar density
NP(0.16100000, 2e-2, 'b')] # 3 - Impact parameter
# Area ratio
# ----------
self._sk2 = len(self.priors)
self.priors.extend([UP(0.165**2, 0.195**2, 'k2_%s'%pb)
for pb in self.unique_pbs]) ## 4 - planet-star area ratio
# Limb darkening
# --------------
self._sq1 = len(self.priors)
self._sq2 = self._sq1+1
for ipb in range(self.npb):
self.priors.extend([UP(0, 1, 'q1_%i'%ipb), ## sq1 + 2*ipb -- limb darkening q1
UP(0, 1, 'q2_%i'%ipb)]) ## sq2 + 2*ipb -- limb darkening q2
## Baseline
## --------
self._sbl = len(self.priors)
self._nbl = 3
for ilc in range(self.nlc):
self.priors.append(UP( 0.0, 2.0, 'bcn_%i'%ilc)) # sbl + ilc -- Baseline constant
self.priors.append(UP(-1.0, 1.0, 'btl_%i'%ilc)) # sbl + ilc -- Linear time trend
self.priors.append(UP(-1.0, 1.0, 'bal_%i'%ilc)) # sbl + ilc -- Linear airmass trend
## White noise
## -----------
self._swn = len(self.priors)
self.priors.extend([UP(3e-4, 4e-3, 'e_%i'%ilc)
for ilc in range(self.nlc)]) ## sqn + ilc -- Average white noise
self.ps = PriorSet(self.priors)
self.set_pv_indices()
self.prior_kw = NP(0.1707, 3.2e-4, 'kw', lims=(0.16,0.18))
## Limb darkening with LDTk
## ------------------------
if use_ldtk:
self.sc = LDPSetCreator([4150,100], [4.6,0.2], [-0.14,0.16], self.filters)
self.lp = self.sc.create_profiles(2000)
self.lp.resample_linear_z()
self.lp.set_uncertainty_multiplier(2)
# Use mock data
# -------------
if passband == 'nb_mock' and type(self) == LPFSR:
self.create_mock_nb_dataset()
def set_pv_indices(self, sbl=None, swn=None):
self.ik2 = [self._sk2+pbid for pbid in self.gpbids]
self.iq1 = [self._sq1+pbid*2 for pbid in self.gpbids]
self.iq2 = [self._sq2+pbid*2 for pbid in self.gpbids]
self.uq1 = np.unique(self.iq1)
self.uq2 = np.unique(self.iq2)
sbl = sbl if sbl is not None else self._sbl
self.ibcn = [sbl + 3 * ilc for ilc in range(self.nlc)]
self.ibtl = [sbl + 3 * ilc + 1 for ilc in range(self.nlc)]
self.ibal = [sbl + 3 * ilc + 2 for ilc in range(self.nlc)]
swn = swn if swn is not None else self._swn
self.iwn = [swn+ilc for ilc in range(self.nlc)]
def setup_gp(self):
pass
def map_to_gp(self, pv):
raise NotImplementedError
def lnposterior(self, pv):
_k = median(sqrt(pv[self.ik2]))
return super().lnposterior(pv) + self.prior_kw.log(_k)
def compute_transit(self, pv):
_a = as_from_rhop(pv[2], pv[1])
_i = mt.acos(pv[3] / _a)
_k = sqrt(pv[self.ik2]).mean()
kf = pv[self.ik2] / _k ** 2
a, b = sqrt(pv[self.iq1]), 2. * pv[self.iq2]
self._wrk_ld[:, 0] = a * b
self._wrk_ld[:, 1] = a * (1. - b)
z = of.z_circular(self.times, pv[0], pv[1], _a, _i, self.nt)
f = self.tm(z, _k, self._wrk_ld)
return (kf * (f - 1.) + 1.).T
def compute_lc_model(self, pv, copy=False):
bl = self.compute_baseline(pv)
tr = self.compute_transit(pv)
self._wrk_lc[:] = bl*tr/tr.mean(0)
return self._wrk_lc if not copy else self._wrk_lc.copy()
def compute_baseline(self, pv):
bl = ( pv[self.ibcn][:,newaxis]
+ pv[self.ibtl][:,newaxis] * self.ctimes
+ pv[self.ibal][:,newaxis] * self.airmass)
return bl
def lnlikelihood_wn(self, pv):
fluxes_m = self.compute_lc_model(pv)
return sum([ll_normal_es(fo, fm, wn) for fo,fm,wn in zip(self.fluxes, fluxes_m, pv[self.iwn])])
def fit_baseline(self, pvpop):
from numpy.linalg import lstsq
pvt = pvpop.copy()
X = array([ones(self.npt), self.ctimes, self.airmass])
for i in range(self.nlc):
pv = lstsq(X.T, self.fluxes[i])[0]
pvt[:, self.ibcn[i]] = normal(pv[0], 0.001, size=pvt.shape[0])
pvt[:, self.ibtl[i]] = normal(pv[1], 0.01 * abs(pv[1]), size=pvt.shape[0])
pvt[:, self.ibal[i]] = normal(pv[2], 0.01 * abs(pv[2]), size=pvt.shape[0])
return pvt
def fit_ldc(self, pvpop, emul=1.):
pvt = pvpop.copy()
uv, uve = self.lp.coeffs_qd()
us = array([normal(um, emul*ue, size=pvt.shape[0]) for um,ue in zip(uv[:,0],uve[:,0])]).T
vs = array([normal(vm, emul*ve, size=pvt.shape[0]) for vm,ve in zip(uv[:,1],uve[:,1])]).T
q1s, q2s = map_uv_to_qq(us, vs)
pvt[:, self.uq1] = q1s
pvt[:, self.uq2] = q2s
return pvt
def create_mock_nb_dataset(self):
tc, p, rho, b = 125.417380, 3.06785547, 4.17200000, 0.161
ks = np.full(self.npb, 0.171)
ks[1::3] = 0.170
ks[2::3] = 0.172
ks[[7, 13]] = 0.173
q1 = array([0.581, 0.582, 0.590, 0.567, 0.541, 0.528, 0.492, 0.490,
0.461, 0.440, 0.419, 0.382, 0.380, 0.368, 0.344, 0.328,
0.320, 0.308, 0.301, 0.292])
q2 = array([0.465, 0.461, 0.446, 0.442, 0.425, 0.427, 0.414, 0.409,
0.422, 0.402, 0.391, 0.381, 0.379, 0.373, 0.369, 0.365,
0.362, 0.360, 0.360, 0.358])
seed(0)
cam = normal(0, 0.03, self.nlc)
ctm = normal(0, 0.08, self.nlc)
seed(0)
pv = self.ps.generate_pv_population(1)[0]
pv[:4] = tc, p, rho, b
pv[self.ik2] = ks ** 2
pv[self.iq1] = q1
pv[self.iq2] = q2
pv[self._sbl:] = 1.
fms = self.compute_transit(pv).copy()
for i, fm in enumerate(fms):
fm[:] += (normal(0, self.wn_estimates[i], fm.size)
+ cam[i] * (self.airmass - self.airmass.mean())
+ ctm[i] * (self.times[0] - self.times[0].mean()))
self.fluxes = asarray(fms)
self.fluxes_m = self.fluxes.mean(0)
self.fluxes /= self.fluxes_m
self.wn_estimates = array([sqrt(2) * mad(diff(f)) for f in self.fluxes])
|
|
# Showroom Downloader
import subprocess
import threading
import datetime
import logging
import time
import os
import shutil
from .constants import TOKYO_TZ, FULL_DATE_FMT
from .utils import format_name, strftime
download_logger = logging.getLogger('showroom.downloader')
class Downloader(object):
"""
Handles downloads for a parent Watcher.
Created with a room, a client, and an output directory. Started with start(),
then call wait() to wait on the underlying Popen process. Wait will return when the
download process ends, either because the stream has completed, because it timed out,
or because it was terminated from outside the thread. On POSIX systems, a negative
return code from wait() signals termination/timeout, however this is not portable.
Regardless of why the download finished, Watcher still needs to check live status, so
the only reason why termination vs. completion matters is potentially responding to
repeated timeouts (e.g. like that time all the streams failed for 4 hours)
Attributes:
destdir: final destination for the download
tempdir: temporary ("active") directory
outfile: name of the file being written to
all_files: list of files this downloader has written, eventually will be logged
when the stream completes
NOTE: all_files is the only attribute that has any reason to be public
Properties:
stream_data: stream data returned by showroom
protocol: protocol in use, either rtmp or hls (use enum?)
rtmp_url, lhls_url, hls_url: separate handles for rtmp and hls urls
timed_out: whether the last wait() timed out
Methods: (remove this before release)
start
wait
get_info
is_running -- whether the child process is running
stop, kill -- usually called from outside the current thread
update_stream_url -- internal use only
move_to_dest -- internal use only
switch_protocol -- don't change protocol, change downloaders
TODO:
Logging (e.g. "download has started" or let Watcher handle this)
Fix ffmpeg logging on Windows without pulling in PATH
DONE:
For now, instead of the below, just use rtmp streams:
Separate downloaders for rtmp and hls streams? That is, if one is failing
instead of switching the protocol, have Watcher pop off the failing stream
and make a new downloader, handing the failing downloader off to some
cleanup thread via queue. Or can we handle all cleanup here?
Add a wait() function that wraps the internal Popen process and checks for fail
states without bothering the wrapping Watcher. Raise on failure?
TESTING:
hls recording fails awfully. find out why
For the failure detection to work properly, must ffmpeg be compiled with librtmp? (yes)
"""
def __init__(self, room, client, settings, default_protocol='rtmp'):
self._room = room
self._client = client
self._rootdir = settings.directory.output
self._logging = settings.ffmpeg.logging
self._ffmpeg_path = settings.ffmpeg.path
self._ffmpeg_container = settings.ffmpeg.container
self.destdir, self.tempdir, self.outfile = "", "", ""
self._protocol = default_protocol
self._rtmp_url = ""
self._hls_url = ""
self._lhls_url = ""
self._stream_data = []
self._process = None
# self._timeouts = 0
# self._timed_out = False
self._pingouts = 0
self._lock = threading.Lock()
# Index of dead processes, list of tuples
# (outfile, destdir, tempdir, process)
self._dead_files = []
# keep a list of previous outfile names
self.all_files = []
@property
def rtmp_url(self):
return self._rtmp_url
@property
def hls_url(self):
return self._hls_url
@property
def lhls_url(self):
return self._lhls_url
@property
def stream_url(self):
return getattr(self, '_{}_url'.format(self.protocol))
@property
def protocol(self):
return self._protocol
def get_info(self):
with self._lock:
return {"streaming_urls": self._stream_data,
"protocol": self._protocol,
"filename": self.outfile,
"dest_dir": self.destdir,
"active": self.is_running(),
"timeouts": 0,
"pingouts": self._pingouts,
"completed_files": self.all_files.copy()}
def is_running(self):
"""Checks if the child process is running."""
if self._process:
return self._process.poll() is None
else:
return False
def switch_protocol(self):
"""Switches protocol between rtmp and hls."""
with self._lock:
if self.protocol == 'rtmp':
self._protocol = 'hls'
else:
self._protocol = 'rtmp'
def wait(self):
"""
Waits for a download to finish.
Returns:
returncode of the child process, or None if a ping loop of death was detected.
On POSIX systems, this will be a negative value if the process
was terminated (e.g. by timeout) rather than exiting normally.
Will wait progressively longer if the download keeps timing out.
TODO:
Detect ping loop of death ? Or is timeout sufficient?
Check for other issues, e.g. black 540p
Logging
Reset _pingouts?
I need to check for both pinging and a timeout
Because the ping message comes from librtmp, and that might not be part
of ffmpeg
Check periodically that the stream is still live:
I've had a couple zombie streams even with the new system
(no ffmpeg logs, so no idea what happened)
"""
num_pings = 0
# Some streams seem to start fine with up to 4 pings before beginning download?
# More investigation is needed
max_pings = 1 + self._pingouts
# timeout after 1 minute
timeout = datetime.datetime.now() + datetime.timedelta(minutes=1)
try:
for line in self._process.stderr:
# TODO: add mpegts or other variants depending on the container settings? or no?
# if "Output #0, mp4" in line:
if "Output #0" in line:
self._process.communicate()
self.move_to_dest()
self._pingouts = 0
break
elif "HandleCtrl, Ping" in line:
num_pings += 1
if num_pings > max_pings:
# The main issue with this is that the slain processes will not have their files moved
# But I think this is preferable to the other solutions I've come up with.
# For future reference, those were:
#
# 1) Sending SIGINT then continuing to read stderr until it exited (sometimes it doesn't)
# 2) Sending SIGINT, storing a reference to the process, then restarting the download.
# This prevents the process from being garbage collected until the Watcher is
# 3) Sending SIGINT, then storing info about src and dest paths for the stopped download.
# If a reference to the process is NOT stored, there's no way to be sure it has finished writing
# (if it's writing at all). The only way was to give them a grace period and then just start
# moving, but this adds undesirable time to the cleanup phase, when we may want to restart
# a falsely completed Watcher asap.
# 4) Just moving the file straightaway. This is obviously bad since ffmpeg takes a few moments to
# finish.
# NOTE: only option #1 was actually tried, the others were partially written before being
# abandoned as their problems became clear
#
# Two additional options exist (not mutually exclusive):
# 1) Passing the dead processes off to a queue and having another thread clean up.
# 2) Having regular maintenance sweep the active folder and move files it can be sure are done
# to their proper folders.
#
# I *probably* need to use 1) eventually, especially once I figure out how to actually end
# stuck processes without killing the parent. But it requires a lot more code.
# Until then let's just see how this works.
#
# When that time does come, a Downloader copy constructor may be useful.
download_logger.debug("Download pinged {} times: Stopping".format(num_pings))
self._pingouts += 1
self.stop()
# close stderr to force the loop to exit
time.sleep(0.1)
self._process.stderr.close()
time.sleep(0.1)
# process will be garbage collected when the next one is started, or the Watcher dies
# self._process = None
# This *should* work for newer builds of FFmpeg without librtmp.
# Only question is whether 1 minute is too long (or too short).
# UPDATE: Why doesn't this ever seem to work?
# is it because FFmpeg freezes output and hangs now? so we're never getting another line to iterate over
# elif datetime.datetime.now() > timeout:
# download_logger.debug("Download of {} timed out".format(self.outfile))
# self.stop()
# time.sleep(0.1)
# self._process.stderr.close()
# time.sleep(0.1)
else:
time.sleep(0.2)
except ValueError:
download_logger.debug('ffmpeg stderr closed unexpectedly')
# Is it possible for the process to end prematurely?
return self._process.returncode
def stop(self):
"""Stop an active download.
Returns immediately; check is_running() for success.
"""
# trying this instead of SIGTERM
# http://stackoverflow.com/a/6659191/3380530
# self._process.send_signal(SIGINT)
# Or not. SIGINT doesn't exist on Windows
self._process.terminate()
def kill(self):
"""Kill an active download.
Like stop, only tries to kill the process instead of just terminating it.
Only use this as a last resort, as it will render any video unusable."""
self._process.kill()
def move_to_dest(self):
"""Moves output file to its final destination."""
destpath = self._move_to_dest(self.outfile, self.tempdir, self.destdir)
if destpath:
self.all_files.append(destpath)
download_logger.info('Completed {}'.format(destpath))
with self._lock:
self.outfile = ""
@staticmethod
def _move_to_dest(outfile, tempdir, destdir):
srcpath = '{}/{}'.format(tempdir, outfile)
destpath = '{}/{}'.format(destdir, outfile)
download_logger.debug('File transfer: {} -> {}'.format(srcpath, destpath))
if os.path.exists(destpath):
raise FileExistsError
else:
try:
shutil.move(srcpath, destpath)
except FileNotFoundError:
download_logger.debug('File not found: {} -> {}'.format(srcpath, destpath))
return
else:
return destpath
def update_streaming_url(self):
data = self._client.streaming_url(self._room.room_id)
self._stream_data = data
download_logger.debug('{}'.format(self._stream_data))
# TODO: it shouldn't still attempt to start up without a fresh url
if not data:
return
rtmp_streams = []
hls_streams = []
lhls_streams = []
# TODO: sort according to a priority list defined in config file
# e.g. ('rtmp', 'lhls', 'hls'), or just "rtmp" (infer the others from defaults)
#
for stream in data:
if stream['type'] == 'rtmp':
rtmp_streams.append((int(stream['quality']), '/'.join((stream['url'], stream['stream_name']))))
elif stream['type'] == 'hls':
hls_streams.append((int(stream['quality']), stream['url']))
elif stream['type'] == 'lhls':
lhls_streams.append((int(stream['quality']), stream['url']))
try:
new_rtmp_url = sorted(rtmp_streams)[-1][1]
except IndexError as e:
# download_logger.warn("Caught IndexError while reading RTMP url: {}\n{}".format(e, data))
new_rtmp_url = ""
try:
new_hls_url = sorted(hls_streams)[-1][1]
except IndexError as e:
# download_logger.warn("Caught IndexError while reading HLS url: {}\n{}".format(e, data))
new_hls_url = ""
try:
new_lhls_url = sorted(lhls_streams)[-1][1]
except IndexError as e:
# download_logger.warn("Caught IndexError while reading HLS url: {}\n{}".format(e, data))
new_lhls_url = ""
with self._lock:
self._rtmp_url = new_rtmp_url
self._hls_url = new_hls_url
self._lhls_url = new_lhls_url
# def update_streaming_url_web(self):
# """Updates streaming urls from the showroom website.
# Fallback if api changes again
# But pretty sure this doesn't work anymore
# """
# # TODO: add an endpoint for fetching the browser page
# r = self._client._session.get(self._room.long_url)
# if r.ok:
# match = hls_url_re1.search(r.text)
# # TODO: check if there was a match
# if not match:
# # no url found in the page
# # probably the stream has ended but is_live returned true
# # just don't update the urls
# # except what happens if they are still "" ?
# return
# hls_url = match.group(0)
# rtmps_url = match.group(1).replace('https', 'rtmps')
# rtmp_url = "rtmp://{}.{}.{}.{}:1935/liveedge/{}".format(*match.groups()[1:])
# with self._lock:
# self._rtmp_url = rtmp_url
# self._hls_url = hls_url
# self._rtmps_url = rtmps_url
# def update_streaming_url_old(self):
# """Updates streaming urls from the showroom website."""
# data = self.client.json('https://www.showroom-live.com/room/get_live_data',
# params={'room_id': self._room.room_id},
# headers={'Referer': self._room.long_url})
# if not data:
# pass # how to resolve this? can it even happen without throwing an exception earlier?
#
# # TODO: Check that strings aren't empty
# stream_name = data['streaming_name_rtmp']
# stream_url = data["streaming_url_rtmp"]
# new_rtmp_url = '{}/{}'.format(stream_url, stream_name)
# new_hls_url = data["streaming_url_hls"]
#
# with self._lock:
# if new_rtmp_url != self.rtmp_url:
# # TODO: log url change
# # TODO: Trigger this message when the stream first goes live, from elsewhere
# # print('Downloading {}\'s Showroom'.format(self.room.name))
# # self.announce((self.web_url, self.stream_url))
# pass
#
# if new_hls_url != self.hls_url:
# # TODO: log url change
# pass
#
# self._rtmp_url = new_rtmp_url
# self._hls_url = new_hls_url
def start(self):
"""
Starts the download.
Refreshes the streaming url, generates a new file name, and starts a new ffmpeg
process.
Returns:
datetime object representing the time the download started
"""
tokyo_time = datetime.datetime.now(tz=TOKYO_TZ)
# TODO: Does this work on Windows now?
env = os.environ.copy()
# remove proxy information
for key in ('http_proxy', 'https_proxy', 'HTTP_PROXY', 'HTTPS_PROXY'):
env.pop(key, None)
self.update_streaming_url()
# TODO: rework this whole process to include lhls, and make it configurable
# and less braindead
self._protocol = 'rtmp'
self._ffmpeg_container = 'mp4'
extra_args = []
# Fall back to HLS if no RTMP stream available
# Better to do this here or in update_streaming_url?
# There's a possible race condition here, if some external thread modifies either of these
if not self._rtmp_url and self._protocol == 'rtmp':
download_logger.warning('Using HLS downloader for {}'.format(self._room.handle))
self._protocol = 'hls'
# extra_args = []
# force using TS container with HLS
# this is causing more problems than it solves
# if self.protocol in ('hls', 'lhls'):
# self._ffmpeg_container = 'ts'
# 2020-01-10: those problems were preferable to completely unwatchable streams
if self.protocol in ('hls', 'lhls'):
extra_args = ["-copyts", "-bsf:a", "aac_adtstoasc"]
if self.protocol in ('hls', 'lhls') and self._ffmpeg_container == 'mp4':
extra_args = ["-bsf:a", "aac_adtstoasc"]
# I don't think this is needed?
# if self._ffmpeg_container == 'ts':
# extra_args.extend(['-bsf:v', 'h264_mp4toannexb'])
# elif self._ffmpeg_container != 'mp4':
# # TODO: support additional container formats, e.g. FLV
# self._ffmpeg_container = 'mp4'
temp, dest, out = format_name(self._rootdir,
strftime(tokyo_time, FULL_DATE_FMT),
self._room, ext=self._ffmpeg_container)
with self._lock:
self.tempdir, self.destdir, self.outfile = temp, dest, out
if self._logging is True:
log_file = os.path.normpath('{}/logs/{}.log'.format(self.destdir, self.outfile))
env.update({'FFREPORT': 'file={}:level=40'.format(log_file)})
# level=48 is debug mode, with lots and lots of extra information
# maybe too much
normed_outpath = os.path.normpath('{}/{}'.format(self.tempdir, self.outfile))
self._process = subprocess.Popen([
self._ffmpeg_path,
# '-nostdin',
# '-nostats', # will this omit any useful information?
'-loglevel', '40', # 40+ required for wait() to check output
'-copytb', '1',
'-i', self.stream_url,
'-c', 'copy',
*extra_args,
normed_outpath
],
stdin=subprocess.DEVNULL,
stderr=subprocess.PIPE, # ffmpeg sends all output to stderr
universal_newlines=True,
bufsize=1,
env=env)
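# --- Illustrative sketch (added by the editor; not part of the original module) ---
# wait() above scans ffmpeg's stderr for "Output #0" (ffmpeg has opened the
# output and the download is underway; the real code then blocks in
# communicate() until it finishes) and "HandleCtrl, Ping" (the librtmp ping
# messages that precede a "ping loop of death"). The standalone function below
# reproduces that scanning logic against a fake child process so the control
# flow can be exercised without ffmpeg; the echoed lines are hypothetical
# stand-ins for real ffmpeg/librtmp output.
def _demo_scan_stderr(max_pings=3):
    import subprocess
    import sys
    fake = subprocess.Popen(
        [sys.executable, '-c',
         'import sys; sys.stderr.write("HandleCtrl, Ping\\n" * 2 + "Output #0, mp4\\n")'],
        stderr=subprocess.PIPE, universal_newlines=True)
    num_pings = 0
    started = False
    for line in fake.stderr:
        if "Output #0" in line:
            started = True          # download started; stop scanning
            break
        elif "HandleCtrl, Ping" in line:
            num_pings += 1
            if num_pings > max_pings:
                break               # the real code would call self.stop() here
    fake.wait()
    return started, num_pings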
|
|
## @file
# process FD Region generation
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
from __future__ import absolute_import
from struct import *
from .GenFdsGlobalVariable import GenFdsGlobalVariable
from io import BytesIO
import string
import Common.LongFilePathOs as os
from stat import *
from Common import EdkLogger
from Common.BuildToolError import *
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.MultipleWorkspace import MultipleWorkspace as mws
from Common.DataType import BINARY_FILE_TYPE_FV
## generate Region
#
#
class Region(object):
## The constructor
#
# @param self The object pointer
#
def __init__(self):
self.Offset = None # The begin position of the Region
self.Size = None # The Size of the Region
self.PcdOffset = None
self.PcdSize = None
self.SetVarDict = {}
self.RegionType = None
self.RegionDataList = []
## PadBuffer()
#
# Add padding bytes to the Buffer
#
# @param Buffer The buffer the generated region data will be put
# in
# @param ErasePolarity Flash erase polarity
# @param Size Number of padding bytes requested
#
def PadBuffer(self, Buffer, ErasePolarity, Size):
if Size > 0:
if (ErasePolarity == '1') :
PadByte = pack('B', 0xFF)
else:
PadByte = pack('B', 0)
PadData = ''.join(PadByte for i in xrange(0, Size))
Buffer.write(PadData)
## AddToBuffer()
#
# Add region data to the Buffer
#
# @param self The object pointer
# @param Buffer The buffer the generated region data will be put in
# @param BaseAddress base address of region
# @param BlockSizeList list of (block size, block count) pairs of the FD
# @param ErasePolarity Flash erase polarity
# @param ImageBinDict dictionary of pre-built FV/Capsule image files
# @param vtfDict VTF objects
# @param MacroDict macro value pair
# @param Flag when True, only process FV regions without writing to Buffer
# @retval None
#
def AddToBuffer(self, Buffer, BaseAddress, BlockSizeList, ErasePolarity, ImageBinDict, vtfDict=None, MacroDict={}, Flag=False):
Size = self.Size
if not Flag:
GenFdsGlobalVariable.InfLogger('\nGenerate Region at Offset 0x%X' % self.Offset)
GenFdsGlobalVariable.InfLogger(" Region Size = 0x%X" % Size)
GenFdsGlobalVariable.SharpCounter = 0
if Flag and (self.RegionType != BINARY_FILE_TYPE_FV):
return
if self.RegionType == BINARY_FILE_TYPE_FV:
#
# Get Fv from FvDict
#
self.FvAddress = int(BaseAddress, 16) + self.Offset
FvBaseAddress = '0x%X' % self.FvAddress
FvOffset = 0
for RegionData in self.RegionDataList:
FileName = None
if RegionData.endswith(".fv"):
RegionData = GenFdsGlobalVariable.MacroExtend(RegionData, MacroDict)
if not Flag:
GenFdsGlobalVariable.InfLogger(' Region FV File Name = .fv : %s' % RegionData)
if RegionData[1] != ':' :
RegionData = mws.join (GenFdsGlobalVariable.WorkSpaceDir, RegionData)
if not os.path.exists(RegionData):
EdkLogger.error("GenFds", FILE_NOT_FOUND, ExtraData=RegionData)
FileName = RegionData
elif RegionData.upper() + 'fv' in ImageBinDict:
if not Flag:
GenFdsGlobalVariable.InfLogger(' Region Name = FV')
FileName = ImageBinDict[RegionData.upper() + 'fv']
else:
#
# Generate FvImage.
#
FvObj = None
if RegionData.upper() in GenFdsGlobalVariable.FdfParser.Profile.FvDict:
FvObj = GenFdsGlobalVariable.FdfParser.Profile.FvDict[RegionData.upper()]
if FvObj is not None :
if not Flag:
GenFdsGlobalVariable.InfLogger(' Region Name = FV')
#
# Call GenFv tool
#
self.BlockInfoOfRegion(BlockSizeList, FvObj)
self.FvAddress = self.FvAddress + FvOffset
FvAlignValue = GenFdsGlobalVariable.GetAlignment(FvObj.FvAlignment)
if self.FvAddress % FvAlignValue != 0:
EdkLogger.error("GenFds", GENFDS_ERROR,
"FV (%s) is NOT %s Aligned!" % (FvObj.UiFvName, FvObj.FvAlignment))
FvBuffer = BytesIO('')
FvBaseAddress = '0x%X' % self.FvAddress
BlockSize = None
BlockNum = None
FvObj.AddToBuffer(FvBuffer, FvBaseAddress, BlockSize, BlockNum, ErasePolarity, vtfDict, Flag=Flag)
if Flag:
continue
FvBufferLen = len(FvBuffer.getvalue())
if FvBufferLen > Size:
FvBuffer.close()
EdkLogger.error("GenFds", GENFDS_ERROR,
"Size of FV (%s) is larger than Region Size 0x%X specified." % (RegionData, Size))
#
# Put the generated image into FD buffer.
#
Buffer.write(FvBuffer.getvalue())
FvBuffer.close()
FvOffset = FvOffset + FvBufferLen
Size = Size - FvBufferLen
continue
else:
EdkLogger.error("GenFds", GENFDS_ERROR, "FV (%s) is NOT described in FDF file!" % (RegionData))
#
# Add the exist Fv image into FD buffer
#
if not Flag:
if FileName is not None:
FileLength = os.stat(FileName)[ST_SIZE]
if FileLength > Size:
EdkLogger.error("GenFds", GENFDS_ERROR,
"Size of FV File (%s) is larger than Region Size 0x%X specified." \
% (RegionData, Size))
BinFile = open(FileName, 'rb')
Buffer.write(BinFile.read())
BinFile.close()
Size = Size - FileLength
#
# Pad the left buffer
#
if not Flag:
self.PadBuffer(Buffer, ErasePolarity, Size)
if self.RegionType == 'CAPSULE':
#
# Get Capsule from Capsule Dict
#
for RegionData in self.RegionDataList:
if RegionData.endswith(".cap"):
RegionData = GenFdsGlobalVariable.MacroExtend(RegionData, MacroDict)
GenFdsGlobalVariable.InfLogger(' Region CAPSULE Image Name = .cap : %s' % RegionData)
if RegionData[1] != ':' :
RegionData = mws.join (GenFdsGlobalVariable.WorkSpaceDir, RegionData)
if not os.path.exists(RegionData):
EdkLogger.error("GenFds", FILE_NOT_FOUND, ExtraData=RegionData)
FileName = RegionData
elif RegionData.upper() + 'cap' in ImageBinDict:
GenFdsGlobalVariable.InfLogger(' Region Name = CAPSULE')
FileName = ImageBinDict[RegionData.upper() + 'cap']
else:
#
# Generate Capsule image and Put it into FD buffer
#
CapsuleObj = None
if RegionData.upper() in GenFdsGlobalVariable.FdfParser.Profile.CapsuleDict:
CapsuleObj = GenFdsGlobalVariable.FdfParser.Profile.CapsuleDict[RegionData.upper()]
if CapsuleObj is not None :
CapsuleObj.CapsuleName = RegionData.upper()
GenFdsGlobalVariable.InfLogger(' Region Name = CAPSULE')
#
# Call GenFv tool to generate Capsule Image
#
FileName = CapsuleObj.GenCapsule()
CapsuleObj.CapsuleName = None
else:
EdkLogger.error("GenFds", GENFDS_ERROR, "Capsule (%s) is NOT described in FDF file!" % (RegionData))
#
# Add the capsule image into FD buffer
#
FileLength = os.stat(FileName)[ST_SIZE]
if FileLength > Size:
EdkLogger.error("GenFds", GENFDS_ERROR,
"Size 0x%X of Capsule File (%s) is larger than Region Size 0x%X specified." \
% (FileLength, RegionData, Size))
BinFile = open(FileName, 'rb')
Buffer.write(BinFile.read())
BinFile.close()
Size = Size - FileLength
#
# Pad the left buffer
#
self.PadBuffer(Buffer, ErasePolarity, Size)
if self.RegionType in ('FILE', 'INF'):
for RegionData in self.RegionDataList:
if self.RegionType == 'INF':
RegionData.__InfParse__(None)
if len(RegionData.BinFileList) != 1:
EdkLogger.error('GenFds', GENFDS_ERROR, 'INF in FD region can only contain one binary: %s' % RegionData)
File = RegionData.BinFileList[0]
RegionData = RegionData.PatchEfiFile(File.Path, File.Type)
else:
RegionData = GenFdsGlobalVariable.MacroExtend(RegionData, MacroDict)
if RegionData[1] != ':' :
RegionData = mws.join (GenFdsGlobalVariable.WorkSpaceDir, RegionData)
if not os.path.exists(RegionData):
EdkLogger.error("GenFds", FILE_NOT_FOUND, ExtraData=RegionData)
#
# Add the file image into FD buffer
#
FileLength = os.stat(RegionData)[ST_SIZE]
if FileLength > Size:
EdkLogger.error("GenFds", GENFDS_ERROR,
"Size of File (%s) is larger than Region Size 0x%X specified." \
% (RegionData, Size))
GenFdsGlobalVariable.InfLogger(' Region File Name = %s' % RegionData)
BinFile = open(RegionData, 'rb')
Buffer.write(BinFile.read())
BinFile.close()
Size = Size - FileLength
#
# Pad the left buffer
#
self.PadBuffer(Buffer, ErasePolarity, Size)
if self.RegionType == 'DATA' :
GenFdsGlobalVariable.InfLogger(' Region Name = DATA')
DataSize = 0
for RegionData in self.RegionDataList:
Data = RegionData.split(',')
DataSize = DataSize + len(Data)
if DataSize > Size:
EdkLogger.error("GenFds", GENFDS_ERROR, "Size of DATA is larger than Region Size ")
else:
for item in Data :
Buffer.write(pack('B', int(item, 16)))
Size = Size - DataSize
#
# Pad the left buffer
#
self.PadBuffer(Buffer, ErasePolarity, Size)
if self.RegionType is None:
GenFdsGlobalVariable.InfLogger(' Region Name = None')
self.PadBuffer(Buffer, ErasePolarity, Size)
## BlockInfoOfRegion()
#
# @param BlockSizeList List of block information
# @param FvObj The object for FV
#
def BlockInfoOfRegion(self, BlockSizeList, FvObj):
Start = 0
End = 0
RemindingSize = self.Size
ExpectedList = []
for (BlockSize, BlockNum, pcd) in BlockSizeList:
End = Start + BlockSize * BlockNum
# region not started yet
if self.Offset >= End:
Start = End
continue
# region located in current blocks
else:
# region ended within current blocks
if self.Offset + self.Size <= End:
ExpectedList.append((BlockSize, (RemindingSize + BlockSize - 1) / BlockSize))
break
# region not ended yet
else:
# region not started in middle of current blocks
if self.Offset <= Start:
UsedBlockNum = BlockNum
# region started in middle of current blocks
else:
UsedBlockNum = (End - self.Offset) / BlockSize
Start = End
ExpectedList.append((BlockSize, UsedBlockNum))
RemindingSize -= BlockSize * UsedBlockNum
if FvObj.BlockSizeList == []:
FvObj.BlockSizeList = ExpectedList
else:
# first check whether FvObj.BlockSizeList items have only "BlockSize" or "NumBlocks",
# if so, use ExpectedList
for Item in FvObj.BlockSizeList:
if Item[0] is None or Item[1] is None:
FvObj.BlockSizeList = ExpectedList
break
# make sure region size is no smaller than the summed block size in FV
Sum = 0
for Item in FvObj.BlockSizeList:
Sum += Item[0] * Item[1]
if self.Size < Sum:
EdkLogger.error("GenFds", GENFDS_ERROR, "Total Size of FV %s 0x%x is larger than Region Size 0x%x "
% (FvObj.UiFvName, Sum, self.Size))
# check whether the BlockStatements in the FV section are appropriate
ExpectedListData = ''
for Item in ExpectedList:
ExpectedListData += "BlockSize = 0x%x\n\tNumBlocks = 0x%x\n\t" % Item
Index = 0
for Item in FvObj.BlockSizeList:
if Item[0] != ExpectedList[Index][0]:
EdkLogger.error("GenFds", GENFDS_ERROR, "BlockStatements of FV %s are not align with FD's, suggested FV BlockStatement"
% FvObj.UiFvName, ExtraData=ExpectedListData)
elif Item[1] != ExpectedList[Index][1]:
if (Item[1] < ExpectedList[Index][1]) and (Index == len(FvObj.BlockSizeList) - 1):
break;
else:
EdkLogger.error("GenFds", GENFDS_ERROR, "BlockStatements of FV %s are not align with FD's, suggested FV BlockStatement"
% FvObj.UiFvName, ExtraData=ExpectedListData)
else:
Index += 1
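# --- Illustrative sketch (added by the editor; not part of the original EDK2 module) ---
# PadBuffer() fills the unused tail of a region with 0xFF or 0x00 depending on
# the flash erase polarity, and BlockInfoOfRegion() rounds the remaining region
# size up to whole blocks. The tiny standalone function below shows both
# calculations with made-up numbers; it does not touch the GenFds classes.
def _DemoPadAndBlockCount(ErasePolarity='1', PadSize=8, RemainingSize=0x9000, BlockSize=0x1000):
    from struct import pack
    from io import BytesIO
    Demo = BytesIO()
    PadByte = pack('B', 0xFF) if ErasePolarity == '1' else pack('B', 0)
    Demo.write(PadByte * PadSize)                                # padding, as in PadBuffer()
    UsedBlockNum = (RemainingSize + BlockSize - 1) // BlockSize  # round up, as in BlockInfoOfRegion()
    return Demo.getvalue(), UsedBlockNum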
|
|
# Copyright 2016 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Application Load Balancers
"""
import logging
from collections import defaultdict
from c7n.actions import ActionRegistry, BaseAction
from c7n.filters import Filter, FilterRegistry, DefaultVpcBase, ValueFilter
import c7n.filters.vpc as net_filters
from c7n import tags
from c7n.manager import resources
from c7n.query import QueryResourceManager
from c7n.utils import local_session, chunks, type_schema, get_retry
log = logging.getLogger('custodian.app-elb')
filters = FilterRegistry('app-elb.filters')
actions = ActionRegistry('app-elb.actions')
filters.register('tag-count', tags.TagCountFilter)
filters.register('marked-for-op', tags.TagActionFilter)
@resources.register('app-elb')
class AppELB(QueryResourceManager):
"""Resource manager for v2 ELBs (AKA ALBs).
"""
class Meta(object):
service = 'elbv2'
type = 'app-elb'
enum_spec = ('describe_load_balancers', 'LoadBalancers', None)
name = 'LoadBalancerName'
id = 'LoadBalancerArn'
filter_name = None
filter_type = None
dimension = None
date = 'CreatedTime'
config_type = 'AWS::ElasticLoadBalancingV2::LoadBalancer'
resource_type = Meta
filter_registry = filters
action_registry = actions
retry = staticmethod(get_retry(('Throttling',)))
def augment(self, albs):
_describe_appelb_tags(
albs, self.session_factory,
self.executor_factory, self.retry)
return albs
def _describe_appelb_tags(albs, session_factory, executor_factory, retry):
def _process_tags(alb_set):
client = local_session(session_factory).client('elbv2')
alb_map = {alb['LoadBalancerArn']: alb for alb in alb_set}
results = retry(client.describe_tags, ResourceArns=alb_map.keys())
for tag_desc in results['TagDescriptions']:
if ('ResourceArn' in tag_desc and
tag_desc['ResourceArn'] in alb_map):
alb_map[tag_desc['ResourceArn']]['Tags'] = tag_desc['Tags']
with executor_factory(max_workers=2) as w:
list(w.map(_process_tags, chunks(albs, 20)))
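# --- Illustrative sketch (added by the editor; not part of the original module) ---
# _describe_appelb_tags batches describe_tags calls in chunks of 20 ARNs,
# since the elbv2 DescribeTags API accepts a limited number of resources per
# call; c7n.utils.chunks does the slicing. A standalone sketch of the same
# batching, without boto3 and with made-up ARNs:
def _demo_chunked_batches():
    fake_arns = ['arn:aws:elasticloadbalancing:...:loadbalancer/app/demo-%d' % i
                 for i in range(45)]          # hypothetical ARNs
    batches = list(chunks(fake_arns, 20))
    return [len(b) for b in batches]          # -> [20, 20, 5]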
def _add_appelb_tags(albs, session_factory, ts):
client = local_session(session_factory).client('elbv2')
client.add_tags(
ResourceArns=[alb['LoadBalancerArn'] for alb in albs],
Tags=ts)
def _remove_appelb_tags(albs, session_factory, tag_keys):
client = local_session(session_factory).client('elbv2')
client.remove_tags(
ResourceArns=[alb['LoadBalancerArn'] for alb in albs],
TagKeys=tag_keys)
@filters.register('security-group')
class SecurityGroupFilter(net_filters.SecurityGroupFilter):
RelatedIdsExpression = "SecurityGroups[]"
@filters.register('subnet')
class SubnetFilter(net_filters.SubnetFilter):
RelatedIdsExpression = "AvailabilityZones[].SubnetId"
@actions.register('mark-for-op')
class AppELBMarkForOpAction(tags.TagDelayedAction):
batch_size = 1
def process_resource_set(self, resource_set, ts):
_add_appelb_tags(
resource_set,
self.manager.session_factory,
ts)
@actions.register('tag')
class AppELBTagAction(tags.Tag):
batch_size = 1
def process_resource_set(self, resource_set, ts):
_add_appelb_tags(
resource_set,
self.manager.session_factory,
ts)
@actions.register('remove-tag')
class AppELBRemoveTagAction(tags.RemoveTag):
batch_size = 1
def process_resource_set(self, resource_set, tag_keys):
_remove_appelb_tags(
resource_set,
self.manager.session_factory,
tag_keys)
@actions.register('delete')
class AppELBDeleteAction(BaseAction):
schema = type_schema('delete')
def process(self, load_balancers):
with self.executor_factory(max_workers=2) as w:
list(w.map(self.process_alb, load_balancers))
def process_alb(self, alb):
client = local_session(self.manager.session_factory).client('elbv2')
client.delete_load_balancer(LoadBalancerArn=alb['LoadBalancerArn'])
class AppELBListenerFilterBase(object):
""" Mixin base class for filters that query LB listeners.
"""
def initialize(self, albs):
def _process_listeners(alb):
client = local_session(
self.manager.session_factory).client('elbv2')
results = client.describe_listeners(
LoadBalancerArn=alb['LoadBalancerArn'])
self.listener_map[alb['LoadBalancerArn']] = results['Listeners']
self.listener_map = defaultdict(list)
with self.manager.executor_factory(max_workers=2) as w:
list(w.map(_process_listeners, albs))
class AppELBAttributeFilterBase(object):
""" Mixin base class for filters that query LB attributes.
"""
def initialize(self, albs):
def _process_attributes(alb):
if 'Attributes' not in alb:
client = local_session(
self.manager.session_factory).client('elbv2')
results = client.describe_load_balancer_attributes(
LoadBalancerArn=alb['LoadBalancerArn'])
alb['Attributes'] = results['Attributes']
with self.manager.executor_factory(max_workers=2) as w:
list(w.map(_process_attributes, albs))
class AppELBTargetGroupFilterBase(object):
""" Mixin base class for filters that query LB target groups.
"""
def initialize(self, albs):
self.target_group_map = defaultdict(list)
target_group_manager = AppELBTargetGroup(self.manager.ctx, {})
target_groups = target_group_manager.resources()
for target_group in target_groups:
for load_balancer_arn in target_group['LoadBalancerArns']:
self.target_group_map[load_balancer_arn].append(target_group)
@filters.register('listener')
class AppELBListenerFilter(ValueFilter, AppELBListenerFilterBase):
"""
"""
schema = type_schema('listener', rinherit=ValueFilter.schema)
def process(self, albs, event=None):
self.initialize(albs)
return super(AppELBListenerFilter, self).process(albs, event)
def __call__(self, alb):
listeners = self.listener_map[alb['LoadBalancerArn']]
return self.match(listeners)
@filters.register('healthcheck-protocol-mismatch')
class AppELBHealthCheckProtocolMismatchFilter(Filter,
AppELBTargetGroupFilterBase):
"""
"""
schema = type_schema('healthcheck-protocol-mismatch')
def process(self, albs, event=None):
def _healthcheck_protocol_mismatch(alb):
for target_group in self.target_group_map[alb['LoadBalancerArn']]:
if (target_group['Protocol'] !=
target_group['HealthCheckProtocol']):
return True
return False
self.initialize(albs)
return [alb for alb in albs if _healthcheck_protocol_mismatch(alb)]
@filters.register('target-group')
class AppELBTargetGroupFilter(ValueFilter, AppELBTargetGroupFilterBase):
"""
"""
schema = type_schema('target-group', rinherit=ValueFilter.schema)
def process(self, albs, event=None):
self.initialize(albs)
return super(AppELBTargetGroupFilter, self).process(albs, event)
def __call__(self, alb):
target_groups = self.target_group_map[alb['LoadBalancerArn']]
return self.match(target_groups)
@filters.register('default-vpc')
class AppELBDefaultVpcFilter(DefaultVpcBase):
schema = type_schema('default-vpc')
def __call__(self, alb):
return alb.get('VpcId') and self.match(alb.get('VpcId')) or False
@resources.register('app-elb-target-group')
class AppELBTargetGroup(QueryResourceManager):
"""Resource manager for v2 ELB target groups.
"""
class Meta(object):
service = 'elbv2'
type = 'app-elb-target-group'
enum_spec = ('describe_target_groups', 'TargetGroups', None)
name = 'TargetGroupName'
id = 'TargetGroupArn'
filter_name = None
filter_type = None
dimension = None
date = None
resource_type = Meta
filter_registry = FilterRegistry('app-elb-target-group.filters')
action_registry = ActionRegistry('app-elb-target-group.actions')
retry = staticmethod(get_retry(('Throttling',)))
filter_registry.register('tag-count', tags.TagCountFilter)
filter_registry.register('marked-for-op', tags.TagActionFilter)
def augment(self, target_groups):
def _describe_target_group_health(target_group):
client = local_session(self.session_factory).client('elbv2')
result = client.describe_target_health(
TargetGroupArn=target_group['TargetGroupArn'])
target_group['TargetHealthDescriptions'] = result[
'TargetHealthDescriptions']
with self.executor_factory(max_workers=2) as w:
list(w.map(_describe_target_group_health, target_groups))
_describe_target_group_tags(
target_groups, self.session_factory,
self.executor_factory, self.retry)
return target_groups
def _describe_target_group_tags(target_groups, session_factory,
executor_factory, retry):
def _process_tags(target_group_set):
client = local_session(session_factory).client('elbv2')
target_group_map = {
target_group['TargetGroupArn']:
target_group for target_group in target_group_set
}
results = retry(
client.describe_tags,
ResourceArns=target_group_map.keys())
for tag_desc in results['TagDescriptions']:
if ('ResourceArn' in tag_desc and
tag_desc['ResourceArn'] in target_group_map):
target_group_map[
tag_desc['ResourceArn']
]['Tags'] = tag_desc['Tags']
with executor_factory(max_workers=2) as w:
list(w.map(_process_tags, chunks(target_groups, 20)))
def _add_target_group_tags(target_groups, session_factory, ts):
client = local_session(session_factory).client('elbv2')
client.add_tags(
ResourceArns=[
target_group['TargetGroupArn'] for target_group in target_groups
],
Tags=ts)
def _remove_target_group_tags(target_groups, session_factory, tag_keys):
client = local_session(session_factory).client('elbv2')
client.remove_tags(
ResourceArns=[
target_group['TargetGroupArn'] for target_group in target_groups
],
TagKeys=tag_keys)
@AppELBTargetGroup.action_registry.register('mark-for-op')
class AppELBTargetGroupMarkForOpAction(tags.TagDelayedAction):
batch_size = 1
def process_resource_set(self, resource_set, ts):
_add_target_group_tags(
resource_set,
self.manager.session_factory,
ts)
@AppELBTargetGroup.action_registry.register('tag')
class AppELBTargetGroupTagAction(tags.Tag):
batch_size = 1
def process_resource_set(self, resource_set, ts):
_add_target_group_tags(
resource_set,
self.manager.session_factory,
ts)
@AppELBTargetGroup.action_registry.register('remove-tag')
class AppELBTargetGroupRemoveTagAction(tags.RemoveTag):
batch_size = 1
def process_resource_set(self, resource_set, tag_keys):
_remove_target_group_tags(
resource_set,
self.manager.session_factory,
tag_keys)
@AppELBTargetGroup.filter_registry.register('default-vpc')
class AppELBTargetGroupDefaultVpcFilter(DefaultVpcBase):
schema = type_schema('default-vpc')
def __call__(self, target_group):
return (target_group.get('VpcId') and
self.match(target_group.get('VpcId')) or False)
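# --- Illustrative sketch (added by the editor; not part of the original module) ---
# The filters and actions registered above are normally driven from a Cloud
# Custodian policy file. The dict below is a hedged example of such a policy,
# written as Python data rather than YAML, combining the `default-vpc` filter
# with the `delete` action defined in this module. The resource, filter and
# action names come from the registrations above; the policy name is made up.
EXAMPLE_APP_ELB_POLICY = {
    'policies': [{
        'name': 'app-elb-delete-default-vpc',   # hypothetical policy name
        'resource': 'app-elb',
        'filters': [{'type': 'default-vpc'}],
        'actions': [{'type': 'delete'}],
    }]
}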
|
|
###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
import os
from pymongo import MongoClient
from vistrails.core.modules.vistrails_module import Module
class MongoDatabase(Module):
"""Connects to MongoDB and selects a database.
"""
_input_ports = [('host', '(basic:String)',
{'optional': True, 'defaults': '["localhost"]'}),
('port', '(basic:Integer)',
{'optional': True}),
('database', '(basic:String)')]
_output_ports = [('database', '(MongoDatabase)')]
def compute(self):
kwargs = {'host': self.get_input('host')}
if self.has_input('port'):
kwargs['port'] = self.get_input('port')
client = MongoClient(**kwargs)
database = client.get_database(self.get_input('database'))
self.set_output('database', database)
# Database operations
class MongoCollection(Module):
"""A single collection in a mongo database.
"""
_input_ports = [('database', MongoDatabase),
('name', '(basic:String)')]
_output_ports = [('collection', '(MongoCollection)')]
def compute(self):
self.set_output('collection',
self.get_input('database')[self.get_input('name')])
class DropCollection(Module):
"""Drops a collection from a MongoDB database.
"""
_input_ports = [('collection', MongoCollection)]
_output_ports = [('database', MongoDatabase)]
def compute(self):
collection = self.get_input('collection')
database = collection.database
collection.drop()
self.set_output('database', database)
class RenameCollection(Module):
"""Renames a collection in a MongoDB database.
"""
_input_ports = [('collection', MongoCollection),
('new_name', '(basic:String)')]
_output_ports = [('collection', MongoCollection),
('database', MongoDatabase)]
def compute(self):
collection = self.get_input('collection')
new_name = self.get_input('new_name')
collection.rename(new_name)
self.set_output('collection', collection)
self.set_output('database', collection.database)
_modules = [MongoDatabase, MongoCollection, DropCollection, RenameCollection]
# Data operations
class BaseCollectionOperation(Module):
_input_ports = [('collection', MongoCollection)]
_output_ports = [('collection', MongoCollection)]
collection_op_out = None
def compute(self):
collection = self.get_input('collection')
out = self.collection_operation(collection)
if self.collection_op_out is not None:
self.set_output(self.collection_op_out, out)
self.set_output('collection', collection)
def collection_operation(self, collection):
raise NotImplementedError
_modules.append((BaseCollectionOperation, {'abstract': True}))
def collection_op(input_ports, output=None, doc=None):
def wrapper(func):
dct = {'_input_ports': input_ports,
'collection_operation': func,
'__doc__': doc}
if output:
dct['_output_ports'] = [output]
dct['collection_op_out'] = output[0]
_modules.append(type(func.func_name, (BaseCollectionOperation,), dct))
return func
return wrapper
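# --- Illustrative sketch (added by the editor; not part of the original package) ---
# collection_op() turns a plain function into a VisTrails Module subclass by
# building a class dict with the port specs and installing the function as
# collection_operation(). The standalone function below mimics that type()
# construction without the VisTrails runtime; FakeBase stands in for
# BaseCollectionOperation and Touch is a made-up operation.
def _demo_collection_op_expansion():
    class FakeBase(object):
        collection_op_out = None
    def Touch(self, coll):
        return None
    dct = {'_input_ports': [('filter', '(basic:Dictionary)')],
           'collection_operation': Touch,
           '__doc__': "Example generated module."}
    Generated = type(Touch.__name__, (FakeBase,), dct)   # func.func_name in the Python 2 code above
    return Generated.__name__, Generated._input_ports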
@collection_op([('document', '(basic:Dictionary)')],
doc="Insert a single document into a collection.")
def InsertOne(self, coll):
coll.insert_one(self.get_input('document'))
@collection_op([('filter', '(basic:Dictionary)'),
('document', '(basic:Dictionary)')],
doc="Replace a single document matching the filter.")
def ReplaceOne(self, coll):
coll.replace_one(self.get_input('filter'), self.get_input('document'))
@collection_op([('filter', '(basic:Dictionary)'), ('document', '(basic:Dictionary)'),
('insert_if_nomatch', '(basic:Boolean)',
{'optional': True, 'defaults': ['True']})],
doc="Update one document matching the selector.")
def UpdateOne(self, coll):
coll.update_one(self.get_input('filter'),
self.get_input('document'),
upsert=self.get_input('insert_if_nomatch'))
@collection_op([('filter', '(basic:Dictionary)')],
doc="Delete a single document matching the filter.")
def DeleteOne(self, coll):
coll.delete_one(self.get_input('filter'))
@collection_op([('filter', '(basic:Dictionary)')],
doc="Delete one or more documents matching the filter.")
def DeleteMany(self, coll):
coll.delete_many(self.get_input('filter'))
@collection_op([('pipeline', '(basic:List)')],
output=('results', '(basic:List)'),
doc="Calculate aggregate values for the data in the "
"collection.\n"
"\n"
"The pipeline is a list of operations. See the `aggregation"
" pipeline operators <https://docs.mongodb.com/manual/ref"
"erence/operator/aggregation-pipeline/>`__ for details.")
def Aggregate(self, coll):
return list(coll.aggregate(self.get_input('pipeline')))
@collection_op([('filter', '(basic:Dictionary)'),
('limit', '(basic:Integer)', {'optional': True})],
output=('results', '(basic:List)'),
doc="Query the database.")
def Find(self, coll):
return list(coll.find(self.get_input('filter'),
limit=self.force_get_input('limit', 0)))
@collection_op([('filter', '(basic:Dictionary)')],
output=('document', '(basic:Dictionary)'),
doc="Get a single document from the database.")
def FindOne(self, coll):
return coll.find_one(self.get_input('filter'))
@collection_op([('filter', '(basic:Dictionary)')],
output=('old_document', '(basic:Dictionary)'),
doc="Finds a single document and deletes it, returning the "
"document.")
def FindOneAndDelete(self, coll):
return coll.find_one_and_delete(self.get_input('filter'))
@collection_op([('filter', '(basic:Dictionary)'), ('document', '(basic:Dictionary)')],
output=('old_document', '(basic:Dictionary)'),
doc="Finds a single document and replaces it, returning the "
"old document.")
def FindOneAndReplace(self, coll):
return coll.find_one_and_replace(self.get_input('filter'),
self.get_input('document'))
@collection_op([('filter', '(basic:Dictionary)'),
('update', '(basic:Dictionary)')],
output=('old_document', '(basic:Dictionary)'),
doc="Finds a single document and updates it, returning the old "
"document.")
def FindOneAndUpdate(self, coll):
return coll.find_one_and_update(self.get_input('filter'),
self.get_input('update'))
@collection_op([('filter', '(basic:Dictionary)', {'optional': True})],
output=('count', '(basic:Integer)'),
doc="Get the number of documents in this collection, "
"optionally matching a filter.")
def Count(self, coll):
return coll.count(self.force_get_input('filter'))
@collection_op([('key', '(basic:String)'), ('filter', '(basic:Dictionary)')],
output=('results', '(basic:List)'),
doc="Get a list of distinct values for `key` among all "
"documents in this collection, or the ones matching the "
"filter.")
def Distinct(self, coll):
return list(coll.distinct(self.get_input('key'), self.get_input('filter')))
@collection_op([('key', '(basic:List)'), ('condition', '(basic:Dictionary)'),
('initial', '(basic:Variant)'), ('reduce', '(basic:String)'),
('finalize', '(basic:String)')],
output=('results', '(basic:List)'),
doc="Perform a query similar to an SQL *group by* operation.\n"
"\n"
"Returns an array of grouped items.\n"
"\n"
" - `key` is a list of keys to group by.\n"
" - `condition` specifies which documents to consider.\n"
" - `initial` is the initial value of the aggregation "
"counter object.\n"
" - `reduce` is the aggregation function as JavaScript.\n"
" - `finalize` is the function to be called on each "
"object in the output list.")
def Group(self, coll):
return list(coll.group(self.get_input('key'), self.get_input('condition'),
self.get_input('initial'), self.get_input('reduce'),
self.force_get_input('finalize', None)))
@collection_op([('map', '(basic:String)'), ('reduce', '(basic:String)'),
('out', '(basic:String)')],
output=('results', MongoCollection),
doc="Perform a map/reduce operation on this collection.\n"
"\n"
"Returns a collection object containing the results of the "
"operation.\n"
"\n"
" - `map` is the map function as JavaScript.\n"
" - `reduce` is the reduce function as JavaScript.\n"
" - `out` is the output collection name.")
def MapReduce(self, coll):
return coll.map_reduce(self.get_input('map'), self.get_input('reduce'),
self.get_input('out'))
###############################################################################
import unittest
class TestMongoDB(unittest.TestCase):
@classmethod
def setUpClass(cls):
if 'VISTRAILS_TEST_MONGODB' not in os.environ:
raise unittest.SkipTest(
"MongoDB tests need $VISTRAILS_TEST_MONGODB to point to a "
"MongoDB server")
else:
uri = os.environ['VISTRAILS_TEST_MONGODB']
host, port = uri.rsplit(':', 1)
port = int(port)
def mock_get_input(self, name):
if name == 'host':
return host
elif name == 'port':
return port
else:
return Module.get_input(self, name)
MongoDatabase.get_input = mock_get_input
MongoDatabase.has_input = lambda s, n: True
@classmethod
def tearDownClass(cls):
del MongoDatabase.get_input
del MongoDatabase.has_input
def test_example(self):
"""Runs the example vt file.
"""
from vistrails.tests.utils import run_file
self.assertFalse(run_file('examples/mongodb.vt'))
|
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import json
from proton import generate_uuid
import collections
class Schema(object):
schema = {}
@staticmethod
def i(entity, attribute):
return Schema.schema[entity]["attributeNames"].index(attribute)
@staticmethod
def type(entity):
return Schema.schema[entity]["fullyQualifiedType"]
@staticmethod
def init():
with open("topologies/schema.json") as fp:
data = json.load(fp)
for entity in data["entityTypes"]:
Schema.schema[entity] = {"attributeNames": [],
"fullyQualifiedType": data["entityTypes"][entity]["fullyQualifiedType"]}
for attribute in data["entityTypes"][entity]["attributes"]:
Schema.schema[entity]["attributeNames"].append(attribute)
Schema.schema[entity]["attributeNames"].append("type")
class SparseList(list):
'''
from http://stackoverflow.com/questions/1857780/sparse-assignment-list-in-python
'''
def __setitem__(self, index, value):
missing = index - len(self) + 1
if missing > 0:
self.extend([None] * missing)
list.__setitem__(self, index, value)
def __getitem__(self, index):
try: return list.__getitem__(self, index)
except IndexError: return None
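# --- Illustrative sketch (added by the editor; not part of the original module) ---
# SparseList lets the entity classes below assign values by schema index
# without pre-sizing the list; missing or out-of-range slots read back as
# None. A tiny standalone demonstration:
def _demo_sparse_list():
    s = SparseList()
    s[5] = 'cost'            # auto-extends with None up to index 5
    assert s[5] == 'cost'
    assert s[2] is None      # unset index within range
    assert s[99] is None     # out-of-range index also returns None
    return s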
class Entity(object):
def __init__(self, name):
self.name = name
self.value = SparseList()
self.settype()
def setval(self, attribute, value):
self.value[Schema.i(self.name, attribute)] = value
def settype(self):
self.setval("type", Schema.type(self.name))
def setZero(self, attributes):
for attribute in attributes:
self.setval(attribute, 0)
def getval(self, attr):
return self.value[Schema.i(self.name, attr)]
def vals(self):
return self.value
class Multiple(object):
def __init__(self):
self.results = []
def vals(self):
return self.results
class RouterNode(Entity):
instance = 0
def __init__(self, f, t, links, hopper):
super(RouterNode, self).__init__("router.node")
self.hopper = hopper
self.init(f, t, links)
def init(self, f, t, links):
RouterNode.instance += 1
self.setval("name", "router.node/" + t)
self.setval("nextHop", "(self)" if f == t else self.hopper.get(f, t, links))
self.setval("validOrigins", [])
self.setval("linkState", [])
self.setval("instance", RouterNode.instance)
self.setval("cost", 1)
self.setval("address", "amqp:/_topo/0/" + t)
self.setval("id", t)
self.setval("identity", self.value[Schema.i(self.name, "name")])
def reset(self):
RouterNode.nh = NextHop()
class Connector(Entity):
def __init__(self, host, port):
super(Connector, self).__init__("connector")
self.init(host, port)
def init(self, host, port):
self.setval("verifyHostName", True)
self.setval("cost", 1)
self.setval("addr", "127.0.0.1")
self.setval("maxSessions", 32768)
self.setval("allowRedirect", True)
self.setval("idleTimeoutSeconds", 16)
self.setval("saslMechanisms", "AMONYMOUS")
self.setval("maxFrameSize", 16384)
self.setval("maxSessionFrames", 100)
self.setval("host", host)
self.setval("role", "inter-router")
self.setval("stripAnnotations", "both")
self.setval("port", port)
self.setval("identity", "connector/" + host + ":" + port)
self.setval("name", self.getval("identity"))
class Policy(Entity):
def __init__(self):
super(Policy, self).__init__("policy")
self.init()
def init(self):
self.setval("connectionsProcessed", 2)
self.setval("defaultVhost", "$default")
self.setval("connectionsDenied", 0)
self.setval("enableVhostPolicy", False)
self.setval("maxConnections", 65535)
self.setval("connectionsCurrent", self.getval("connectionsProcessed"))
self.setval("identity", 1)
self.setval("name", "policy/" + str(self.getval("identity")))
class Logs(Multiple):
modules = ["AGENT", "CONTAINER", "DEFAULT", "ERROR", "MESSAGE", "POLICY", "ROUTER", "ROUTER_CORE", "ROUTER_HELLO",
"ROUTER_LS", "ROUTER_MA", "SERVER"]
def __init__(self):
super(Logs, self).__init__()
for module in Logs.modules:
self.results.append(Log(module).vals())
class Log(Entity):
def __init__(self, module):
super(Log, self).__init__("log")
self.init(module)
def init(self, module):
self.setval("name", "log/" + module)
self.setval("identity", self.getval("name"))
self.setval("module", module)
class Allocators(Multiple):
names = [["qd_bitmask", 24], ["_buffer", 536], ["_composed_field", 64], ["_composite", 112], ["_connection", 232],
["_connector", 56], ["_deferred_call", 32], ["_field_iterator", 128], ["_hash_handle", 16],
["_hash_item", 32], ["_hash_segment", 24], ["_link", 48], ["_listener", 32], ["_log_entry", 2104],
["_management_context", 56], ["_message_context", 640], ["_message", 128], ["_node", 56],
["_parsed_field", 88], ["_timer", 56], ["_work_item", 24], ["pn_connector", 600], ["pn_listener", 48],
["r_action", 160], ["r_address_config", 56], ["r_address", 264], ["r_connection", 232],
["r_connection_work", 56], ["r_delivery_ref", 24], ["r_delivery", 144], ["r_field", 40],
["r_general_work", 64], ["r_link_ref", 24], ["r_link", 304], ["r_node", 64], ["r_query", 336],
["r_terminus", 64], ["tm_router", 16]]
def __init__(self):
super(Allocators, self).__init__()
for name in Allocators.names:
self.results.append(Allocator(name).vals())
class Allocator(Entity):
def __init__(self, name):
super(Allocator, self).__init__("allocator")
self.init(name)
def init(self, name):
n = "qd" + name[0] + "_t"
self.setZero(["heldByThreads", "transferBatchSize", "globalFreeListMax", "batchesRebalancedToGlobal", "batchesRebalancedToThreads",
"totalFreeToHeap", "totalAllocFromHeap", "localFreeListMax"])
self.setval("name", "allocator/" + n)
self.setval("identity", self.getval("name"))
self.setval("typeName", n)
self.setval("typeSize", name[1])
class RouterAddresses(Multiple):
def __init__(self, node, nodes):
super(RouterAddresses, self).__init__()
addresses = {}
others = []
for n in nodes:
if n['nodeType'] == 'inter-router':
if n['name'] != node['name']:
self.results.append(RouterAddress("R"+n['name'], [n['name']], "closest", 0).vals())
others.append(n['name'])
else:
for normal in n['normals']:
nname = '.'.join(normal['name'].split('.')[:-1])
if "console_identifier" not in node['properties']:
maddr = "M0" + normal['addr']
if maddr not in addresses:
addresses[maddr] = []
if nname != node['name']:
if nname not in addresses[maddr]:
addresses[maddr].append(nname)
for address in addresses:
self.results.append(RouterAddress(address, addresses[address], "balanced", 0).vals())
self.results.append(RouterAddress("L_$management_internal", [], "closest", 1).vals())
self.results.append(RouterAddress("M0$management", [], "closest", 1).vals())
self.results.append(RouterAddress("L$management", [], "closest", 1).vals())
self.results.append(RouterAddress("L$qdhello", [], "flood", 1).vals())
self.results.append(RouterAddress("L$qdrouter", [], "flood", 1).vals())
self.results.append(RouterAddress("L$qdrouter.ma", [], "multicast", 1).vals())
self.results.append(RouterAddress("Tqdrouter", others, "flood", 1).vals())
self.results.append(RouterAddress("Tqdrouter.ma", others, "multicast", 1).vals())
class RouterAddress(Entity):
def __init__(self, name, rhrList, distribution, inProcess):
super(RouterAddress, self).__init__("router.address")
self.init(name, rhrList, distribution, inProcess)
def init(self, name, rhrList, distribution, inProcess):
self.setZero(["subscriberCount", "deliveriesEgress", "deliveriesIngress",
"deliveriesFromContainer", "deliveriesTransit", "containerCount",
"trackedDeliveries", "deliveriesToContainer"])
self.setval("name", name)
self.setval("key", self.getval("name"))
self.setval("distribution", distribution)
self.setval("identity", self.getval("name"))
self.setval("remoteHostRouters", rhrList)
self.setval("remoteCount", len(rhrList))
self.setval("inProcess", inProcess)
class Address(Entity):
def __init__(self):
super(Address, self).__init__("address")
self.init()
def init(self):
self.setval("egressPhase", 0)
self.setval("ingressPhase", 0)
self.setval("prefix", "closest")
self.setval("waypoint", False)
self.setval("distribution", "closest")
self.setval("identity", 1)
self.setval("name", "address/" + str(self.getval("identity")))
class Router(Entity):
def __init__(self, node):
super(Router, self).__init__("router")
self.init(node)
def init(self, node):
self.setval("mobileAddrMaxAge", 60)
self.setval("raIntervalFlux", 4)
self.setval("workerThreads", 4)
self.setval("name", "router/" + node['name'])
self.setval("helloInterval", 1)
self.setval("area", 0)
self.setval("helloMaxAge", 3)
self.setval("remoteLsMaxAge", 60)
self.setval("addrCount", 0)
self.setval("raInterval", 30)
self.setval("mode", "interior")
self.setval("nodeCount", 0)
self.setval("saslConfigName", "qdrouterd")
self.setval("linkCount", 0)
self.setval("id", node['name'])
self.setval("identity", "router/" + node['name'])
class Listener(Entity):
def __init__(self, port):
super(Listener, self).__init__("listener")
self.init(port)
def init(self, port):
self.setval("stripAnnotations", "both")
self.setval("requireSsl", False)
self.setval("idleTimeoutSeconds", 16)
self.setval("cost", 1)
self.setval("port", str(port))
self.setval("addr", "0.0.0.0")
self.setval("saslMechanisms", "ANONYMOUS")
self.setval("requireEncryption", False)
self.setval("linkCapacity", 4)
self.setval("role", "normal")
self.setval("authenticatePeer", False)
self.setval("host", "::")
self.setval("identity", "listener/:::" + str(port))
self.setval("name", self.getval("identity"))
self.setval("maxFrameSize", 16384)
class Connection(Entity):
def __init__(self, node, id):
super(Connection, self).__init__("connection")
self.init(node, id)
def init(self, node, id):
if "container" not in node:
self.setval("container", str(generate_uuid()))
else:
self.setval("container", node["container"])
self.setval("opened", True)
self.setval("name", "connection/0.0.0.0:" + str(id))
self.setval("properties", node["properties"])
self.setval("ssl", False)
if "host" in node:
self.setval("host", node["host"])
else:
self.setval("host", "0.0.0.0:20000")
if "isEncrypted" not in node:
self.setval("isEncrypted", False)
else:
self.setval("isEncrypted", node["isEncrypted"])
if "user" not in node:
self.setval("user", "anonymous")
else:
self.setval("user", node["user"])
self.setval("role", node["nodeType"])
self.setval("isAuthenticated", False)
self.setval("identity", id)
self.setval("dir", node["cdir"])
class RouterLink(Entity):
def __init__(self, node, identity, ldir, owningAddr, linkType, connId):
super(RouterLink, self).__init__("router.link")
self.init(node, identity, ldir, owningAddr, linkType, connId)
def init(self, node, identity, ldir, owningAddr, linkType, connId):
linkUuid = str(generate_uuid())
self.setval("name", linkUuid)
self.setval("identity", identity)
self.setval("linkName", linkUuid)
self.setval("linkType", linkType)
self.setval("linkDir", ldir)
self.setval("owningAddr", owningAddr)
self.setval("capacity", 250)
self.setZero(["undeliveredCount", "unsettledCount", "deliveryCount", "presettledCount", "acceptedCount",
"rejectedCount", "releasedCount", "modifiedCount"])
self.setval("connectionId", connId)
self.setval("adminStatus", "enabled")
self.setval("operStatus", "up")
Schema.init()
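# --- Illustrative sketch (added by the editor; not part of the original module) ---
# Schema.i() maps an attribute name to its positional index and Entity stores
# values in a SparseList at that index, so vals() yields a row in schema
# order. The demo below fills Schema.schema by hand with a made-up "demo"
# entity (instead of reading topologies/schema.json) to show the mechanism.
def _demo_entity_roundtrip():
    Schema.schema['demo'] = {
        'attributeNames': ['name', 'cost', 'type'],
        'fullyQualifiedType': 'org.example.demo',   # hypothetical type string
    }
    e = Entity('demo')          # settype() records the fullyQualifiedType
    e.setval('name', 'demo/0')
    e.setval('cost', 1)
    return e.vals()             # -> ['demo/0', 1, 'org.example.demo']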
|
|
import json
# django imports
from django.contrib.auth.decorators import permission_required
from django.core.paginator import EmptyPage
from django.core.paginator import Paginator
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.shortcuts import get_object_or_404
from django.template import RequestContext
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.http import require_POST
# lfs imports
import lfs.core.utils
from lfs.caching.utils import lfs_get_object_or_404
from lfs.catalog.models import Category
from lfs.catalog.models import GroupsPropertiesRelation
from lfs.catalog.models import Product
from lfs.catalog.models import Property
from lfs.catalog.models import PropertyGroup
from lfs.core.utils import LazyEncoder
from lfs.core.signals import product_removed_property_group
from lfs.manage.property_groups.forms import PropertyGroupForm
@permission_required("core.manage_shop")
def manage_property_groups(request):
"""The main view to manage properties.
"""
try:
prop = PropertyGroup.objects.all()[0]
url = reverse("lfs_manage_property_group", kwargs={"id": prop.id})
except IndexError:
url = reverse("lfs_manage_no_property_groups")
return HttpResponseRedirect(url)
@permission_required("core.manage_shop")
def manage_property_group(request, id, template_name="manage/property_groups/property_group.html"):
"""Edits property group with given id.
"""
property_group = get_object_or_404(PropertyGroup, pk=id)
if request.method == "POST":
form = PropertyGroupForm(instance=property_group, data=request.POST)
if form.is_valid():
form.save()
return lfs.core.utils.set_message_cookie(
url=reverse("lfs_manage_property_group", kwargs={"id": property_group.id}),
msg=_(u"Property group has been saved."),
)
else:
form = PropertyGroupForm(instance=property_group)
return render_to_response(template_name, RequestContext(request, {
"property_group": property_group,
"property_groups": PropertyGroup.objects.all(),
"properties": properties_inline(request, id),
"products": products_tab(request, id),
"form": form,
"current_id": int(id),
}))
@permission_required("core.manage_shop")
def no_property_groups(request, template_name="manage/property_groups/no_property_groups.html"):
"""Displays that there are no property groups.
"""
return render_to_response(template_name, RequestContext(request, {}))
@permission_required("core.manage_shop")
def properties_inline(request, id, template_name="manage/property_groups/properties_inline.html"):
"""
"""
property_group = get_object_or_404(PropertyGroup, pk=id)
gps = GroupsPropertiesRelation.objects.filter(group=id).select_related('property')
# Calculate assignable properties
#assigned_property_ids = [p.property.id for p in gps]
#assignable_properties = Property.objects.exclude(
# pk__in=assigned_property_ids).exclude(local=True)
assignable_properties = Property.objects.exclude(local=True).exclude(groupspropertiesrelation__in=gps)
assignable_properties = assignable_properties.order_by('name')
return render_to_string(template_name, RequestContext(request, {
"property_group": property_group,
"properties": assignable_properties,
"gps": gps,
}))
@permission_required("core.manage_shop")
def add_property_group(request, template_name="manage/property_groups/add_property_group.html"):
"""Adds a new property group
"""
if request.method == "POST":
form = PropertyGroupForm(data=request.POST)
if form.is_valid():
property_group = form.save()
return lfs.core.utils.set_message_cookie(
url=reverse("lfs_manage_property_group", kwargs={"id": property_group.id}),
msg=_(u"Property group has been added."),
)
else:
form = PropertyGroupForm()
return render_to_response(template_name, RequestContext(request, {
"form": form,
"property_groups": PropertyGroup.objects.all(),
"came_from": (request.POST if request.method == 'POST' else request.GET).get("came_from",
reverse("lfs_manage_property_groups")),
}))
@permission_required("core.manage_shop")
@require_POST
def delete_property_group(request, id):
"""Deletes the property group with passed id.
"""
property_group = get_object_or_404(PropertyGroup, pk=id)
property_group.delete()
return lfs.core.utils.set_message_cookie(
url=reverse("lfs_manage_property_groups"),
msg=_(u"Property group has been deleted."),
)
@permission_required("core.manage_shop")
def assign_properties(request, group_id):
"""Assignes given properties (via request body) to passed group id.
"""
for property_id in request.POST.getlist("property-id"):
GroupsPropertiesRelation.objects.get_or_create(group_id=group_id, property_id=property_id)
    _update_positions(group_id)
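    # The response pairs a CSS selector with rendered markup; the client-side
    # script is expected to swap it into the matching element (``#properties``).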
html = [["#properties", properties_inline(request, group_id)]]
result = json.dumps({
"html": html,
"message": _(u"Properties have been assigned.")
}, cls=LazyEncoder)
return HttpResponse(result, content_type='application/json')
@permission_required("core.manage_shop")
def update_properties(request, group_id):
"""Update or Removes given properties (via request body) from passed group id.
"""
if request.POST.get("action") == "remove":
for property_id in request.POST.getlist("property-id"):
try:
gp = GroupsPropertiesRelation.objects.get(group=group_id, property=property_id)
except GroupsPropertiesRelation.DoesNotExist:
pass
else:
gp.delete()
message = _(u"Properties have been removed.")
else:
message = _(u"There are no properties to update.")
for gp in GroupsPropertiesRelation.objects.filter(group=group_id):
position = request.POST.get("position-%s" % gp.property.id, 999)
gp.position = int(position)
gp.save()
message = _(u"Properties have been updated.")
    _update_positions(group_id)
html = [["#properties", properties_inline(request, group_id)]]
result = json.dumps({
"html": html,
"message": message
}, cls=LazyEncoder)
return HttpResponse(result, content_type='application/json')
# Product tab
@permission_required("core.manage_shop")
def products_tab(request, product_group_id, template_name="manage/property_groups/products.html"):
"""Renders the products tab of the property groups management views.
"""
property_group = PropertyGroup.objects.get(pk=product_group_id)
inline = products_inline(request, product_group_id, as_string=True)
return render_to_string(template_name, RequestContext(request, {
"property_group": property_group,
"products_inline": inline,
}))
@permission_required("core.manage_shop")
def products_inline(request, product_group_id, as_string=False,
template_name="manage/property_groups/products_inline.html"):
"""Renders the products tab of the property groups management views.
"""
property_group = PropertyGroup.objects.get(pk=product_group_id)
group_products = property_group.products.all().select_related('parent')
r = request.POST if request.method == 'POST' else request.GET
s = request.session
    # If we get the parameter ``keep-filters`` or ``page`` we take the
    # filters out of the request or, respectively, the session. The request
    # takes precedence. The ``page`` parameter is given if the user clicks on
    # the next/previous page links. The ``keep-filters`` parameter is given if
    # the user adds/removes products. In this way we keep the current filters
    # when we need to. If the whole page is reloaded there is no
    # ``keep-filters`` or ``page`` and all filters are reset, as they should be.
if r.get("keep-filters") or r.get("page"):
page = r.get("page", s.get("property_group_page", 1))
filter_ = r.get("filter", s.get("filter"))
category_filter = r.get("products_category_filter",
s.get("products_category_filter"))
else:
page = r.get("page", 1)
filter_ = r.get("filter")
category_filter = r.get("products_category_filter")
# The current filters are saved in any case for later use.
s["property_group_page"] = page
s["filter"] = filter_
s["products_category_filter"] = category_filter
filters = Q()
if filter_:
filters &= Q(name__icontains=filter_)
if category_filter:
if category_filter == "None":
filters &= Q(categories=None)
elif category_filter == "All":
pass
else:
            # First we collect all sub categories and then use the `in` operator.
category = lfs_get_object_or_404(Category, pk=category_filter)
categories = [category]
categories.extend(category.get_all_children())
filters &= Q(categories__in=categories)
products = Product.objects.select_related('parent').filter(filters)
paginator = Paginator(products.exclude(pk__in=group_products), 25)
try:
page = paginator.page(page)
except EmptyPage:
page = 0
result = render_to_string(template_name, RequestContext(request, {
"property_group": property_group,
"group_products": group_products,
"page": page,
"paginator": paginator,
"filter": filter_
}))
if as_string:
return result
else:
return HttpResponse(
json.dumps({
"html": [["#products-inline", result]],
}), content_type='application/json')
@permission_required("core.manage_shop")
def assign_products(request, group_id):
"""Assign products to given property group with given property_group_id.
"""
property_group = lfs_get_object_or_404(PropertyGroup, pk=group_id)
for temp_id in request.POST.keys():
if temp_id.startswith("product"):
temp_id = temp_id.split("-")[1]
product = Product.objects.get(pk=temp_id)
property_group.products.add(product)
html = [["#products-inline", products_inline(request, group_id, as_string=True)]]
result = json.dumps({
"html": html,
"message": _(u"Products have been assigned.")
}, cls=LazyEncoder)
return HttpResponse(result, content_type='application/json')
@permission_required("core.manage_shop")
def remove_products(request, group_id):
"""Remove products from given property group with given property_group_id.
"""
property_group = lfs_get_object_or_404(PropertyGroup, pk=group_id)
for temp_id in request.POST.keys():
if temp_id.startswith("product"):
temp_id = temp_id.split("-")[1]
product = Product.objects.get(pk=temp_id)
property_group.products.remove(product)
# Notify removing
product_removed_property_group.send(sender=property_group, product=product)
html = [["#products-inline", products_inline(request, group_id, as_string=True)]]
result = json.dumps({
"html": html,
"message": _(u"Products have been removed.")
}, cls=LazyEncoder)
return HttpResponse(result, content_type='application/json')
def _update_positions(group_id):
    """Reassigns the positions of the group's property relations in steps of 10.
    """
for i, gp in enumerate(GroupsPropertiesRelation.objects.filter(group=group_id)):
gp.position = (i + 1) * 10
gp.save()
@permission_required("core.manage_shop")
def sort_property_groups(request):
"""Sort property groups
"""
    # Filter out empty entries so an empty ``serialized`` value does not break
    # the unpacking below.
    property_group_list = [s for s in request.POST.get("serialized", "").split('&') if s]
    if len(property_group_list) > 0:
pos = 10
for cat_str in property_group_list:
elem, pg_id = cat_str.split('=')
pg = PropertyGroup.objects.get(pk=pg_id)
pg.position = pos
pg.save()
pos += 10
result = json.dumps({
"message": _(u"The Property groups have been sorted."),
}, cls=LazyEncoder)
return HttpResponse(result, content_type='application/json')
|
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Trains a Latent Dirichlet Allocation (LDA) model on 20 Newsgroups.
LDA [1] is a topic model for documents represented as bag-of-words
(word counts). It attempts to find a set of topics so that every document from
the corpus is well-described by a few topics.
Suppose that there are `V` words in the vocabulary and we want to learn `K`
topics. For each document, let `w` be its `V`-dimensional vector of word counts
and `theta` be its `K`-dimensional vector of topics. Let `Beta` be a `KxV`
matrix in which each row is a discrete distribution over words in the
corresponding topic (in other words, each row belongs to a unit simplex). Also, let
`alpha` be the `K`-dimensional vector of prior distribution parameters
(prior topic weights).
The model we consider here is obtained from the standard LDA by collapsing
the (non-reparameterizable) Categorical distribution over the topics
[1, Sec. 3.2; 3]. Then, the prior distribution is
`p(theta) = Dirichlet(theta | alpha)`, and the likelihood is
`p(w | theta, Beta) = OneHotCategorical(w | theta Beta)`. This means that we
sample the words from a Categorical distribution that is a weighted average
of topics, with the weights specified by `theta`. The number of samples (words)
in the document is assumed to be known, and the words are sampled independently.
We follow [2] and perform amortized variational inference similarly to
Variational Autoencoders. We use a neural network encoder to
parameterize a Dirichlet variational posterior distribution `q(theta | w)`.
Then, an evidence lower bound (ELBO) is maximized with respect to
`alpha`, `Beta` and the parameters of the variational posterior distribution.
We use the preprocessed version of 20 newsgroups dataset from [3].
This implementation uses the hyperparameters of [2] and reproduces the reported
results (test perplexity ~875).
Example output for the final iteration:
```none
elbo
-567.829
loss
567.883
global_step
180000
reconstruction
-562.065
topics
index=8 alpha=0.46 write article get think one like know say go make
index=21 alpha=0.29 use get thanks one write know anyone car please like
index=0 alpha=0.09 file use key program window image available information
index=43 alpha=0.08 drive use card disk system problem windows driver mac run
index=6 alpha=0.07 god one say christian jesus believe people bible think man
index=5 alpha=0.07 space year new program use research launch university nasa
index=33 alpha=0.07 government gun law people state use right weapon crime
index=36 alpha=0.05 game team play player year win season hockey league score
index=42 alpha=0.05 go say get know come one think people see tell
index=49 alpha=0.04 bike article write post get ride dod car one go
kl
5.76408
perplexity
873.206
```
#### References
[1]: David M. Blei, Andrew Y. Ng, Michael I. Jordan. Latent Dirichlet
Allocation. In _Journal of Machine Learning Research_, 2003.
http://www.jmlr.org/papers/volume3/blei03a/blei03a.pdf
[2]: Michael Figurnov, Shakir Mohamed, Andriy Mnih. Implicit Reparameterization
Gradients, 2018.
https://arxiv.org/abs/1805.08498
[3]: Akash Srivastava, Charles Sutton. Autoencoding Variational Inference For
Topic Models. In _International Conference on Learning Representations_,
2017.
https://arxiv.org/abs/1703.01488
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
# Dependency imports
from absl import flags
flags.DEFINE_string(name="job-dir", default="/tmp", help="AI Platform Training passes this to the training script.")
import numpy as np
import scipy.sparse
from six.moves import cPickle as pickle
from six.moves import urllib
import tensorflow as tf
from tensorflow_probability import edward2 as ed
flags.DEFINE_float(
"learning_rate", default=3e-4, help="Learning rate.")
flags.DEFINE_integer(
"max_steps", default=180000, help="Number of training steps to run.")
flags.DEFINE_integer(
"num_topics",
default=50,
help="The number of topics.")
flags.DEFINE_list(
"layer_sizes",
default=["300", "300", "300"],
help="Comma-separated list denoting hidden units per layer in the encoder.")
flags.DEFINE_string(
"activation",
default="relu",
help="Activation function for all hidden layers.")
flags.DEFINE_integer(
"batch_size",
default=32,
help="Batch size.")
flags.DEFINE_float(
"prior_initial_value", default=0.7, help="The initial value for prior.")
flags.DEFINE_integer(
"prior_burn_in_steps",
default=120000,
help="The number of training steps with fixed prior.")
flags.DEFINE_string(
"data_dir",
default=os.path.join(os.getenv("TEST_TMPDIR", "/tmp"), "lda/data"),
help="Directory where data is stored (if using real data).")
flags.DEFINE_string(
"model_dir",
default=os.path.join(os.getenv("TEST_TMPDIR", "/tmp"), "lda/"),
help="Directory to put the model's fit.")
flags.DEFINE_integer(
"viz_steps", default=10000, help="Frequency at which save visualizations.")
flags.DEFINE_bool("fake_data", default=False, help="If true, uses fake data.")
flags.DEFINE_bool(
"delete_existing",
default=False,
help="If true, deletes existing directory.")
FLAGS = flags.FLAGS
def _clip_dirichlet_parameters(x):
"""Clips the Dirichlet parameters to the numerically stable KL region."""
return tf.clip_by_value(x, 1e-3, 1e3)
def _softplus_inverse(x):
"""Returns inverse of softplus function."""
return np.log(np.expm1(x))
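# Illustrative sketch (not part of the training pipeline below): the collapsed
# LDA generative process from the module docstring, written with NumPy. The
# argument names used here (`rng`, `alpha`, `beta`) are for illustration only.
def _sample_document_for_illustration(rng, alpha, beta, words_per_document):
  """Samples one bag-of-words document from the collapsed LDA model.

  Args:
    rng: A `np.random.RandomState` instance.
    alpha: Array of shape [num_topics]; the Dirichlet prior concentration.
    beta: Array of shape [num_topics, num_words]; each row is a per-topic
      distribution over words.
    words_per_document: Number of words to draw for the document.

  Returns:
    Array of shape [num_words] holding the sampled word counts.
  """
  theta = rng.dirichlet(alpha)            # per-document topic weights
  word_probs = np.dot(theta, beta)        # weighted average of topics
  words = rng.choice(beta.shape[1], size=words_per_document, p=word_probs)
  return np.bincount(words, minlength=beta.shape[1]).astype(np.float32)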
def latent_dirichlet_allocation(concentration, topics_words):
"""Latent Dirichlet Allocation in terms of its generative process.
The model posits a distribution over bags of words and is parameterized by
a concentration and the topic-word probabilities. It collapses per-word
topic assignments.
Args:
concentration: A Tensor of shape [1, num_topics], which parameterizes the
Dirichlet prior over topics.
topics_words: A Tensor of shape [num_topics, num_words], where each row
(topic) denotes the probability of each word being in that topic.
Returns:
bag_of_words: A random variable capturing a sample from the model, of shape
[1, num_words]. It represents one generated document as a bag of words.
"""
topics = ed.Dirichlet(concentration=concentration, name="topics")
word_probs = tf.matmul(topics, topics_words)
# The observations are bags of words and therefore not one-hot. However,
# log_prob of OneHotCategorical computes the probability correctly in
# this case.
bag_of_words = ed.OneHotCategorical(probs=word_probs, name="bag_of_words")
return bag_of_words
def make_lda_variational(activation, num_topics, layer_sizes):
"""Creates the variational distribution for LDA.
Args:
activation: Activation function to use.
num_topics: The number of topics.
layer_sizes: The number of hidden units per layer in the encoder.
Returns:
lda_variational: A function that takes a bag-of-words Tensor as
input and returns a distribution over topics.
"""
encoder_net = tf.keras.Sequential()
for num_hidden_units in layer_sizes:
encoder_net.add(
tf.keras.layers.Dense(
num_hidden_units,
activation=activation,
kernel_initializer=tf.compat.v1.glorot_normal_initializer()))
encoder_net.add(
tf.keras.layers.Dense(
num_topics,
activation=tf.nn.softplus,
kernel_initializer=tf.compat.v1.glorot_normal_initializer()))
def lda_variational(bag_of_words):
concentration = _clip_dirichlet_parameters(encoder_net(bag_of_words))
return ed.Dirichlet(concentration=concentration, name="topics_posterior")
return lda_variational
def make_value_setter(**model_kwargs):
"""Creates a value-setting interceptor.
Args:
**model_kwargs: dict of str to Tensor. Keys are the names of random variable
in the model to which this interceptor is being applied. Values are
Tensors to set their value to.
Returns:
set_values: Function which sets the value of intercepted ops.
"""
def set_values(f, *args, **kwargs):
"""Sets random variable values to its aligned value."""
name = kwargs.get("name")
if name in model_kwargs:
kwargs["value"] = model_kwargs[name]
return ed.interceptable(f)(*args, **kwargs)
return set_values
def model_fn(features, labels, mode, params, config):
"""Builds the model function for use in an Estimator.
Arguments:
features: The input features for the Estimator.
labels: The labels, unused here.
mode: Signifies whether it is train or test or predict.
params: Some hyperparameters as a dictionary.
config: The RunConfig, unused here.
Returns:
EstimatorSpec: A tf.estimator.EstimatorSpec instance.
"""
del labels, config
# Set up the model's learnable parameters.
logit_concentration = tf.compat.v1.get_variable(
"logit_concentration",
shape=[1, params["num_topics"]],
initializer=tf.compat.v1.initializers.constant(
_softplus_inverse(params["prior_initial_value"])))
concentration = _clip_dirichlet_parameters(
tf.nn.softplus(logit_concentration))
num_words = features.shape[1]
topics_words_logits = tf.compat.v1.get_variable(
"topics_words_logits",
shape=[params["num_topics"], num_words],
initializer=tf.compat.v1.glorot_normal_initializer())
topics_words = tf.nn.softmax(topics_words_logits, axis=-1)
# Compute expected log-likelihood. First, sample from the variational
# distribution; second, compute the log-likelihood given the sample.
lda_variational = make_lda_variational(
params["activation"],
params["num_topics"],
params["layer_sizes"])
with ed.tape() as variational_tape:
_ = lda_variational(features)
with ed.tape() as model_tape:
with ed.interception(
make_value_setter(topics=variational_tape["topics_posterior"])):
posterior_predictive = latent_dirichlet_allocation(concentration,
topics_words)
log_likelihood = posterior_predictive.distribution.log_prob(features)
tf.compat.v1.summary.scalar("log_likelihood",
tf.reduce_mean(input_tensor=log_likelihood))
# Compute the KL-divergence between two Dirichlets analytically.
# The sampled KL does not work well for "sparse" distributions
# (see Appendix D of [2]).
kl = variational_tape["topics_posterior"].distribution.kl_divergence(
model_tape["topics"].distribution)
tf.compat.v1.summary.scalar("kl", tf.reduce_mean(input_tensor=kl))
# Ensure that the KL is non-negative (up to a very small slack).
# Negative KL can happen due to numerical instability.
with tf.control_dependencies(
[tf.compat.v1.assert_greater(kl, -1e-3, message="kl")]):
kl = tf.identity(kl)
elbo = log_likelihood - kl
avg_elbo = tf.reduce_mean(input_tensor=elbo)
tf.compat.v1.summary.scalar("elbo", avg_elbo)
loss = -avg_elbo
# Perform variational inference by minimizing the -ELBO.
global_step = tf.compat.v1.train.get_or_create_global_step()
optimizer = tf.compat.v1.train.AdamOptimizer(params["learning_rate"])
# This implements the "burn-in" for prior parameters (see Appendix D of [2]).
# For the first prior_burn_in_steps steps they are fixed, and then trained
# jointly with the other parameters.
grads_and_vars = optimizer.compute_gradients(loss)
grads_and_vars_except_prior = [
x for x in grads_and_vars if x[1] != logit_concentration]
def train_op_except_prior():
return optimizer.apply_gradients(
grads_and_vars_except_prior,
global_step=global_step)
def train_op_all():
return optimizer.apply_gradients(
grads_and_vars,
global_step=global_step)
train_op = tf.cond(
pred=global_step < params["prior_burn_in_steps"],
true_fn=train_op_except_prior,
false_fn=train_op_all)
  # The perplexity is the exponential of the average negative ELBO per word.
words_per_document = tf.reduce_sum(input_tensor=features, axis=1)
log_perplexity = -elbo / words_per_document
tf.compat.v1.summary.scalar(
"perplexity", tf.exp(tf.reduce_mean(input_tensor=log_perplexity)))
(log_perplexity_tensor,
log_perplexity_update) = tf.compat.v1.metrics.mean(log_perplexity)
perplexity_tensor = tf.exp(log_perplexity_tensor)
# Obtain the topics summary. Implemented as a py_func for simplicity.
topics = tf.compat.v1.py_func(
functools.partial(get_topics_strings, vocabulary=params["vocabulary"]),
[topics_words, concentration],
tf.string,
stateful=False)
tf.compat.v1.summary.text("topics", topics)
return tf.estimator.EstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op,
eval_metric_ops={
"elbo": tf.compat.v1.metrics.mean(elbo),
"log_likelihood": tf.compat.v1.metrics.mean(log_likelihood),
"kl": tf.compat.v1.metrics.mean(kl),
"perplexity": (perplexity_tensor, log_perplexity_update),
"topics": (topics, tf.no_op()),
},
)
def get_topics_strings(topics_words, alpha, vocabulary,
topics_to_print=10, words_per_topic=10):
"""Returns the summary of the learned topics.
Arguments:
topics_words: KxV tensor with topics as rows and words as columns.
alpha: 1xK tensor of prior Dirichlet concentrations for the
topics.
vocabulary: A mapping of word's integer index to the corresponding string.
topics_to_print: The number of topics with highest prior weight to
summarize.
    words_per_topic: Number of words per topic to return.
Returns:
summary: A np.array with strings.
"""
alpha = np.squeeze(alpha, axis=0)
# Use a stable sorting algorithm so that when alpha is fixed
# we always get the same topics.
highest_weight_topics = np.argsort(-alpha, kind="mergesort")
top_words = np.argsort(-topics_words, axis=1)
res = []
for topic_idx in highest_weight_topics[:topics_to_print]:
l = ["index={} alpha={:.2f}".format(topic_idx, alpha[topic_idx])]
l += [vocabulary[word] for word in top_words[topic_idx, :words_per_topic]]
res.append(" ".join(l))
return np.array(res)
ROOT_PATH = "https://github.com/akashgit/autoencoding_vi_for_topic_models/raw/9db556361409ecb3a732f99b4ef207aeb8516f83/data/20news_clean"
FILE_TEMPLATE = "{split}.txt.npy"
def download(directory, filename):
"""Download a file."""
filepath = os.path.join(directory, filename)
if tf.io.gfile.exists(filepath):
return filepath
if not tf.io.gfile.exists(directory):
tf.io.gfile.makedirs(directory)
url = os.path.join(ROOT_PATH, filename)
print("Downloading %s to %s" % (url, filepath))
urllib.request.urlretrieve(url, filepath)
return filepath
def newsgroups_dataset(directory, split_name, num_words, shuffle_and_repeat):
"""20 newsgroups as a tf.data.Dataset."""
data = np.load(download(directory, FILE_TEMPLATE.format(split=split_name)))
# The last row is empty in both train and test.
data = data[:-1]
# Each row is a list of word ids in the document. We first convert this to
# sparse COO matrix (which automatically sums the repeating words). Then,
# we convert this COO matrix to CSR format which allows for fast querying of
# documents.
num_documents = data.shape[0]
indices = np.array([(row_idx, column_idx)
for row_idx, row in enumerate(data)
for column_idx in row])
sparse_matrix = scipy.sparse.coo_matrix(
(np.ones(indices.shape[0]), (indices[:, 0], indices[:, 1])),
shape=(num_documents, num_words),
dtype=np.float32)
sparse_matrix = sparse_matrix.tocsr()
dataset = tf.data.Dataset.range(num_documents)
# For training, we shuffle each epoch and repeat the epochs.
if shuffle_and_repeat:
dataset = dataset.shuffle(num_documents).repeat()
# Returns a single document as a dense TensorFlow tensor. The dataset is
# stored as a sparse matrix outside of the graph.
def get_row_py_func(idx):
def get_row_python(idx_py):
return np.squeeze(np.array(sparse_matrix[idx_py].todense()), axis=0)
py_func = tf.compat.v1.py_func(
get_row_python, [idx], tf.float32, stateful=False)
py_func.set_shape((num_words,))
return py_func
dataset = dataset.map(get_row_py_func)
return dataset
def build_fake_input_fns(batch_size):
"""Builds fake data for unit testing."""
num_words = 1000
vocabulary = [str(i) for i in range(num_words)]
random_sample = np.random.randint(
10, size=(batch_size, num_words)).astype(np.float32)
def train_input_fn():
dataset = tf.data.Dataset.from_tensor_slices(random_sample)
dataset = dataset.batch(batch_size).repeat()
return tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
def eval_input_fn():
dataset = tf.data.Dataset.from_tensor_slices(random_sample)
dataset = dataset.batch(batch_size)
return tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
return train_input_fn, eval_input_fn, vocabulary
def build_input_fns(data_dir, batch_size):
"""Builds iterators for train and evaluation data.
Each object is represented as a bag-of-words vector.
Arguments:
data_dir: Folder in which to store the data.
batch_size: Batch size for both train and evaluation.
Returns:
train_input_fn: A function that returns an iterator over the training data.
eval_input_fn: A function that returns an iterator over the evaluation data.
vocabulary: A mapping of word's integer index to the corresponding string.
"""
with open(download(data_dir, "vocab.pkl"), "r") as f:
words_to_idx = pickle.load(f)
num_words = len(words_to_idx)
vocabulary = [None] * num_words
for word, idx in words_to_idx.items():
vocabulary[idx] = word
# Build an iterator over training batches.
def train_input_fn():
dataset = newsgroups_dataset(
data_dir, "train", num_words, shuffle_and_repeat=True)
# Prefetching makes training about 1.5x faster.
dataset = dataset.batch(batch_size).prefetch(32)
return tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
# Build an iterator over the heldout set.
def eval_input_fn():
dataset = newsgroups_dataset(
data_dir, "test", num_words, shuffle_and_repeat=False)
dataset = dataset.batch(batch_size)
return tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
return train_input_fn, eval_input_fn, vocabulary
def main(argv):
del argv # unused
params = FLAGS.flag_values_dict()
params["layer_sizes"] = [int(units) for units in params["layer_sizes"]]
params["activation"] = getattr(tf.nn, params["activation"])
if FLAGS.delete_existing and tf.io.gfile.exists(FLAGS.model_dir):
tf.compat.v1.logging.warn("Deleting old log directory at {}".format(
FLAGS.model_dir))
tf.io.gfile.rmtree(FLAGS.model_dir)
tf.io.gfile.makedirs(FLAGS.model_dir)
if FLAGS.fake_data:
train_input_fn, eval_input_fn, vocabulary = build_fake_input_fns(
FLAGS.batch_size)
else:
train_input_fn, eval_input_fn, vocabulary = build_input_fns(
FLAGS.data_dir, FLAGS.batch_size)
params["vocabulary"] = vocabulary
estimator = tf.estimator.Estimator(
model_fn,
params=params,
config=tf.estimator.RunConfig(
model_dir=FLAGS.model_dir,
save_checkpoints_steps=FLAGS.viz_steps,
),
)
for _ in range(FLAGS.max_steps // FLAGS.viz_steps):
estimator.train(train_input_fn, steps=FLAGS.viz_steps)
eval_results = estimator.evaluate(eval_input_fn)
# Print the evaluation results. The keys are strings specified in
# eval_metric_ops, and the values are NumPy scalars/arrays.
for key, value in eval_results.items():
print(key)
if key == "topics":
# Topics description is a np.array which prints better row-by-row.
for s in value:
print(s)
else:
print(str(value))
print("")
print("")
if __name__ == "__main__":
tf.compat.v1.app.run()
|
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Contains code for parsing and building a dictionary from text.
"""
from typing import Optional
from parlai.core.params import ParlaiParser
from parlai.core.opt import Opt
from parlai.core.build_data import modelzoo_path
from parlai.utils.bpe import bpe_factory, BPEHelper
from .agents import Agent
from .build_data import make_dir
from collections import defaultdict
import copy
import numpy as np
import os
import json
import re
import parlai.utils.logging as logging
from parlai.utils.io import PathManager
from typing import List
import enum
class TokenizationMode(enum.Enum):
TRAIN_TIME_TEXT = 0
TRAIN_TIME_LABEL = 1
TEST_TIME_TEXT = 2
TEST_TIME_LABEL = 3
RETOK = re.compile(r'\w+|[^\w\s]|\n', re.UNICODE)
def escape(s):
r"""
Replace potential special characters with escaped version.
For example, \n => \\n and \t => \\t
:param s:
string to escape
"""
return s.replace('\n', '\\n').replace('\t', '\\t').replace('\r', '\\r')
def unescape(s):
r"""
Revert escaped characters back to their special version.
For example, \\n => \n and \\t => \t
:param s:
string to unescape
"""
return s.replace('\\n', '\n').replace('\\t', '\t').replace('\\r', '\r')
def find_ngrams(token_dict, text, n):
"""
Break text into ngrams that appear in ``token_dict``.
:param token_dict:
``dict`` to check for ngrams
:param text:
``str`` to look for ngrams in
:param n:
``int`` max size of ngrams
"""
# base case
if n <= 1:
return text
# tokens committed to output
saved_tokens = []
# tokens remaining to be searched in sentence
search_tokens = text[:]
# tokens stored until next ngram found
next_search = []
while len(search_tokens) >= n:
ngram = ' '.join(search_tokens[:n])
if ngram in token_dict:
# first, search previous unmatched words for smaller ngrams
sub_n = min(len(next_search), n - 1)
saved_tokens.extend(find_ngrams(token_dict, next_search, sub_n))
next_search.clear()
# then add this ngram
saved_tokens.append(ngram)
# then pop this ngram from the remaining words to search
search_tokens = search_tokens[n:]
else:
next_search.append(search_tokens.pop(0))
remainder = next_search + search_tokens
sub_n = min(len(remainder), n - 1)
saved_tokens.extend(find_ngrams(token_dict, remainder, sub_n))
return saved_tokens
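# Example (illustrative): if ``token_dict`` contains the bigram 'new york',
# find_ngrams(token_dict, ['i', 'love', 'new', 'york'], 2) returns
# ['i', 'love', 'new york'].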
class DictionaryAgent(Agent):
"""
Builds and/or loads a dictionary.
The dictionary provides access to the frequency of each token, functions to
translate sentences from tokens to their vectors (list of ints, each int is the
index of a token in the dictionary) and back from vectors to tokenized text.
"""
default_lang = 'english'
default_maxngram = -1
default_minfreq = 0
default_maxtokens = -1
default_null = '__null__'
default_start = '__start__'
default_end = '__end__'
default_unk = '__unk__'
default_tok = 're'
default_lower = False
default_textfields = 'text,labels'
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
"""
Add commandline arguments related to the dictionary.
"""
dictionary = parser.add_argument_group('Dictionary Arguments')
dictionary.add_argument(
'-df',
'--dict-file',
help='path to dictionary file. defaults to [model_file].dict if '
'not set and model_file is set.',
hidden=True,
)
dictionary.add_argument(
'--dict-initpath',
hidden=True,
help='path to a saved dictionary to load tokens / counts from to '
'seed the dictionary with initial tokens and/or frequencies',
)
dictionary.add_argument(
'--dict-language',
default=DictionaryAgent.default_lang,
hidden=True,
help='sets language for the punkt sentence tokenizer',
)
dictionary.add_argument(
'--dict-max-ngram-size',
type=int,
hidden=True,
default=DictionaryAgent.default_maxngram,
help='looks for ngrams of up to this size. this is ignored when '
'building the dictionary. note: this takes approximate '
'runtime of len(sentence)^max_ngram_size',
)
dictionary.add_argument(
'--dict-minfreq',
default=DictionaryAgent.default_minfreq,
type=int,
help='minimum frequency of words to include them in sorted '
'dict or minimum frequency of bpe codecs',
hidden=True,
)
dictionary.add_argument(
'--dict-maxtokens',
default=DictionaryAgent.default_maxtokens,
type=int,
help='max number of tokens to include in dictionary or bpe codecs',
hidden=True,
)
dictionary.add_argument(
'--dict-nulltoken',
default=DictionaryAgent.default_null,
hidden=True,
help='empty token, can be used for padding or just empty values',
)
dictionary.add_argument(
'--dict-starttoken',
default=DictionaryAgent.default_start,
hidden=True,
help='token for starting sentence generation, if needed',
)
dictionary.add_argument(
'--dict-endtoken',
default=DictionaryAgent.default_end,
hidden=True,
help='token for end of sentence markers, if needed',
)
dictionary.add_argument(
'--dict-unktoken',
default=DictionaryAgent.default_unk,
hidden=True,
help='token to return for unavailable words',
)
dictionary.add_argument(
'-tok',
'--dict-tokenizer',
default=DictionaryAgent.default_tok,
        help='Which tokenizer to use. Defaults to "re", which splits '
'on whitespace as well as recognizing basic punctuation. '
'Other options include nltk, gpt2 and bytelevelbpe.',
hidden=True,
)
dictionary.add_argument(
'--dict-lower',
default=DictionaryAgent.default_lower,
type='bool',
help='Whether or not to lowercase all text seen.',
hidden=True,
)
dictionary.add_argument(
'--bpe-debug',
action='store_true',
hidden=True,
help='Leave BPE tokens untouched in output. Useful for debugging.',
)
dictionary.add_argument(
'--dict-textfields',
default=DictionaryAgent.default_textfields,
hidden=True,
help='Observation fields which dictionary learns vocabulary from. '
'Tasks with additional fields may add to this list to handle '
'any extra vocabulary.',
)
dictionary = BPEHelper.add_cmdline_args(dictionary, partial_opt=partial_opt)
return dictionary
def __init__(self, opt: Opt, shared=None):
"""
Initialize DictionaryAgent.
"""
self.opt = copy.deepcopy(opt)
self.minfreq = opt.get('dict_minfreq', DictionaryAgent.default_minfreq)
self.null_token = opt.get('dict_nulltoken', DictionaryAgent.default_null)
self.end_token = opt.get('dict_endtoken', DictionaryAgent.default_end)
self.unk_token = opt.get('dict_unktoken', DictionaryAgent.default_unk)
self.start_token = opt.get('dict_starttoken', DictionaryAgent.default_start)
self.max_ngram_size = opt.get(
'dict_max_ngram_size', DictionaryAgent.default_maxngram
)
self.tokenizer = opt.get('dict_tokenizer', DictionaryAgent.default_tok)
self.lower = opt.get('dict_lower', DictionaryAgent.default_lower)
self.maxtokens = opt.get('dict_maxtokens', DictionaryAgent.default_maxtokens)
self.textfields = opt.get(
'dict_textfields', DictionaryAgent.default_textfields
).split(",")
        # used to signal whether we should use training time tricks, like bpe dropout
self._tokenization_mode = TokenizationMode.TEST_TIME_LABEL
try:
self.tokenizer_fun = getattr(self, self.tokenizer + '_tokenize')
except AttributeError:
raise AttributeError(
'tokenizer type {} not yet supported'.format(self.tokenizer)
)
if shared:
self.freq = shared.get('freq', {})
self.tok2ind = shared.get('tok2ind', {})
self.ind2tok = shared.get('ind2tok', {})
else:
self.additional_special_tokens: List[str] = []
self.freq = defaultdict(int)
self.tok2ind = {}
self.ind2tok = {}
if self.null_token:
self.add_token(self.null_token)
if self.start_token:
# set special start of sentence word token
self.add_token(self.start_token)
if self.end_token:
# set special end of sentence word token
self.add_token(self.end_token)
if self.unk_token:
# set special unknown word token
self.add_token(self.unk_token)
loaded = False
# If data built via pytorch data teacher, we need to load prebuilt dict
if opt.get('dict_file'):
opt['dict_file'] = modelzoo_path(opt.get('datapath'), opt['dict_file'])
if PathManager.exists(opt['dict_file']):
# load pre-existing dictionary
self.load(opt['dict_file'])
loaded = True
if not loaded and opt.get('dict_initpath'):
# load seed dictionary
opt['dict_initpath'] = modelzoo_path(
opt.get('datapath'), opt['dict_initpath']
)
# don't check isfile first, should fail if file not found
self.load(opt['dict_initpath'])
opt['dict_loaded'] = loaded
# cache unk token for later
self._unk_token_idx = self.tok2ind.get(self.unk_token)
# initialize tokenizers
if self.tokenizer == 'nltk':
try:
import nltk
except ImportError:
raise ImportError('Please install nltk (pip install nltk)')
# nltk-specific setup
st_path = 'tokenizers/punkt/{0}.pickle'.format(opt['dict_language'])
try:
self.sent_tok = nltk.data.load(st_path)
except LookupError:
nltk.download('punkt')
self.sent_tok = nltk.data.load(st_path)
self.word_tok = nltk.tokenize.treebank.TreebankWordTokenizer()
elif self.tokenizer in ['bpe', 'gpt2', 'bytelevelbpe', 'slow_bytelevel_bpe']:
self.bpe = bpe_factory(opt, shared)
self.bpe.sync_with_dict(self)
if not shared:
if self.null_token:
# fix count for null token to one billion and three
self.freq[self.null_token] = 1000000003
if self.start_token:
# fix count for start of sentence token to one billion and two
self.freq[self.start_token] = 1000000002
if self.end_token:
# fix count for end of sentence token to one billion and one
self.freq[self.end_token] = 1000000001
if self.unk_token:
# fix count for unknown token to one billion
self.freq[self.unk_token] = 1000000000
if opt.get('dict_file'):
self.save_path = opt['dict_file']
def add_additional_special_tokens(self, additional_special_tokens: List[str]):
"""
Add additional special tokens to the dictionary.
Should only be called after initialization of the existing dictionary.
"""
self.additional_special_tokens = additional_special_tokens
for tok in self.additional_special_tokens:
self.add_token(tok)
for i, tok in enumerate(self.additional_special_tokens):
self.freq[tok] = 1000000000 + 4 + i
if hasattr(self, 'bpe'):
self.bpe.add_special_tokens(self, self.additional_special_tokens)
elif self.tokenizer in ('split', 're', 'space'):
pass
else:
raise NotImplementedError(
f"Special Tokens are not supported with this tokenizer. "
f"(--dict-tokenizer {self.tokenizer}). File a github issue or "
f"pull request if you need others extended. "
f"https://github.com/facebookresearch/ParlAI"
)
def is_prebuilt(self):
"""
Indicates whether the dictionary is fixed, and does not require building.
"""
return self.tokenizer == 'gpt2'
def add_token(self, word):
"""
Add a single token to the dictionary.
"""
if word not in self.tok2ind:
index = len(self.tok2ind)
self.tok2ind[word] = index
self.ind2tok[index] = word
def __contains__(self, key):
"""
Return if the dictionary contains the key.
If key is an int, returns whether the key is in the indices. If key is a str,
return if the token is in the dict of tokens.
"""
if type(key) == int:
return key in self.ind2tok
elif type(key) == str:
return key in self.tok2ind
def _word_lookup(self, key):
# return index from token, or unk_token's index, or None
return self.tok2ind.get(key, self._unk_token_idx)
def _index_lookup(self, key):
# return token from index, or unk_token
return self.ind2tok.get(key, self.unk_token)
def __getitem__(self, key):
"""
Lookup the word or ID.
If key is an int, returns the corresponding token. If it does not exist, return
the unknown token. If key is a str, return the token's index. If the token is
not in the dictionary, return the index of the unknown token. If there is no
unknown token, return ``None``.
"""
if type(key) == str:
return self._word_lookup(key)
if type(key) == int:
return self._index_lookup(key)
def __len__(self):
return len(self.tok2ind)
def __setitem__(self, key, value):
"""
Set the frequency for a word to a value.
If the key is not in the dictionary, add it to the dictionary and set its
frequency to value.
"""
key = str(key)
if self.lower:
key = key.lower()
self.freq[key] = int(value)
self.add_token(key)
def keys(self):
"""
Return all the words in the dictionary.
"""
return self.tok2ind.keys()
def nltk_tokenize(self, text, building=False):
"""
Tokenize using NLTK PunktTokenizer.
Uses nltk-trained PunktTokenizer for sentence tokenization and Treebank Word
Tokenizer for tokenizing words within sentences.
"""
return (
token
for sent in self.sent_tok.tokenize(text)
for token in self.word_tok.tokenize(sent)
)
def gpt2_tokenize(self, text):
"""
Tokenize using Gpt2 BPE tokenizer.
"""
return self.bpe_tokenize(text)
def slow_bytelevel_bpe_tokenize(self, text):
"""
Tokenize using Gpt2 BPE tokenizer.
"""
return self.bpe_tokenize(text)
def bytelevelbpe_tokenize(self, text):
"""
Tokenize using Gpt2 BPE tokenizer.
"""
return self.bpe_tokenize(text)
@staticmethod
def re_tokenize(text):
r"""
Tokenize using a liberal regular expression.
Find boundaries between word characters, newlines, and non-word
        non-whitespace tokens (``r'\w+|[^\w\s]|\n'``).
This splits along whitespace and punctuation and keeps the newline as
a token in the returned list.
"""
return RETOK.findall(text)
@staticmethod
def split_tokenize(text):
"""
Tokenize on whitespace and some limited punctuation.
Splits tokens based on whitespace after adding whitespace around
punctuation.
Use re_tokenize if you want more robust handling of punctuation.
"""
return (
text.replace('.', ' . ')
.replace(',', ' , ')
.replace(';', ' ; ')
.replace(':', ' : ')
.replace('!', ' ! ')
.replace('?', ' ? ')
.split()
)
@staticmethod
def space_tokenize(text):
"""
Tokenize exactly on spaces.
Useful when text is pre-tokenized.
"""
return text.strip().split(' ')
def span_tokenize(self, text):
"""
Tokenize and find starting index of each token in the original string.
"""
tokens = self.tokenize(text)
curr_idx = 0
indices = []
for t in tokens:
while text[curr_idx] != t[0]:
curr_idx += 1
indices.append((curr_idx, curr_idx + len(t)))
curr_idx += len(t)
return tokens, indices
def tokenize(self, text, building=False):
"""
        Return a sequence of tokens from the given text.
Also handles special tokens for some tokenizers
"""
if self.tokenizer in ('re', 'split', 'space'):
for special_token in self.additional_special_tokens:
index = text.find(special_token)
if index == -1:
continue
left = text[:index]
right = text[index + len(special_token) :]
tokens_left = self.tokenize(left, building) if left else []
tokens_right = self.tokenize(right, building) if right else []
return tokens_left + [special_token] + tokens_right
if self.lower:
text = text.lower()
# calls the selected tokenizer function e.g. 're' => re_tokenize(text)
word_tokens = self.tokenizer_fun(text)
if not building and self.max_ngram_size > 1:
# search for ngrams during parse-time
# TODO(ahm): support build-time ngrams using word2vec heuristic?
word_tokens = find_ngrams(self.tok2ind, word_tokens, self.max_ngram_size)
return word_tokens
def bpe_tokenize(self, text):
"""
Return a sequence of BPE-tokens from the text.
"""
return self.bpe.encode(text)
def add_to_dict(self, tokens):
"""
Build dictionary from the list of provided tokens.
"""
self.built = False
for token in tokens:
self.add_token(token)
self.freq[token] += 1
def remove_tail(self, min_freq):
"""
Remove elements below the frequency cutoff from the dictionary.
"""
to_remove = []
for token, freq in self.freq.items():
if freq < min_freq:
# queue up removals since can't mutate dict during iteration
to_remove.append(token)
for token in to_remove:
del self.freq[token]
idx = self.tok2ind.pop(token)
del self.ind2tok[idx]
def _remove_non_bpe(self):
"""
Set the dictionary vocab to the bpe vocab, merging counts.
"""
to_remove = []
to_add = []
for token, freq in self.freq.items():
tokens = self.bpe_tokenize(token)
if len(tokens) != 1:
for t in tokens:
to_add.append((t, freq))
to_remove.append(token)
for token in to_remove:
del self.freq[token]
idx = self.tok2ind.pop(token)
del self.ind2tok[idx]
for token, freq in to_add:
self.add_token(token)
self.freq[token] += freq
def resize_to_max(self, maxtokens):
"""
Trims the dictionary to the maximum number of tokens.
"""
if maxtokens >= 0 and len(self.tok2ind) > maxtokens:
for k in range(maxtokens, len(self.ind2tok)):
v = self.ind2tok[k]
del self.ind2tok[k]
del self.tok2ind[v]
del self.freq[v]
def load(self, filename):
"""
Load pre-existing dictionary in 'token[<TAB>count]' format.
Initialize counts from other dictionary, or 0 if they aren't included.
"""
logging.info(f'loading dictionary from {filename}')
lower_special = self.null_token == self.null_token.lower()
SPECIAL_TOKENS = {'__UNK__', '__NULL__', '__END__', '__START__'}
with PathManager.open(filename, 'r', encoding='utf-8', errors='ignore') as read:
for line in read:
split = line.rstrip("\n\r").split('\t')
token = unescape(split[0])
if lower_special and token in SPECIAL_TOKENS:
token = token.lower()
cnt = int(split[1]) if len(split) > 1 else 0
self.freq[token] = cnt
self.add_token(token)
logging.info(f'num words = {len(self)}')
def save(self, filename=None, append=False, sort=True):
"""
Save dictionary to file.
Format is 'token<TAB>count' for every token in the dictionary, sorted
by count with the most frequent words first.
If ``append`` (default ``False``) is set to ``True``, appends instead of
overwriting.
If ``sort`` (default ``True``), then first sort the dictionary before saving.
"""
filename = self.opt['dict_file'] if filename is None else filename
make_dir(os.path.dirname(filename))
if self.tokenizer in ['bpe', 'gpt2', 'bytelevelbpe', 'slow_bytelevel_bpe']:
needs_removal = self.bpe.finalize(
self.freq, num_symbols=self.maxtokens, minfreq=self.minfreq
)
if needs_removal:
self._remove_non_bpe()
elif filename != self.opt.get('dict_file'):
# need to copy over the old codecs file
self.bpe.copy_codecs_file(filename + '.codecs')
if sort and self.bpe.should_sort():
self.sort(trim=False)
elif sort:
self.sort(trim=True)
logging.info(f'Saving dictionary to {filename}')
mode = 'a' if append else 'w'
with PathManager.open(filename, mode, encoding='utf-8') as write:
for i in self.ind2tok.keys():
tok = self.ind2tok[i]
cnt = self.freq[tok]
write.write('{tok}\t{cnt}\n'.format(tok=escape(tok), cnt=cnt))
# save opt file
with PathManager.open(filename + '.opt', 'w', encoding='utf-8') as handle:
json.dump(self.opt, handle, indent=4)
# save the byte level bpe model file as well
if self.tokenizer == 'bytelevelbpe' or self.tokenizer == 'slow_bytelevel_bpe':
# This saves filename-vocab.json and filename-merges.txt as
# hugging face tokenizer does
self.bpe.save(os.path.dirname(filename), os.path.basename(filename))
def sort(self, trim=True):
"""
Sort the dictionary.
Inline operation. Rearranges the dictionary so that the elements with
the lowest index have the highest counts. This reindexes the dictionary
according to the sorted frequencies, breaking ties alphabetically by
token.
:param bool trim:
If True, truncate the dictionary based on minfreq and maxtokens.
"""
if trim and self.tokenizer == 'gpt2':
raise RuntimeError("You should not trim the dictionary when using gpt-2.")
if trim and self.tokenizer == 'bytelevelbpe':
raise RuntimeError(
"You should not trim the dictionary when using bytelevelbpe."
)
# sort first by count, then alphabetically
if trim:
self.remove_tail(self.minfreq)
sorted_pairs = sorted(self.freq.items(), key=lambda x: (-x[1], x[0]))
new_tok2ind = {}
new_ind2tok = {}
for i, (tok, _) in enumerate(sorted_pairs):
new_tok2ind[tok] = i
new_ind2tok[i] = tok
self.tok2ind = new_tok2ind
self.ind2tok = new_ind2tok
if trim:
self.resize_to_max(self.maxtokens)
assert len(self.freq) == len(self.ind2tok) == len(self.tok2ind)
return sorted_pairs
def parse(self, txt_or_vec, vec_type=list):
"""
Parse either text or a vector of indices.
        Calls `~txt2vec` if `txt_or_vec` is a string, or `~vec2txt` otherwise.
:param vec_type:
type of the returned vector if the input is a string.
"""
# TODO: try to deprecate this, preferring straight txt2vec
if type(txt_or_vec) == str:
return self.txt2vec(txt_or_vec, vec_type)
else:
return self.vec2txt(txt_or_vec)
def txt2vec(self, text: str, vec_type=list):
"""
Convert a string to a vector (list of ints).
First runs a sentence tokenizer, then a word tokenizer.
:param type vec_type:
The type of the returned vector if the input is a string. Suggested
``list``, ``tuple``, ``set``, or ``np.ndarray``.
"""
assert isinstance(
text, str
), f'Input to txt2vec must be string, not {type(text)}'
itr = (self._word_lookup(token) for token in self.tokenize(text))
if vec_type == list or vec_type == tuple or vec_type == set:
res = vec_type(itr)
elif vec_type == np.ndarray:
res = np.fromiter(itr, np.int)
else:
raise RuntimeError('Type {} not supported by dict'.format(vec_type))
return res
def vec2txt(self, vector, delimiter=' '):
"""
Convert a vector of IDs to a string.
Converts a vector (iterable of ints) into a string, with each token separated by
the delimiter (default ``' '``).
"""
tokens = [self[int(idx)] for idx in vector]
if self.tokenizer in ['gpt2', 'bpe', 'slow_bytelevel_bpe']:
# if we used a BPE tokenizer we need to rejoin the encodings
text = self.bpe.decode(tokens, vector, delimiter)
elif self.tokenizer == 'bytelevelbpe':
            # We add special tokens at the beginning of the ParlAI dict but at the
            # end of the Hugging Face dict, so there is an offset of #(extra tokens) between them.
extra_tokens = 4 # length of special tokens
vector = [
self.bpe.special_tok_map[int(idx)]
if int(idx) in self.bpe.special_tok_map
else int(idx) - extra_tokens
for idx in vector
]
tokens = [self[int(idx)] for idx in vector]
text = self.bpe.decode(tokens, vector, delimiter)
else:
text = delimiter.join(self[int(idx)] for idx in vector)
return text
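    # Illustrative round trip (the token ids shown are hypothetical):
    #   vec = dictionary.txt2vec('hello world')   # e.g. [5, 12]
    #   dictionary.vec2txt(vec)                   # 'hello world'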
def act(self):
"""
Add words in the last observation to the dictionary.
This checks any fields in the message present in the --dict-textfields argument
(e.g. "text,labels").
"""
for textfield in self.textfields:
source = self.observation.get(textfield)
if source is None:
continue
# fields may be singleton strings or lists of strings.
# wrap the singleton strings in a list to iterate over them
if type(source) is str:
source = [source]
for text in source:
if text:
self.add_to_dict(self.tokenize(text))
return {'id': 'Dictionary'}
def share(self):
"""
Share internal dicts.
"""
shared = super().share()
shared['freq'] = self.freq
shared['tok2ind'] = self.tok2ind
shared['ind2tok'] = self.ind2tok
return shared
def shutdown(self):
"""
Save on shutdown if ``save_path`` is set.
"""
if hasattr(self, 'save_path'):
self.save(self.save_path)
def __str__(self):
"""
Return string representation of frequencies in dictionary.
"""
return str(self.freq)
def set_tokenization_mode(self, mode: TokenizationMode):
"""
Indicate what "kind" of tokenization is being done.
This can be Training Time / Testing Time, and it can be over
context or labels.
This is used to signal from TorchAgent to the dict that it's allowed
to enable things like BPE dropout. It is NOT used to indicate whether
the dictionary itself is in training time.
        """
        self._tokenization_mode = mode
if hasattr(self, 'bpe'):
# enable bpe dropout only in texts at training time. disable all
# other times
self.bpe.enable_bpe_dropout(mode == TokenizationMode.TRAIN_TIME_TEXT)
|
|
import numpy as np
import os
import cPickle
import gzip
# np.seterr(all='raise')
import matplotlib.pyplot as plt
from sklearn.metrics import adjusted_mutual_info_score, \
adjusted_rand_score, roc_auc_score
from pyhawkes.models import \
DiscreteTimeNetworkHawkesModelSpikeAndSlab, \
DiscreteTimeStandardHawkesModel
from pyhawkes.plotting.plotting import plot_network
def demo(seed=None):
"""
Create a discrete time Hawkes model and generate from it.
:return:
"""
if seed is None:
seed = np.random.randint(2**32)
print "Setting seed to ", seed
np.random.seed(seed)
###########################################################
# Load some example data.
# See data/synthetic/generate.py to create more.
###########################################################
data_path = os.path.join("data", "synthetic", "synthetic_K20_C4_T10000.pkl.gz")
with gzip.open(data_path, 'r') as f:
S, true_model = cPickle.load(f)
T = S.shape[0]
K = true_model.K
B = true_model.B
dt = true_model.dt
dt_max = true_model.dt_max
###########################################################
# Initialize with MAP estimation on a standard Hawkes model
###########################################################
init_with_map = True
if init_with_map:
init_len = T
print "Initializing with BFGS on first ", init_len, " time bins."
init_model = DiscreteTimeStandardHawkesModel(K=K, dt=dt, dt_max=dt_max, B=B,
alpha=1.0, beta=1.0)
init_model.add_data(S[:init_len, :])
init_model.initialize_to_background_rate()
init_model.fit_with_bfgs()
else:
init_model = None
###########################################################
# Create a test spike and slab model
###########################################################
# Copy the network hypers.
# Give the test model p, but not c, v, or m
network_hypers = true_model.network_hypers.copy()
network_hypers['c'] = None
network_hypers['v'] = None
network_hypers['m'] = None
test_model = DiscreteTimeNetworkHawkesModelSpikeAndSlab(K=K, dt=dt, dt_max=dt_max, B=B,
basis_hypers=true_model.basis_hypers,
bkgd_hypers=true_model.bkgd_hypers,
impulse_hypers=true_model.impulse_hypers,
weight_hypers=true_model.weight_hypers,
network_hypers=network_hypers)
test_model.add_data(S)
# F_test = test_model.basis.convolve_with_basis(S_test)
# Initialize with the standard model parameters
if init_model is not None:
test_model.initialize_with_standard_model(init_model)
# Initialize plots
ln, im_net, im_clus = initialize_plots(true_model, test_model, S)
###########################################################
# Fit the test model with Gibbs sampling
###########################################################
N_samples = 5
samples = []
lps = []
# plls = []
for itr in xrange(N_samples):
lps.append(test_model.log_probability())
# plls.append(test_model.heldout_log_likelihood(S_test, F=F_test))
samples.append(test_model.copy_sample())
print ""
print "Gibbs iteration ", itr
print "LP: ", lps[-1]
test_model.resample_model()
# Update plot
if itr % 1 == 0:
update_plots(itr, test_model, S, ln, im_clus, im_net)
###########################################################
# Analyze the samples
###########################################################
analyze_samples(true_model, init_model, samples, lps)
def initialize_plots(true_model, test_model, S):
K = true_model.K
C = true_model.C
R = true_model.compute_rate(S=S)
T = S.shape[0]
# Plot the true network
plt.ion()
plot_network(true_model.weight_model.A,
true_model.weight_model.W)
plt.pause(0.001)
# Plot the true and inferred firing rate
plt.figure(2)
plt.plot(np.arange(T), R[:,0], '-k', lw=2)
plt.ion()
ln = plt.plot(np.arange(T), test_model.compute_rate()[:,0], '-r')[0]
plt.show()
# Plot the block affiliations
plt.figure(3)
KC = np.zeros((K,C))
KC[np.arange(K), test_model.network.c] = 1.0
im_clus = plt.imshow(KC,
interpolation="none", cmap="Greys",
aspect=float(C)/K)
im_net = plot_network(np.ones((K,K)), test_model.weight_model.W_effective, vmax=0.5)
plt.pause(0.001)
plt.show()
plt.pause(0.001)
return ln, im_net, im_clus
def update_plots(itr, test_model, S, ln, im_clus, im_net):
K = test_model.K
C = test_model.C
T = S.shape[0]
plt.figure(2)
ln.set_data(np.arange(T), test_model.compute_rate()[:,0])
plt.title("\lambda_{%d}. Iteration %d" % (0, itr))
plt.pause(0.001)
plt.figure(3)
KC = np.zeros((K,C))
KC[np.arange(K), test_model.network.c] = 1.0
im_clus.set_data(KC)
plt.title("KxC: Iteration %d" % itr)
plt.pause(0.001)
plt.figure(4)
plt.title("W: Iteration %d" % itr)
im_net.set_data(test_model.weight_model.W_effective)
plt.pause(0.001)
def analyze_samples(true_model, init_model, samples, lps):
N_samples = len(samples)
# Compute sample statistics for second half of samples
A_samples = np.array([s.weight_model.A for s in samples])
W_samples = np.array([s.weight_model.W for s in samples])
g_samples = np.array([s.impulse_model.g for s in samples])
lambda0_samples = np.array([s.bias_model.lambda0 for s in samples])
c_samples = np.array([s.network.c for s in samples])
p_samples = np.array([s.network.p for s in samples])
v_samples = np.array([s.network.v for s in samples])
lps = np.array(lps)
offset = N_samples // 2
A_mean = A_samples[offset:, ...].mean(axis=0)
W_mean = W_samples[offset:, ...].mean(axis=0)
g_mean = g_samples[offset:, ...].mean(axis=0)
lambda0_mean = lambda0_samples[offset:, ...].mean(axis=0)
p_mean = p_samples[offset:, ...].mean(axis=0)
v_mean = v_samples[offset:, ...].mean(axis=0)
print "A true: ", true_model.weight_model.A
print "W true: ", true_model.weight_model.W
print "g true: ", true_model.impulse_model.g
print "lambda0 true: ", true_model.bias_model.lambda0
print ""
print "A mean: ", A_mean
print "W mean: ", W_mean
print "g mean: ", g_mean
print "lambda0 mean: ", lambda0_mean
print "v mean: ", v_mean
print "p mean: ", p_mean
plt.figure()
plt.plot(np.arange(N_samples), lps, 'k')
plt.xlabel("Iteration")
plt.ylabel("Log probability")
plt.show()
# # Predictive log likelihood
# pll_init = init_model.heldout_log_likelihood(S_test)
# plt.figure()
# plt.plot(np.arange(N_samples), pll_init * np.ones(N_samples), 'k')
# plt.plot(np.arange(N_samples), plls, 'r')
# plt.xlabel("Iteration")
# plt.ylabel("Predictive log probability")
# plt.show()
# Compute the link prediction accuracy curves
auc_init = roc_auc_score(true_model.weight_model.A.ravel(),
init_model.W.ravel())
auc_A_mean = roc_auc_score(true_model.weight_model.A.ravel(),
A_mean.ravel())
auc_W_mean = roc_auc_score(true_model.weight_model.A.ravel(),
W_mean.ravel())
aucs = []
for A in A_samples:
aucs.append(roc_auc_score(true_model.weight_model.A.ravel(), A.ravel()))
plt.figure()
plt.plot(aucs, '-r')
plt.plot(auc_A_mean * np.ones_like(aucs), '--r')
plt.plot(auc_W_mean * np.ones_like(aucs), '--b')
plt.plot(auc_init * np.ones_like(aucs), '--k')
plt.xlabel("Iteration")
plt.ylabel("Link prediction AUC")
plt.show()
# Compute the adjusted mutual info score of the clusterings
amis = []
arss = []
for c in c_samples:
amis.append(adjusted_mutual_info_score(true_model.network.c, c))
arss.append(adjusted_rand_score(true_model.network.c, c))
plt.figure()
plt.plot(np.arange(N_samples), amis, '-r')
plt.plot(np.arange(N_samples), arss, '-b')
plt.xlabel("Iteration")
plt.ylabel("Clustering score")
plt.ioff()
plt.show()
# demo(2203329564)
# demo(2728679796)
demo(11223344)
|
|
from logging import Handler
from queue import Queue
from threading import Thread
import logging.config
import logging
import asyncio
import datetime
import yaml
import sys
import os
from git import Repo
from functools import partial, wraps
from pythonjsonlogger import jsonlogger
RED = '\033[91m'
BLUE = '\033[94m'
BOLD = '\033[1m'
END = '\033[0m'
_BRANCH_NAME = None
http_pings_logs_disabled = True
def get_current_working_repo():
branch_name = None
current_tag = None
try:
repo = Repo(os.getcwd())
branch = repo.active_branch
branch_name = branch.name
tags = repo.tags
if tags and isinstance(tags, list):
current_tag = tags[-1].name
except:
pass
return (branch_name, current_tag)
def http_ping_filter(record):
if "GET /ping/" in record.getMessage():
return 0
return 1
class LogFormatHelper:
LogFormat = '%a %l %u %t "%r" %s %b %D "%{Referrer}i" "%{User-Agent}i" %{X-Request-ID}o'
class CustomTimeLoggingFormatter(logging.Formatter):
def formatTime(self, record, datefmt=None): # noqa
"""
        Overrides formatTime to use the datetime module instead of the time
        module so that timestamps can be rendered with microsecond precision;
        time.strftime has no microsecond directive.
"""
record.branchname = _BRANCH_NAME
if datefmt:
s = datetime.datetime.now().strftime(datefmt)
else:
t = datetime.datetime.now().strftime(self.default_time_format)
s = self.default_msec_format % (t, record.msecs)
return s
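    # For illustration (values are hypothetical): with the datefmt used in the
    # default config below, '%Y-%m-%d %H:%M:%S,%f', a record is stamped as
    # '2015-01-01 12:00:00,123456', whereas the stock time-based formatter
    # cannot emit the microsecond component.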
class CustomJsonFormatter(jsonlogger.JsonFormatter):
def __init__(self, *args, **kwargs):
self.extrad = kwargs.pop('extrad', {})
super().__init__(*args, **kwargs)
def add_fields(self, log_record, record, message_dict):
message_dict.update(self.extrad)
record.branchname = _BRANCH_NAME
super().add_fields(log_record, record, message_dict)
def patch_async_emit(handler: Handler):
base_emit = handler.emit
queue = Queue()
def loop():
while True:
record = queue.get()
try:
base_emit(record)
except:
print(sys.exc_info())
def async_emit(record):
queue.put(record)
thread = Thread(target=loop)
thread.daemon = True
thread.start()
handler.emit = async_emit
return handler
def patch_add_handler(logger):
base_add_handler = logger.addHandler
def async_add_handler(handler):
async_handler = patch_async_emit(handler)
base_add_handler(async_handler)
return async_add_handler
DEFAULT_CONFIG_YAML = """
# logging config
version: 1
disable_existing_loggers: False
handlers:
stream:
class: logging.StreamHandler
level: INFO
formatter: ctf
stream: ext://sys.stdout
stats:
class: logging.FileHandler
level: INFO
formatter: cjf
filename: logs/vyked_stats.log
exceptions:
class: logging.FileHandler
level: INFO
formatter: cjf
filename: logs/vyked_exceptions.log
service:
class: logging.FileHandler
level: INFO
formatter: ctf
filename: logs/vyked_service.log
formatters:
ctf:
(): vyked.utils.log.CustomTimeLoggingFormatter
format: '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
datefmt: '%Y-%m-%d %H:%M:%S,%f'
cjf:
(): vyked.utils.log.CustomJsonFormatter
format: '{ "timestamp":"%(asctime)s", "message":"%(message)s"}'
datefmt: '%Y-%m-%d %H:%M:%S,%f'
root:
handlers: [stream, service]
level: INFO
loggers:
registry:
handlers: [service,]
level: INFO
stats:
handlers: [stats]
level: INFO
exceptions:
handlers: [exceptions]
level: INFO
"""
def setup_logging(_):
try:
with open('config_log.json', 'r') as f:
config_dict = yaml.load(f.read())
except:
config_dict = yaml.load(DEFAULT_CONFIG_YAML)
logging.getLogger('asyncio').setLevel(logging.WARNING)
logger = logging.getLogger()
logger.handlers = []
logger.addHandler = patch_add_handler(logger)
global _BRANCH_NAME
(branch_name, current_tag) = get_current_working_repo()
_BRANCH_NAME = branch_name
if 'handlers' in config_dict:
for handler in config_dict['handlers']:
if 'branch_name' in config_dict['handlers'][handler] and config_dict['handlers'][handler]['branch_name'] == True:
config_dict['handlers'][handler]['release'] = current_tag if current_tag else None
if 'tags' in config_dict['handlers'][handler] and isinstance(config_dict['handlers'][handler]['tags'], dict):
config_dict['handlers'][handler]['tags']['branch'] = branch_name if branch_name else None
logging.config.dictConfig(config_dict)
if http_pings_logs_disabled:
for handler in logging.root.handlers:
handler.addFilter(http_ping_filter)
def log(fn=None, logger=logging.getLogger(), debug_level=logging.DEBUG):
"""
    Decorator that logs a function's call parameters and its result; usable
    with or without keyword arguments (logger, debug_level).
"""
if fn is None:
return partial(log, logger=logger, debug_level=debug_level)
@wraps(fn)
def func(*args, **kwargs):
arg_string = ""
for i in range(0, len(args)):
var_name = fn.__code__.co_varnames[i]
if var_name not in ['self', 'cls']:
arg_string += var_name + ":" + str(args[i]) + ","
arg_string = arg_string[0:len(arg_string) - 1]
string = (RED + BOLD + '>> ' + END + 'Calling {0}({1})'.format(fn.__name__, arg_string))
if len(kwargs):
string = (
RED + BOLD + '>> ' + END + 'Calling {0} with args {1} and kwargs {2}'.format(fn.__name__, arg_string,
kwargs))
logger.log(debug_level, string)
wrapped_fn = fn
if not asyncio.iscoroutine(fn):
wrapped_fn = asyncio.coroutine(fn)
try:
result = yield from wrapped_fn(*args, **kwargs)
string = BLUE + BOLD + '<< ' + END + 'Return {0} with result :{1}'.format(fn.__name__, result)
logger.log(debug_level, string)
return result
except Exception as e:
string = (RED + BOLD + '>> ' + END + '{0} raised exception :{1}'.format(fn.__name__, str(e)))
logger.log(debug_level, string)
raise e
return func
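# Illustrative usage of @log (a hedged sketch; the wrapper yields from a
# coroutine, so the decorated callable must be driven by an asyncio loop):
#
#     @log
#     def fetch(user_id):
#         return {'id': user_id}
#
#     result = yield from fetch(42)   # logs the call and the returned value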
def logx(supress_args=[], supress_all_args=False, supress_result=False, logger=logging.getLogger(),
debug_level=logging.DEBUG):
"""
    logs parameters and result
    takes arguments:
    supress_args - list of parameter names to suppress from the log output
    supress_all_args - boolean to suppress all positional arguments
    supress_result - boolean to suppress the result
    logger - logger instance used for output; defaults to the root logger
"""
def decorator(fn):
def func(*args, **kwargs):
if not supress_all_args:
arg_string = ""
for i in range(0, len(args)):
var_name = fn.__code__.co_varnames[i]
if var_name != "self" and var_name not in supress_args:
arg_string += var_name + ":" + str(args[i]) + ","
arg_string = arg_string[0:len(arg_string) - 1]
string = (RED + BOLD + '>> ' + END + 'Calling {0}({1})'.format(fn.__name__, arg_string))
if len(kwargs):
string = (
RED + BOLD + '>> ' + END + 'Calling {0} with args {1} and kwargs {2}'.format(
fn.__name__,
arg_string, kwargs))
logger.log(debug_level, string)
wrapped_fn = fn
if not asyncio.iscoroutine(fn):
wrapped_fn = asyncio.coroutine(fn)
result = yield from wrapped_fn(*args, **kwargs)
if not supress_result:
string = BLUE + BOLD + '<< ' + END + 'Return {0} with result : {1}'.format(fn.__name__, result)
logger.log(debug_level, string)
return result
return func
return decorator
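# Illustrative usage of @logx (a hedged sketch, driven the same way as @log):
# suppress the 'password' argument and the result when logging a call.
#
#     @logx(supress_args=['password'], supress_result=True)
#     def login(username, password):
#         return True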
|
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2009-2011, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Redistribution and use of this software in source and binary forms, with or
# without modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
#
# Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: matt.clark@eucalyptus.com
'''
Created on Mar 7, 2012
@author: clarkmatthew
Placeholder class to provide convenience for testing, modifying, and
retrieving Eucalyptus cloud property information.
The intention is to reduce the time spent looking up property names and values
outside of the eutester test lib, etc.
Note: Debug output for the tester.sys command is controlled by the
eutester/eucaops object
Sample:
cat my_cloud.conf
> 192.168.1.76 CENTOS 6.3 64 REPO [CLC WS]
> 192.168.1.77 CENTOS 6.3 64 REPO [SC00 CC00]
> 192.168.1.78 CENTOS 6.3 64 REPO [NC00]
from eucaops import Eucaops
from eutester import euproperties
tester = Eucaops(config_file='my_cloud.conf', password='mypassword')
ep_mgr = euproperties.Euproperty_Manager(tester,
verbose=True,
debugmethod=tester.debug)
#get some storage service properties, and some property values...
#Get/Set value from dynamic method created in Euproperty_Manager...
san_host_prop_value = ep_mgr.get_storage_sanhost_value()
ep_mgr.set_storage_sanhost_value('192.168.1.200')
#Get/set value from euproperty directly...
san_host_prop = ep_mgr.get_property('san_host', 'storage', 'PARTI00')
san_host_prop_value = san_host_prop.get()
san_host_prop.set('192.168.1.200')
#Get multiple properties at once based on certain filters...
storage_properties = ep_mgr.get_properties(service_type='storage')
partition1_properties = ep_mgr.get_properties(partition='partition1')
'''
import types
import re
import copy
class Euproperty_Type():
authentication = 'authentication'
autoscaling = 'autoscaling'
bootstrap = 'bootstrap'
cloud = 'cloud'
cloudwatch = 'cloudwatch'
cluster = 'cluster'
dns = 'dns'
imaging = 'imaging'
loadbalancing = 'loadbalancing'
objectstorage = 'objectstorage'
reporting = 'reporting'
storage = 'storage'
system = 'system'
tagging = 'tagging'
tokens = 'tokens'
vmwarebroker = 'vmwarebroker'
walrus = 'walrus'
www = 'www'
@classmethod
def get_type_by_string(cls, typestring):
try:
if hasattr(cls, str(typestring)):
return getattr(cls, str(typestring))
except AttributeError, ae:
            print ('Property type:' + str(typestring) +
                   " not defined, new property type?")
raise ae
class Euproperty():
def __init__(self, prop_mgr, property_string, service_type, partition,
name, value, mandatory=False, description=""):
self.prop_mgr = prop_mgr
self.service_type = Euproperty_Type.get_type_by_string(service_type)
self.partition = partition
self.name = name
self.value = value
self.property_string = property_string
self.prop_mgr = prop_mgr
self.lastvalue = value
self.mandatory = mandatory
self.description = description
def update(self):
newprop = self.prop_mgr.update_property_list(
property_name=self.property_string)[0]
        # Rebinding 'self' would have no effect outside this method; copy the
        # refreshed attributes onto this instance instead.
        self.__dict__.update(newprop.__dict__)
def get(self):
return self.value
def set(self, value):
return self.prop_mgr.set_property(self, value)
def reset_to_default(self):
return self.prop_mgr.reset_property_to_default(self)
def print_self(self, include_header=True, show_description=True,
print_method=None, printout=True):
if printout and not print_method:
print_method = self.prop_mgr.debug
name_len = 50
service_len = 20
part_len = 20
value_len = 30
line_len = 120
ret = ""
header = str('NAME').ljust(name_len)
header += "|" + str('SERVICE TYPE').center(service_len)
header += "|" + str('PARTITION').center(part_len)
header += "|" + str('VALUE').center(value_len)
header += "\n"
out = str(self.name).ljust(name_len)
out += "|" + str(self.service_type).center(service_len)
out += "|" + str(self.partition).center(part_len)
out += "|" + str(self.value).center(value_len)
out += "\n"
line = "-"
for x in xrange(0, line_len):
line += "-"
line += "\n"
if include_header:
ret = "\n" + line + header + line
ret += out
if show_description:
ret += "DESCRIPTION: " + self.description + "\n"
ret += line
if print_method:
print_method(ret)
return ret
class Property_Map():
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class Euproperty_Manager():
tester = None
verbose = False
debugmethod = None
def __init__(self, tester, verbose=False, machine=None,
service_url=None, debugmethod=None):
self.tester = tester
self.debugmethod = debugmethod or tester.debug
self.verbose = verbose
self.work_machine = machine or self.get_clc()
self.access_key = self.tester.aws_access_key_id
self.secret_key = self.tester.aws_secret_access_key
self.service_url = service_url or str(
'http://' + str(self.get_clc().hostname) +
':8773/services/Eucalytpus')
self.cmdpath = self.tester.eucapath+'/usr/sbin/'
self.properties = []
self.property_map = Property_Map()
self.update_property_list()
self.tester.property_manager = self
self.zones = self.tester.get_zones()
def get_clc(self):
return self.tester.service_manager.get_enabled_clc().machine
def debug(self, msg):
'''
        Simple method for printing debug output.
        msg - mandatory - string to be printed
        Output goes to self.debugmethod when one is set; otherwise falls back
        to print().
'''
if (self.debugmethod is None):
print (str(msg))
else:
self.debugmethod(msg)
def show_all_authentication_properties(self,
partition=None,
debug_method=None,
descriptions=True):
return self.show_all_properties(
service_type=Euproperty_Type.authentication,
partition=partition,
debug_method=debug_method,
descriptions=descriptions)
def show_all_bootstrap_properties(self, partition=None, debug_method=None, descriptions=True):
return self.show_all_properties(service_type=Euproperty_Type.bootstrap,
partition=partition,
debug_method=debug_method,
descriptions=descriptions)
def show_all_cloud_properties(self, partition=None, debug_method=None, descriptions=True):
return self.show_all_properties(service_type=Euproperty_Type.cloud,
partition=partition,
debug_method=debug_method,
descriptions=descriptions)
def show_all_cluster_properties(self, partition=None, debug_method=None, descriptions=True):
return self.show_all_properties(service_type=Euproperty_Type.cluster,
partition=partition,
debug_method=debug_method,
descriptions=descriptions)
def show_all_reporting_properties(self, partition=None, debug_method=None, descriptions=True):
return self.show_all_properties(service_type=Euproperty_Type.reporting,
partition=partition,
debug_method=debug_method,
descriptions=descriptions)
def show_all_storage_properties(self, partition=None, debug_method=None, descriptions=True):
return self.show_all_properties(service_type=Euproperty_Type.storage,
partition=partition,
debug_method=debug_method,
descriptions=descriptions)
def show_all_system_properties(self, partition=None, debug_method=None, descriptions=True):
return self.show_all_properties(service_type=Euproperty_Type.system,
partition=partition,
debug_method=debug_method,
descriptions=descriptions)
def show_all_vmwarebroker_properties(self,
partition=None,
debug_method=None,
descriptions=True):
return self.show_all_properties(
service_type=Euproperty_Type.vmwarebroker,
partition=partition,
debug_method=debug_method,
descriptions=descriptions)
def show_all_walrus_properties(self, partition=None, debug_method=None, descriptions=True):
return self.show_all_properties(service_type=Euproperty_Type.walrus,
partition=partition,
debug_method=debug_method,
descriptions=descriptions)
def show_all_objectstorage_properties(self,
partition=None,
debug_method=None,
descriptions=True):
return self.show_all_properties(service_type=Euproperty_Type.objectstorage,
partition=partition,
debug_method=debug_method,
descriptions=descriptions)
def show_all_www_properties(self, partition=None, debug_method=None, descriptions=True):
return self.show_all_properties(service_type=Euproperty_Type.www,
partition=partition,
debug_method=debug_method,
descriptions=descriptions)
def show_all_autoscaling_properties(self,
partition=None,
debug_method=None,
descriptions=True):
return self.show_all_properties(
service_type=Euproperty_Type.autoscaling,
partition=partition,
debug_method=debug_method,
descriptions=descriptions)
def show_all_loadbalancing_properties(self,
partition=None,
debug_method=None):
return self.show_all_properties(
service_type=Euproperty_Type.loadbalancing,
partition=partition,
debug_method=debug_method,
descriptions=True)
def show_all_tagging_properties(self, partition=None, debug_method=None):
return self.show_all_properties(service_type=Euproperty_Type.tagging,
partition=partition,
debug_method=debug_method,
descriptions=True)
def show_all_imaging_properties(self, partition=None, debug_method=None):
return self.show_all_properties(service_type=Euproperty_Type.imaging,
partition=partition,
debug_method=debug_method,
descriptions=True)
def show_all_properties(self,
partition=None,
service_type=None,
value=None,
search_string=None,
list=None,
debug_method=None,
descriptions=True):
debug_method = debug_method or self.debug
list = list or self.get_properties(partition=partition,
service_type=service_type,
value=value,
search_string=search_string)
first = list.pop(0)
buf = first.print_self(include_header=True,
show_description=descriptions,
printout=False)
count = 1
last_service_type = first.service_type
for prop in list:
count += 1
if prop.service_type != last_service_type:
last_service_type = prop.service_type
print_header = True
else:
print_header = False
buf += prop.print_self(include_header=print_header,
show_description=descriptions,
printout=False)
debug_method(buf)
def get_properties(self,
partition=None,
service_type=None,
value=None,
search_string=None,
force_update=False):
self.debug('get_properties: partition:' +
str(partition) + ", service_type:" + str(service_type) +
", value:" + str(value) + ", force_update:" +
str(force_update))
ret_props = []
if not self.properties or force_update:
self.update_property_list()
properties = copy.copy(self.properties)
if partition and properties:
properties = self.get_all_properties_for_partition(partition,
list=properties)
if service_type and properties:
properties = self.get_all_properties_for_service(service_type,
list=properties)
if search_string and properties:
properties = self.get_all_properties_by_search_string(
search_string, list=properties)
if properties:
if value:
for prop in properties:
if prop.value == value:
ret_props.append(prop)
else:
ret_props.extend(properties)
return ret_props
def get_property(self, name, service_type, partition, force_update=False):
self.debug('Get Property:' + str(name))
ret_prop = None
list = self.get_properties(partition=partition,
service_type=service_type,
force_update=force_update)
if list:
ret_prop = self.get_euproperty_by_name(name, list=list)
return ret_prop
def update_property_list(self, property_name=''):
newlist = []
newprop = None
self.debug("updating property list...")
self.zones = self.tester.get_zones()
cmdout = self.work_machine.sys(
self.cmdpath+'euca-describe-properties -v -U ' +
str(self.service_url) + ' -I ' + str(self.access_key) +
' -S ' + str(self.secret_key) + ' ' + property_name,
code=0, verbose=self.verbose)
for propstring in cmdout:
try:
if re.search("^PROPERTY", propstring):
newprop = self.parse_euproperty_from_string(propstring)
elif newprop:
if (re.search("^DESCRIPTION", propstring) and
re.search(newprop.name, propstring)):
newprop.description = \
self.parse_euproperty_description(propstring)
else:
newprop.value = str(newprop.value) + str(propstring)
except Exception, e:
self.debug('Error processing property line: ' + propstring)
raise e
if newprop and not newprop in newlist:
newlist.append(newprop)
if property_name:
for newprop in newlist:
if newprop:
                    # Replace the stale entry in place; rebinding the loop
                    # variable alone would not update self.properties.
                    for i, oldprop in enumerate(self.properties):
                        if oldprop.property_string == newprop.property_string:
                            self.properties[i] = newprop
self.create_dynamic_property_map_from_property(newprop)
else:
self.properties = newlist
self.property_map = Property_Map()
for prop in self.properties:
if prop:
self.create_dynamic_property_map_from_property(prop)
return newlist
def parse_euproperty_description(self, propstring):
'''
Example string to parse:
"DESCRIPTION www.http_port Listen to HTTP on this port."
'''
split = str(propstring).replace('DESCRIPTION', '').split()
description = " ".join(str(x) for x in split[1:])
return str(description)
def parse_property_value_from_string(self, propstring):
split = str(propstring).replace('PROPERTY', '').split()
prop_value = " ".join(str(x) for x in split[1:])
return str(prop_value)
def parse_euproperty_from_string(self, propstring):
'''
        Intended to convert a line of output from euca-describe-properties into
        a euproperty.
        :param propstring: line of output, example:
        "PROPERTY walrus.storagemaxbucketsizeinmb 5120"
        :returns: euproperty
'''
propstring = str(propstring).replace('PROPERTY', '').strip()
ret_service_type = None
ret_partition = None
splitstring = propstring.split()
#get the property string, example: "walrus.storagemaxbucketsizeinmb"
property_string = splitstring.pop(0)
ret_value = " ".join(splitstring)
for prop in self.properties:
#if this property is in our list, update the value and return
if prop.property_string == property_string:
prop.lastvalue = prop.value
prop.value = ret_value
return prop
ret_name = property_string
#...otherwise this property is not in our list yet,
# create a new property
#parse property string into values...
propattrs = property_string.split('.')
#See if the first element is a zone-partition
#First store and remove the zone-partition if it's in the list
for zone in self.zones:
if zone == propattrs[0]:
#Assume this is the zone-partition id/name,
# remove it from the propattrs list
ret_partition = propattrs.pop(0)
break
#Move along items in list until we reach a service type
for index in xrange(0, len(propattrs)):
try:
ret_service_type = Euproperty_Type.get_type_by_string(
propattrs[index])
propattrs.remove(propattrs[index])
break
except AttributeError:
pass
except IndexError:
self.debug("\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
"!!!!!!!!!!!!!!!!!!!!!!!!!\n" +
"Need to add new service? " +
"No service type found for: " +
str(property_string) +
"\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
"!!!!!!!!!!!!!!!!!!!!!!!!!\n")
ret_service_type = propattrs.pop(0)
#self.debug("ret_service_type: "+str(ret_service_type))
#Store the name of the property
ret_name = ".".join(propattrs)
newprop = Euproperty(self, property_string, ret_service_type,
ret_partition, ret_name, ret_value)
return newprop
def create_dynamic_property_map_from_property(self, euproperty):
context = self.property_map
if not hasattr(context, 'all'):
setattr(context, 'all', Property_Map())
all_map = getattr(context, 'all')
if euproperty.partition:
if not hasattr(context, str(euproperty.partition)):
setattr(context, str(euproperty.partition), Property_Map())
context = getattr(context, str(euproperty.partition))
if euproperty.service_type:
if not hasattr(context, str(euproperty.service_type)):
setattr(context, str(euproperty.service_type), Property_Map())
context = getattr(context, str(euproperty.service_type))
object_name = str(euproperty.name).replace('.', '_')
if not hasattr(context, object_name):
setattr(context, object_name, euproperty)
if not hasattr(all_map, object_name):
setattr(all_map, object_name, euproperty)
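    # For illustration (names are hypothetical): a euproperty with
    # partition='PARTI00', service_type='storage' and name='sanhost' becomes
    # reachable as self.property_map.PARTI00.storage.sanhost, and also under
    # self.property_map.all.sanhost; dots inside a property name are mapped to
    # underscores when the attribute is created.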
def get_euproperty_by_name(self, name, list=None):
props = []
list = list or self.properties
for property in list:
if property.name == name:
return property
raise EupropertyNotFoundException('Property not found by name:' +
str(name))
def get_all_properties_for_partition(self,
partition,
list=None,
verbose=False):
self.debug('Get all properties for partition:' + str(partition))
props = []
list = list or self.properties
for property in list:
if property.partition == partition:
if verbose:
self.debug('property:' + str(property.name) +
", prop.partition:" + str(property.partition) +
",partition:" + str(partition))
props.append(property)
self.debug('Returning list of len:' + str(len(props)))
return props
def get_all_properties_for_service(self, service, list=None):
props = []
list = list or self.properties
for property in list:
if property.service_type == service:
props.append(property)
return props
def get_all_properties_by_search_string(self, search_string, list=None):
props = []
list = list or self.properties
for property in list:
if re.search(search_string, property.property_string):
props.append(property)
return props
def set_property(self, property, value):
if isinstance(property, Euproperty):
return self.set_property_by_property_string(
property.property_string, value)
else:
return self.set_property_by_property_string(str(property), value)
def set_property(self, property, value, reset_to_default=False):
'''
        Sets the given Eucalyptus property to 'value' and returns the new value.
        property - mandatory - Euproperty instance, or a search string that
                   resolves to exactly one property
        value - mandatory - str representing the value to set the property to
        reset_to_default - optional - boolean; when True the property is reset
                   to its default value instead
'''
value = str(value)
if not isinstance(property, Euproperty):
try:
property = self.get_all_properties_by_search_string(property)
if len(property) > 1:
raise Exception('More than one euproperty found for '
'property string:' + str(property))
else:
property = property[0]
except Exception, e:
raise Exception('Could not fetch property to set. '
'Using string:' + str(property))
property.lastvalue = property.value
self.debug('Setting property(' + property.property_string +
') to value:' + str(value))
if reset_to_default:
output = self.work_machine.sys(
self.cmdpath + 'euca-modify-property -U ' +
str(self.service_url) + ' -I ' + str(self.access_key) +
' -S ' + str(self.secret_key) + ' -r ' +
str(property.property_string), code=0)
else:
output = self.work_machine.sys(
self.cmdpath + 'euca-modify-property -U ' +
str(self.service_url) + ' -I '+str(self.access_key) + ' -S ' +
str(self.secret_key) + ' -p ' +
str(property.property_string) + '=' + str(value),
code=0)
ret_value = None
if output:
for line in output:
line = line.strip()
if re.search('^PROPERTY', line):
ret_value = str(line).split()[2]
break
if ret_value is None:
raise EupropertiesException("set_property output from modify "
"was None")
#Confirm property value was set
if not reset_to_default and (ret_value != value) and\
not (not value and ret_value == '{}'):
ret_string = "\n".join(str(x) for x in output)
raise EupropertiesException(
"set property(" + property.property_string + ") to value(" +
str(value) + ") failed.Ret Value (" + str(ret_value) +
")\nRet String\n" + ret_string)
property.value = ret_value
return ret_value
def get_property_by_string(self, property_string):
property = None
for prop in self.properties:
if prop.property_string == property_string:
property = prop
break
return property
def set_property_value_by_string(self, property_string, value):
property = self.get_property_by_string(property_string)
if not property:
raise Exception('Property not found for:' + str(property_string))
property.set(value)
def get_property_value_by_string(self, property_string):
property = self.get_property_by_string(property_string)
if not property:
raise Exception('Property not found for:' + str(property_string))
return property.value
def reset_property_to_default(self, prop):
'''
        Resets the property 'prop' to its default value.
        Returns new value
        prop - mandatory - Euproperty instance or string representing the
               property to reset
'''
if not isinstance(prop, Euproperty):
prop = self.get_all_properties_by_search_string(prop)[0]
return self.set_property(prop, None, reset_to_default=True)
def get_property_default_value(self, prop, ireadthewarning=False):
'''
        Note: This hack method is intrusive! It will briefly reset the property.
        This is a temporary method to get a property's default value.
        prop - mandatory - string, eucalyptus property
        ireadthewarning - mandatory - boolean, acknowledging that this method
        is intrusive
'''
if (ireadthewarning is False):
raise EupropertiesException("ireadthewarning is set to false in "
"get_property_default_value")
original = prop.get()
default = self.reset_property_to_default(prop)
prop.set(original)
return default
class EupropertiesException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class EupropertyNotFoundException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
|
|
from model.contact import Contact
import re
class ContactHelper:
def __init__(self, app):
self.app = app
def fill_contact_form(self, contact):
wd = self.app.wd
self.change_field_value("firstname", contact.firstname)
self.change_field_value("middlename", contact.middlename)
self.change_field_value("lastname", contact.lastname)
self.change_field_value("nickname", contact.nickname)
self.change_field_value("title", contact.title)
self.change_field_value("company", contact.company)
self.change_field_value("address", contact.address)
self.change_field_value("home", contact.home)
self.change_field_value("mobile", contact.mobile)
self.change_field_value("work", contact.work)
self.change_field_value("fax", contact.fax)
self.change_field_value("email", contact.email)
self.change_field_value("email2", contact.email2)
self.change_field_value("email3", contact.email3)
self.change_field_value("homepage", contact.homepage)
if not wd.find_element_by_xpath("//div[@id='content']/form/select[1]//option[13]").is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[1]//option[13]").click()
if not wd.find_element_by_xpath("//div[@id='content']/form/select[2]//option[8]").is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[2]//option[8]").click()
self.change_field_value("byear", contact.byear)
if not wd.find_element_by_xpath("//div[@id='content']/form/select[3]//option[10]").is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[3]//option[10]").click()
if not wd.find_element_by_xpath("//div[@id='content']/form/select[4]//option[6]").is_selected():
wd.find_element_by_xpath("//div[@id='content']/form/select[4]//option[6]").click()
self.change_field_value("ayear", contact.ayear)
wd.find_element_by_xpath('//*/select[@name="new_group"]/option[@value="%s"]' % contact.contact_in_group).click()
self.change_field_value("address2", contact.address2)
self.change_field_value("phone2", contact.phone2)
self.change_field_value("notes", contact.notes)
def create_new(self, contact):
wd = self.app.wd
self.app.open_home_page()
# Go to "Add new" link
wd.find_element_by_link_text("add new").click()
self.fill_contact_form(contact)
wd.find_element_by_name("submit").click()
self.app.go_to_home_page()
self.contact_cache = None
def change_field_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
    def modify_first_contact(self, new_contact_data):
        self.modify_contact_by_index(new_contact_data, 0)
def modify_contact_by_index(self, new_contact_data, index):
wd = self.app.wd
self.app.open_home_page()
self.open_contact_to_edit_by_index(index)
self.fill_contact_form(new_contact_data)
wd.find_element_by_name("update").click()
self.app.go_to_home_page()
self.contact_cache = None
def modify_contact_by_id(self, new_contact_data, id):
wd = self.app.wd
self.app.open_home_page()
self.open_contact_to_edit_by_id(id)
self.fill_contact_form(new_contact_data)
wd.find_element_by_name("update").click()
self.app.go_to_home_page()
self.contact_cache = None
def del_first_contact(self):
wd = self.app.wd
self.del_contact_by_index(0)
def del_contact_by_index(self, index):
wd = self.app.wd
self.app.open_home_page()
wd.find_elements_by_name("selected[]")[index].click()
# submit deletion
wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click()
wd.switch_to_alert().accept()
self.app.go_to_home_page()
self.contact_cache = None
def del_contact_by_id(self, id):
wd = self.app.wd
self.app.open_home_page()
wd.find_element_by_css_selector("input[value='%s']" % id).click()
# submit deletion
wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click()
wd.switch_to_alert().accept()
self.app.go_to_home_page()
self.contact_cache = None
def count(self):
wd = self.app.wd
self.app.open_home_page()
return len(wd.find_elements_by_name("selected[]"))
contact_cache = None
def get_contact_list(self):
if self.contact_cache is None:
wd = self.app.wd
self.app.open_home_page()
self.contact_cache = []
for element in wd.find_elements_by_name("entry"):
cells = element.find_elements_by_tag_name("td")
id = cells[0].find_element_by_tag_name("input").get_attribute("value")
firstname = cells[2].text
lastname = cells[1].text
all_phones = cells[5].text
all_emails = cells[4].text
self.contact_cache.append(Contact(id=id, firstname=firstname, lastname=lastname,
all_phones_from_home_page=all_phones,
all_emails_from_home_page=all_emails))
return self.contact_cache
def open_contact_view_by_index(self, index):
wd = self.app.wd
self.app.open_home_page()
row = wd.find_elements_by_name("entry")[index]
cell = row.find_elements_by_tag_name("td")[6]
cell.find_element_by_tag_name("a").click()
def open_contact_to_edit_by_index(self, index):
wd = self.app.wd
self.app.open_home_page()
row = wd.find_elements_by_name("entry")[index]
cell = row.find_elements_by_tag_name("td")[7]
cell.find_element_by_tag_name("a").click()
def open_contact_to_edit_by_id(self, id):
wd = self.app.wd
self.app.open_home_page()
wd.find_element_by_css_selector('a[href="edit.php?id=%s"]' % id).click()
def get_contact_info_from_edit_page(self, index):
wd = self.app.wd
self.app.open_home_page()
self.open_contact_to_edit_by_index(index)
firstname = wd.find_element_by_name("firstname").get_attribute("value")
lastname = wd.find_element_by_name("lastname").get_attribute("value")
id = wd.find_element_by_name("id").get_attribute("value")
home = wd.find_element_by_name("home").get_attribute("value")
work = wd.find_element_by_name("work").get_attribute("value")
mobile = wd.find_element_by_name("mobile").get_attribute("value")
phone2 = wd.find_element_by_name("phone2").get_attribute("value")
email = wd.find_element_by_name("email").get_attribute("value")
email2 = wd.find_element_by_name("email2").get_attribute("value")
email3 = wd.find_element_by_name("email3").get_attribute("value")
address = wd.find_element_by_name("address").text
return Contact(firstname=firstname, lastname=lastname, id=id,
home=home, work=work, mobile=mobile, phone2=phone2,
email=email, email2=email2, email3=email3)
def get_contact_from_view_page(self, index):
wd = self.app.wd
self.open_contact_view_by_index(index)
text = wd.find_element_by_id("content").text
home = re.search("H: (.*)", text).group(1)
mobile = re.search("M: (.*)", text).group(1)
work = re.search("W: (.*)", text).group(1)
phone2 = re.search("P: (.*)", text).group(1)
return Contact(home=home, work=work, mobile=mobile, phone2=phone2)
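    # Sketch of the view-page text these regexes assume (values illustrative):
    #   H: 111-11-11
    #   M: 222-22-22
    #   W: 333-33-33
    #   P: 444-44-44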
def get_contacts_in_group(self, group_id):
if self.contact_cache is None:
wd = self.app.wd
self.app.open_home_page()
wd.find_element_by_xpath('//*/select[@name="group"]/option[@value="%s"]' % group_id).click()
self.contact_cache = []
for element in wd.find_elements_by_name("entry"):
cells = element.find_elements_by_tag_name("td")
id = cells[0].find_element_by_tag_name("input").get_attribute("value")
firstname = cells[2].text
lastname = cells[1].text
all_phones = cells[5].text
all_emails = cells[4].text
self.contact_cache.append(Contact(id=id, firstname=firstname, lastname=lastname,
all_phones_from_home_page=all_phones,
all_emails_from_home_page=all_emails))
return self.contact_cache
def add_contact_to_group(self, id, group_id):
wd = self.app.wd
self.app.open_home_page()
wd.find_element_by_css_selector("input[value='%s']" % id).click()
wd.find_element_by_xpath('//*/select[@name="to_group"]/option[@value="%s"]' % group_id).click()
wd.find_element_by_name("add").click()
self.contact_cache = None
def remove_from_group(self, id, group_id):
wd = self.app.wd
self.app.open_home_page()
wd.find_element_by_xpath('//*/select[@name="group"]/option[@value="%s"]' % group_id).click()
wd.find_element_by_css_selector("input[value='%s']" % id).click()
wd.find_element_by_name("remove").click()
self.contact_cache = None
|
|
# Copyright 2015, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Code generation for slicing.
This is about slice lookups, assignments, and deletions. There is also a
special case, for using index values instead of objects. The slice objects
are also created here, and can be used for indexing.
"""
from .ErrorCodes import (
getErrorExitBoolCode,
getErrorExitCode,
getReleaseCode,
getReleaseCodes
)
from .Helpers import generateChildExpressionsCode
def generateBuiltinSliceCode(to_name, expression, emit, context):
arg_names = generateChildExpressionsCode(
expression = expression,
emit = emit,
context = context
)
getSliceObjectCode(
to_name = to_name,
lower_name = arg_names[0],
upper_name = arg_names[1],
step_name = arg_names[2],
emit = emit,
context = context
)
def getSliceLookupCode(to_name, source_name, lower_name, upper_name, emit,
context):
emit(
"%s = LOOKUP_SLICE( %s, %s, %s );" % (
to_name,
source_name,
lower_name if lower_name is not None else "Py_None",
upper_name if upper_name is not None else "Py_None"
)
)
getReleaseCodes(
release_names = (source_name, lower_name, upper_name),
emit = emit,
context = context
)
getErrorExitCode(
check_name = to_name,
emit = emit,
context = context
)
context.addCleanupTempName(to_name)
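# For illustration (identifier names are hypothetical): a lookup with
# to_name='tmp_res', source_name='tmp_src' and no bounds given emits
#   tmp_res = LOOKUP_SLICE( tmp_src, Py_None, Py_None );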
def getSliceLookupIndexesCode(to_name, lower_name, upper_name, source_name,
emit, context):
emit(
"%s = LOOKUP_INDEX_SLICE( %s, %s, %s );" % (
to_name,
source_name,
lower_name,
upper_name,
)
)
getReleaseCode(
release_name = source_name,
emit = emit,
context = context
)
getErrorExitCode(
check_name = to_name,
emit = emit,
context = context
)
context.addCleanupTempName(to_name)
def getSliceObjectCode(to_name, lower_name, upper_name, step_name, emit,
context):
emit(
"%s = MAKE_SLICEOBJ3( %s, %s, %s );" % (
to_name,
lower_name if lower_name is not None else "Py_None",
upper_name if upper_name is not None else "Py_None",
step_name if step_name is not None else "Py_None",
)
)
getReleaseCodes(
release_names = (lower_name, upper_name, step_name),
emit = emit,
context = context
)
# Note: Cannot fail
context.addCleanupTempName(to_name)
def getSliceAssignmentIndexesCode(target_name, lower_name, upper_name,
value_name, emit, context):
res_name = context.getBoolResName()
emit(
"%s = SET_INDEX_SLICE( %s, %s, %s, %s );" % (
res_name,
target_name,
lower_name,
upper_name,
value_name
)
)
getReleaseCodes(
release_names = (value_name, target_name),
emit = emit,
context = context
)
getErrorExitBoolCode(
condition = "%s == false" % res_name,
emit = emit,
context = context
)
def getSliceAssignmentCode(target_name, lower_name, upper_name, value_name,
emit, context):
res_name = context.getBoolResName()
emit(
"%s = SET_SLICE( %s, %s, %s, %s );" % (
res_name,
target_name,
lower_name if lower_name is not None else "Py_None",
upper_name if upper_name is not None else "Py_None",
value_name
)
)
getReleaseCodes(
release_names = (target_name, lower_name, upper_name, value_name),
emit = emit,
context = context
)
getErrorExitBoolCode(
condition = "%s == false" % res_name,
emit = emit,
context = context
)
def getSliceDelIndexesCode(target_name, lower_name, upper_name, emit, context):
res_name = context.getBoolResName()
emit(
"%s = DEL_INDEX_SLICE( %s, %s, %s );" % (
res_name,
target_name,
lower_name,
upper_name
)
)
getReleaseCode(
release_name = target_name,
emit = emit,
context = context
)
getErrorExitBoolCode(
condition = "%s == false" % res_name,
emit = emit,
context = context
)
def getSliceDelCode(target_name, lower_name, upper_name, emit, context):
res_name = context.getBoolResName()
emit(
"%s = DEL_SLICE( %s, %s, %s );" % (
res_name,
target_name,
lower_name if lower_name is not None else "Py_None",
upper_name if upper_name is not None else "Py_None"
)
)
getReleaseCodes(
release_names = (target_name, lower_name, upper_name),
emit = emit,
context = context
)
getErrorExitBoolCode(
condition = "%s == false" % res_name,
emit = emit,
context = context
)
|
|
from collections import defaultdict, Counter
from constants import compare, RE_THRESHOLD, RE_DELIMITERS
from utils import list_get
import logging
import re
import settings
class RulesManager(object):
"""
To manage all of the rules. A rule is a single description centered around a metric. A rule can have a part denoted
by an underscore followed by a part value (it is useful to use a numerical part value). Multiple rule parts are
automatically combined by having the same rule name.
A single rule is a dict object:
    {
        "{{ prefix__rule_name__rule_part }}": {
            "ruleInfo": {
                "ruleName": "{{rule_name}}",
                "rulePart": "{{rule_part}}"
            },
            "ruleValue": {
                "metric": ...,
                "threshold": {"op": ..., "val": ...},
                "backoff": ...,
                "scale_factor": ...,
                "tolerance": ...,
                "weight": ...
            }
        }
    }
"""
def __init__(self, app_def, logger=None):
self.logger = logger or logging.getLogger(__name__)
self.app_def = app_def
self.rules = self._find_autoscaler_rules()
self._last_triggered_rule = None
def trigger_rules(self, metrics):
"""
Consumes a dict of metrics and attempts to match an application's rules to its metric information.
:param metrics: dict of metric values
:return: The recently triggered rule
"""
triggered_rule = self._get_matched_rule(metrics)
self._last_triggered_rule = triggered_rule
info_msg = "{app_name}: metrics: {metrics}"
self.logger.info(info_msg.format(app_name=self.app_def.app_name,
metrics=metrics))
info_msg = "{app_name}: last_triggered_rule set to: {triggered_rule}"
self.logger.info(info_msg.format(app_name=self.app_def.app_name,
triggered_rule=triggered_rule))
return self._last_triggered_rule
@property
def last_triggered_rule(self):
"""
        The last rule triggered by the RuleManager.
        :return: The last triggered rule, or None if no rule has been triggered
"""
triggered_rule = None
if self._last_triggered_rule is not None:
triggered_rule = self._last_triggered_rule
return triggered_rule
@property
def last_triggered_criteria(self):
"""
        A helper property that provides the core criteria of the last triggered rule.
:return: A dict of rule criteria
"""
criteria = {}
if self._last_triggered_rule is not None:
rule_value = self._last_triggered_rule[0].get("ruleValue")
criteria = dict(scale_factor=rule_value.get("scale_factor"),
tolerance=rule_value.get("tolerance"),
backoff=rule_value.get("backoff"))
return criteria
def _find_autoscaler_rules(self):
rules_found = {}
if self.app_def.labels:
for k, v in self.app_def.labels.items():
rule_match = re.match(r"^{prefix}_(?P<ruleName>[A-Za-z0-9]+)_?(?P<rulePart>[A-Za-z0-9]+)*".format(
prefix=settings.rules_prefix), k)
if rule_match is not None:
rule_values = re.split(RE_DELIMITERS, v)
if len(rule_values) < 5:
self.logger.warn("Scaling rule identified, but wrong number of arguments. Disregarding "
"{rule_name} = {rule_values}".format(rule_name=k,
rule_values=rule_values))
continue
rule_values_dict = dict(metric=rule_values[0],
threshold=self._parse_threshold(rule_values[1]),
tolerance=rule_values[2],
scale_factor=rule_values[3],
backoff=rule_values[4],
weight=list_get(rule_values, 5, 1.0))
rules_found[k] = dict(ruleValue=rule_values_dict, ruleInfo=rule_match.groupdict())
elif "max_instances" in k.lower():
self.max_instances = int(v)
elif "min_instances" in k.lower():
self.min_instances = int(v)
interpreted_rules = defaultdict(list)
if rules_found:
[interpreted_rules[v.get("ruleInfo").get("ruleName")].append(v)
for k, v in rules_found.items()]
return dict(interpreted_rules)
def is_app_participating(self):
""" Determine if the application is ready for scale actions
:return: application's participation in auto_scaling
"""
return self.app_def.is_app_participating
def is_app_within_min_or_max(self):
""" Determine if the application is ready for scale actions.
:return: application's participation in auto_scaling
"""
msg = "{0}: instances: min:{1}, running:{2}, max:{3}"
self.logger.info(msg.format(self.app_def.app_name,
int(self.min_instances),
int(self.app_def.tasksRunning),
int(self.max_instances)
))
return int(self.min_instances) <= \
int(self.app_def.tasksRunning) <= \
int(self.max_instances)
def is_app_ready(self):
""" Determine if the application is ready for scale actions
:return: application's readiness for scale actions
"""
result = False
if self.app_def.tasksRunning == self.app_def.instances:
result = True
self.logger.info("{0}: application ready: {1}".format(
self.app_def.app_name, result))
return result
def _get_matched_rule(self, metrics):
matched_rule = None
rules_found = []
for rule_name, rule in self.rules.items():
rule_criteria_count = len(rule)
matched_rule_criteria = [rule_item
for rule_item in rule
for metric_name, metric_value in metrics.items()
if rule_item.get("ruleValue").get("metric") in metric_name and
self._beyond_threshold(metric_value, rule_item.get("ruleValue").get("threshold"))]
if len(matched_rule_criteria) == rule_criteria_count:
rules_found.append(rule)
self.logger.debug("triggering rules by metrics: {rules_found}".format(rules_found=rules_found))
if len(rules_found) > 1:
matched_rule = self._find_best_matched_rule_by_criteria(rules_found)
elif len(rules_found) == 1:
matched_rule = rules_found[0]
return matched_rule
def _find_best_matched_rule_by_criteria(self, rules):
"""
This is the Highlander method; there can only be one...
Round 1 : Who has the most rule criteria matched?
:param rules:
:return:
"""
rule_criteria_counter = Counter()
for rule in rules:
rule_criteria_counter[rule[0].get("ruleInfo").get("ruleName")] = len(rule)
most_critical_rule_names = [k for k, v in rule_criteria_counter.items()
if v == rule_criteria_counter.most_common()[0][1]]
if len(most_critical_rule_names) > 1:
critical_rules = {name: next(rule
for rule_parts in rules
for rule in rule_parts
if name in rule.get("ruleInfo").get("ruleName"))
for name in most_critical_rule_names}
winning_rule = self._find_best_matched_rule_by_weight(critical_rules)
else:
winning_rule = [rule
for rule_parts in rules
for rule in rule_parts
if most_critical_rule_names[0] in
rule.get("ruleInfo").get("ruleName")]
self.logger.debug("winning rule by criteria: {winning_rule}".format(winning_rule=winning_rule))
return winning_rule
def _find_best_matched_rule_by_weight(self, rules):
"""
Round 2 : Which rule has the most weight? Weight is multiplied against scale_factor
:param rules: A dict of rules
:return: One rule with the maximum weight
"""
self.logger.debug(rules)
        rule_weights = {rule.get("ruleInfo").get("ruleName"):
                            abs(float(rule.get("ruleValue").get("scale_factor")) *
                                float(rule.get("ruleValue").get("weight")))
                        for rule_name, rule in rules.items()}
        # max() over the dict alone would compare rule names; select the rule
        # whose weighted scale factor is largest instead.
        heaviest_rule_name = max(rule_weights, key=rule_weights.get)
        self.logger.debug("winning rule by weight: {weighted_rule}".format(
            weighted_rule=rules.get(heaviest_rule_name)))
        return rules.get(heaviest_rule_name)
def _parse_threshold(self, threshold):
"""
Parses the string representing the threshold. A threshold is a comparison operator and number.
:param threshold: String
        :return: dict with "op" and "val" captured from the threshold string
"""
m = re.search(RE_THRESHOLD, threshold)
return m.groupdict()
def _beyond_threshold(self, metric, threshold):
"""
        This will answer whether the metric has met or exceeded the threshold.
        It uses the imported compare dictionary to look up the operation,
        threshold["op"], and passes the metric and the threshold value,
        threshold["val"], to the lambda expression that corresponds with the
        operation.
        :param metric: The performance metric (cpu, memory, etc...)
        :param threshold: dict with "op" and "val" keys, as produced by
                          _parse_threshold (operators: >, <, =, ==, <=, >=)
:return: (bool)
"""
return compare[threshold["op"]](float(metric), float(threshold["val"]))
|
|
from binascii import hexlify
import vdf
from steam.enums import EResult, EServerType
from steam.enums.emsg import EMsg
from steam.core.msg import MsgProto
from steam.utils.proto import proto_fill_from_dict
class Apps(object):
licenses = None #: :class:`dict` Accounts' package licenses
def __init__(self, *args, **kwargs):
super(Apps, self).__init__(*args, **kwargs)
self.licenses = {}
self.on(self.EVENT_DISCONNECTED, self.__handle_disconnect)
self.on(EMsg.ClientLicenseList, self._handle_licenses)
def __handle_disconnect(self):
self.licenses = {}
def _handle_licenses(self, message):
for entry in message.body.licenses:
self.licenses[entry.package_id] = entry
def get_player_count(self, app_id, timeout=5):
"""Get numbers of players for app id
:param app_id: app id
:type app_id: :class:`int`
:return: number of players
:rtype: :class:`int`, :class:`.EResult`
"""
resp = self.send_job_and_wait(MsgProto(EMsg.ClientGetNumberOfCurrentPlayersDP),
{'appid': app_id},
timeout=timeout
)
if resp is None:
return EResult.Timeout
elif resp.eresult == EResult.OK:
return resp.player_count
else:
return EResult(resp.eresult)
def get_product_info(self, apps=[], packages=[], meta_data_only=False, raw=False, auto_access_tokens=True, timeout=15):
"""Get product info for apps and packages
:param apps: items in the list should be either just ``app_id``, or :class:`dict`
:type apps: :class:`list`
:param packages: items in the list should be either just ``package_id``, or :class:`dict`
:type packages: :class:`list`
        :param meta_data_only: only meta data will be returned in the response (e.g. change number, missing_token, sha1)
        :type meta_data_only: :class:`bool`
        :param raw: Data buffer for each app is returned as bytes in its original form. Apps buffer is text VDF, and package buffer is binary VDF
        :type raw: :class:`bool`
        :param auto_access_tokens: automatically request and fill access tokens
        :type auto_access_tokens: :class:`bool`
:return: dict with ``apps`` and ``packages`` containing their info, see example below
:rtype: :class:`dict`, :class:`None`
.. code:: python
{'apps': {570: {...}, ...},
'packages': {123: {...}, ...}
}
Access token is needed to access full information for certain apps, and also package info.
        Each app and package has its own access token.
If a token is required then ``_missing_token=True`` in the response.
App access tokens are obtained by calling :meth:`get_access_tokens`, and are returned only
when the account has a license for the specified app. Example code:
.. code:: python
result = client.get_product_info(apps=[123])
if result['apps'][123]['_missing_token']:
tokens = client.get_access_token(apps=[123])
result = client.get_product_info(apps=[{'appid': 123,
'access_token': tokens['apps'][123]
}])
.. note::
It is best to just request access token for all apps, before sending a product info
request.
Package tokens are located in the account license list. See :attr:`.licenses`
.. code:: python
result = client.get_product_info(packages=[{'packageid': 123,
'access_token': client.licenses[123].access_token,
}])
"""
if not apps and not packages:
return
if auto_access_tokens:
tokens = self.get_access_tokens(app_ids=list(map(lambda app: app['appid'] if isinstance(app, dict) else app, apps)),
package_ids=list(map(lambda pkg: pkg['packageid'] if isinstance(pkg, dict) else pkg, packages))
)
else:
tokens = None
message = MsgProto(EMsg.ClientPICSProductInfoRequest)
for app in apps:
app_info = message.body.apps.add()
if tokens:
app_info.access_token = tokens['apps'].get(app['appid'] if isinstance(app, dict) else app, 0)
if isinstance(app, dict):
proto_fill_from_dict(app_info, app)
else:
app_info.appid = app
for package in packages:
package_info = message.body.packages.add()
if tokens:
package_info.access_token = tokens['packages'].get(package['packageid'] if isinstance(package, dict) else package, 0)
if isinstance(package, dict):
proto_fill_from_dict(package_info, package)
else:
package_info.packageid = package
message.body.meta_data_only = meta_data_only
message.body.num_prev_failed = 0
message.body.supports_package_tokens = 1
job_id = self.send_job(message)
data = dict(apps={}, packages={})
while True:
chunk = self.wait_event(job_id, timeout=timeout, raises=True)
chunk = chunk[0].body
for app in chunk.apps:
if app.buffer and not raw:
data['apps'][app.appid] = vdf.loads(app.buffer[:-1].decode('utf-8', 'replace'))['appinfo']
else:
data['apps'][app.appid] = {}
data['apps'][app.appid]['_missing_token'] = app.missing_token
data['apps'][app.appid]['_change_number'] = app.change_number
data['apps'][app.appid]['_sha'] = hexlify(app.sha).decode('ascii')
data['apps'][app.appid]['_size'] = app.size
if app.buffer and raw:
data['apps'][app.appid]['_buffer'] = app.buffer
for pkg in chunk.packages:
if pkg.buffer and not raw:
data['packages'][pkg.packageid] = vdf.binary_loads(pkg.buffer[4:]).get(str(pkg.packageid), {})
else:
data['packages'][pkg.packageid] = {}
data['packages'][pkg.packageid]['_missing_token'] = pkg.missing_token
data['packages'][pkg.packageid]['_change_number'] = pkg.change_number
data['packages'][pkg.packageid]['_sha'] = hexlify(pkg.sha).decode('ascii')
data['packages'][pkg.packageid]['_size'] = pkg.size
if pkg.buffer and raw:
data['packages'][pkg.packageid]['_buffer'] = pkg.buffer
if not chunk.response_pending:
break
return data
def get_changes_since(self, change_number, app_changes=True, package_changes=False):
"""Get changes since a change number
        :param change_number: change number to use as starting point
        :type change_number: :class:`int`
        :param app_changes: whether to include app changes
        :type app_changes: :class:`bool`
        :param package_changes: whether to include package changes
:type package_changes: :class:`bool`
:return: `CMsgClientPICSChangesSinceResponse <https://github.com/ValvePython/steam/blob/39627fe883feeed2206016bacd92cf0e4580ead6/protobufs/steammessages_clientserver.proto#L1171-L1191>`_
:rtype: proto message instance, or :class:`None` on timeout
"""
return self.send_job_and_wait(MsgProto(EMsg.ClientPICSChangesSinceRequest),
{
'since_change_number': change_number,
'send_app_info_changes': app_changes,
'send_package_info_changes': package_changes,
},
timeout=10
)
def get_app_ticket(self, app_id):
"""Get app ownership ticket
:param app_id: app id
:type app_id: :class:`int`
:return: `CMsgClientGetAppOwnershipTicketResponse <https://github.com/ValvePython/steam/blob/39627fe883feeed2206016bacd92cf0e4580ead6/protobufs/steammessages_clientserver.proto#L158-L162>`_
:rtype: proto message
"""
return self.send_job_and_wait(MsgProto(EMsg.ClientGetAppOwnershipTicket),
{'app_id': app_id},
timeout=10
)
def get_encrypted_app_ticket(self, app_id, userdata):
"""Gets the encrypted app ticket
:param app_id: app id
:type app_id: :class:`int`
:param userdata: userdata
:type userdata: :class:`bytes`
:return: `EncryptedAppTicket <https://github.com/ValvePython/steam/blob/39627fe883feeed2206016bacd92cf0e4580ead6/protobufs/encrypted_app_ticket.proto>`_
:rtype: proto message
"""
return self.send_job_and_wait(MsgProto(EMsg.ClientRequestEncryptedAppTicket),
{'app_id': app_id, 'userdata': userdata},
timeout=10
)
def get_depot_key(self, app_id, depot_id):
"""Get depot decryption key
:param app_id: app id
:type app_id: :class:`int`
:param depot_id: depot id
:type depot_id: :class:`int`
:return: `CMsgClientGetDepotDecryptionKeyResponse <https://github.com/ValvePython/steam/blob/39627fe883feeed2206016bacd92cf0e4580ead6/protobufs/steammessages_clientserver_2.proto#L533-L537>`_
:rtype: proto message
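A minimal usage sketch; the app and depot ids are illustrative and the field name
follows the linked protobuf message.
.. code:: python
resp = client.get_depot_key(570, 571)
depot_key = resp.depot_encryption_key if resp and resp.eresult == EResult.OK else None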
"""
return self.send_job_and_wait(MsgProto(EMsg.ClientGetDepotDecryptionKey),
{
'app_id': app_id,
'depot_id': depot_id,
},
timeout=10
)
def get_cdn_auth_token(self, depot_id, hostname):
"""Get CDN authentication token
.. note::
This token is no longer needed for access to CDN files
:param depot_id: depot id
:type depot_id: :class:`int`
:param hostname: cdn hostname
:type hostname: :class:`str`
:return: `CMsgClientGetCDNAuthTokenResponse <https://github.com/ValvePython/steam/blob/39627fe883feeed2206016bacd92cf0e4580ead6/protobufs/steammessages_clientserver_2.proto#L585-L589>`_
:rtype: proto message
"""
return self.send_job_and_wait(MsgProto(EMsg.ClientGetCDNAuthToken),
{
'depot_id': depot_id,
'host_name': hostname,
},
timeout=10
)
def get_access_tokens(self, app_ids=[], package_ids=[]):
"""Get access tokens
:param app_ids: list of app ids
:type app_ids: :class:`list`
:param package_ids: list of package ids
:type package_ids: :class:`list`
:return: dict with ``apps`` and ``packages`` containing their access tokens, see example below
:rtype: :class:`dict`, :class:`None`
.. code:: python
{'apps': {123: 8888888886, ...},
'packages': {456: 6666666666, ...}
}
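Example call; the ids are illustrative.
.. code:: python
tokens = client.get_access_tokens(app_ids=[123], package_ids=[456])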
"""
if not app_ids and not package_ids:
return
resp = self.send_job_and_wait(MsgProto(EMsg.ClientPICSAccessTokenRequest),
{
'appids': map(int, app_ids),
'packageids': map(int, package_ids),
},
timeout=10
)
if resp:
return {'apps': dict(map(lambda app: (app.appid, app.access_token), resp.app_access_tokens)),
'packages': dict(map(lambda pkg: (pkg.packageid, pkg.access_token), resp.package_access_tokens)),
}
def register_product_key(self, key):
"""Register/Redeem a CD-Key
:param key: CD-Key
:type key: :class:`str`
:return: format ``(eresult, result_details, receipt_info)``
:rtype: :class:`tuple`
Example ``receipt_info``:
.. code:: python
{'BasePrice': 0,
'CurrencyCode': 0,
'ErrorHeadline': '',
'ErrorLinkText': '',
'ErrorLinkURL': '',
'ErrorString': '',
'LineItemCount': 1,
'PaymentMethod': 1,
'PurchaseStatus': 1,
'ResultDetail': 0,
'Shipping': 0,
'Tax': 0,
'TotalDiscount': 0,
'TransactionID': UINT_64(111111111111111111),
'TransactionTime': 1473000000,
'lineitems': {'0': {'ItemDescription': 'Half-Life 3',
'TransactionID': UINT_64(11111111111111111),
'packageid': 1234}},
'packageid': -1}
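A minimal call sketch; the key below is a placeholder, not a real CD-Key.
.. code:: python
eresult, result_details, receipt_info = client.register_product_key('XXXXX-XXXXX-XXXXX')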
"""
resp = self.send_job_and_wait(MsgProto(EMsg.ClientRegisterKey),
{'key': key},
timeout=30,
)
if resp:
details = vdf.binary_loads(resp.purchase_receipt_info).get('MessageObject', None)
return EResult(resp.eresult), resp.purchase_result_details, details
else:
return EResult.Timeout, None, None
def request_free_license(self, app_ids):
""" Request license for free app(s)
:param app_ids: list of app ids
:type app_ids: :class:`list`
:return: format (:class:`.EResult`, granted_appids, granted_packageids)
:rtype: :class:`tuple`
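A minimal call sketch; the app id is illustrative.
.. code:: python
eresult, granted_appids, granted_packageids = client.request_free_license([123])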
"""
resp = self.send_job_and_wait(MsgProto(EMsg.ClientRequestFreeLicense),
{'appids': map(int, app_ids)},
timeout=10,
)
if resp:
return EResult(resp.eresult), resp.granted_appids, resp.granted_packageids
else:
return EResult.Timeout, None, None
|
|
import numpy as np
import plotly.offline as py
import plotly.graph_objs as go
from functools import reduce  # used by read_settings below; not a builtin on Python 3
py.init_notebook_mode(connected=True)
def plot1d(data_list, data_names=None, x_title='', y_title='',
x_log=False, y_log=False, show_dx=True):
"""
Produce a 1D plot
@param data_list: list of traces [ [x1, y1], [x2, y2], ...]
@param data_names: name for each trace, for the legend
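Example with illustrative data, plotting two named traces:
plot1d([[[1, 2, 3], [10, 20, 15]], [[1, 2, 3], [12, 18, 9]]],
data_names=['sample A', 'sample B'], x_title='Q', y_title='Counts')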
"""
from plotly.offline import plot
import plotly.graph_objs as go
# Create traces
if not isinstance(data_list, list):
raise RuntimeError("plot1d: data_list parameter is expected to be a list")
# Catch the case where the list is in the format [x y]
data = []
show_legend = False
if len(data_list) == 2 and not isinstance(data_list[0], list):
label = ''
if isinstance(data_names, list) and len(data_names) == 1:
label = data_names[0]
show_legend = True
data = [go.Scatter(name=label, x=data_list[0], y=data_list[1])]
else:
for i in range(len(data_list)):
label = ''
if isinstance(data_names, list) and len(data_names) == len(data_list):
label = data_names[i]
show_legend = True
err_x = {}
err_y = {}
if len(data_list[i]) >= 3:
err_y = dict(type='data', array=data_list[i][2], visible=True)
if len(data_list[i]) >= 4:
err_x = dict(type='data', array=data_list[i][3], visible=True)
if show_dx is False:
err_x['thickness'] = 0
data.append(go.Scatter(name=label, x=data_list[i][0], y=data_list[i][1],
error_x=err_x, error_y=err_y))
x_layout = dict(title=x_title, zeroline=False, exponentformat="power",
showexponent="all", showgrid=True,
showline=True, mirror="all", ticks="inside")
if x_log:
x_layout['type'] = 'log'
y_layout = dict(title=y_title, zeroline=False, exponentformat="power",
showexponent="all", showgrid=True,
showline=True, mirror="all", ticks="inside")
if y_log:
y_layout['type'] = 'log'
layout = go.Layout(
showlegend=show_legend,
autosize=True,
width=600,
height=400,
margin=dict(t=40, b=40, l=80, r=40),
hovermode='closest',
bargap=0,
xaxis=x_layout,
yaxis=y_layout
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, show_link=False)
def plot_heatmap(x, y, z, x_title='', y_title='', surface=False,
x_log=False, y_log=False):
"""
Produce a 2D plot
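Example with illustrative data (z has shape [len(y), len(x)]):
plot_heatmap(np.linspace(0, 1, 50), np.linspace(0, 2, 80),
np.random.rand(80, 50), x_title='Qx', y_title='Qz')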
"""
from plotly.offline import plot
import plotly.graph_objs as go
x_layout = dict(title=x_title, zeroline=False, exponentformat="power",
showexponent="all", showgrid=True,
showline=True, mirror="all", ticks="inside")
if x_log:
x_layout['type'] = 'log'
y_layout = dict(title=y_title, zeroline=False, exponentformat="power",
showexponent="all", showgrid=True,
showline=True, mirror="all", ticks="inside")
if y_log:
y_layout['type'] = 'log'
layout = go.Layout(
showlegend=False,
autosize=True,
width=600,
height=500,
margin=dict(t=40, b=40, l=80, r=40),
hovermode='closest',
bargap=0,
xaxis=x_layout,
yaxis=y_layout
)
colorscale=[
[0, "rgb(0,0,131)"], [0.125, "rgb(0,60,170)"], [0.375, "rgb(5,255,255)"],
[0.625, "rgb(255,255,0)"], [0.875, "rgb(250,0,0)"], [1, "rgb(128,0,0)"]
]
plot_type = 'surface' if surface else 'heatmap'
trace = go.Heatmap(z=z, x=x, y=y, autocolorscale=False,# type=plot_type,
hoverinfo="x+y+z", colorscale=colorscale)
fig = go.Figure(data=[trace], layout=layout)
py.iplot(fig, show_link=False)
def fill_dict(accum_dict, value):
if value[0] in ['#', 'File']:
accum_dict[value[0]] = value[1]
elif value[0] in ['#', 'DB_ID', 'P0', 'PN', 'dpix', 'number']:
accum_dict[value[0]] = int(value[1])
elif value[0] == 'extract_fan':
accum_dict[value[0]] = value[1] == 'True'
else:
accum_dict[value[0]] = float(value[1])
return accum_dict
def read_settings(file_path):
DATA_BLOCK = 0
DIRECT_BEAM_BLOCK = 1
DATA_RUN_BLOCK = 2
DIRECT_BEAM_HEADERS = ['#', 'DB_ID', 'P0', 'PN', 'x_pos', 'x_width',
'y_pos', 'y_width', 'bg_pos', 'bg_width',
'dpix', 'tth', 'number', 'File']
DATA_RUN_HEADERS = ['#', 'scale', 'P0', 'PN', 'x_pos', 'x_width',
'y_pos', 'y_width', 'bg_pos', 'bg_width',
'extract_fan', 'dpix', 'tth', 'number', 'DB_ID', 'File']
reduction_settings = {'direct_beam_runs': [], 'data_runs': [], 'process_type': 'Specular'}
fd = open(file_path, 'r')
current_block = DATA_BLOCK
for line in fd.readlines():
if "# Type:" in line:
toks = line.strip().split()
reduction_settings['process_type'] = toks[2]
continue
elif "[Direct Beam Runs]" in line:
current_block = DIRECT_BEAM_BLOCK
continue
elif "[Data Runs]" in line:
current_block = DATA_RUN_BLOCK
continue
elif "[Data]" in line:
break
if line.startswith('#') and current_block == DIRECT_BEAM_BLOCK:
# Skip the column names
if line.startswith('# DB_ID'):
continue
toks = line.strip().split()
if len(toks) == len(DIRECT_BEAM_HEADERS):
settings_dict = reduce(fill_dict, zip(DIRECT_BEAM_HEADERS, toks), {})
reduction_settings['direct_beam_runs'].append(settings_dict)
elif line.startswith('#') and current_block == DATA_RUN_BLOCK:
# Skip the column names
if line.startswith('# scale'):
continue
toks = line.strip().split()
if len(toks) == len(DATA_RUN_HEADERS):
settings_dict = reduce(fill_dict, zip(DATA_RUN_HEADERS, toks), {})
reduction_settings['data_runs'].append(settings_dict)
return reduction_settings
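# Sketch of the settings-file layout read_settings expects; the values are
# illustrative and reconstructed from the parsing logic above:
#   # Type: Specular
#   [Direct Beam Runs]
#   # DB_ID P0 PN x_pos x_width y_pos y_width bg_pos bg_width dpix tth number File
#   # 1 0 0 145 10 120 40 130 20 145.7 0.0 29160 /path/to/db_run.nxs
#   [Data Runs]
#   # scale P0 PN x_pos x_width y_pos y_width bg_pos bg_width extract_fan dpix tth number DB_ID File
#   # 1.0 0 0 145 10 120 40 130 20 True 145.7 0.6 29161 1 /path/to/data_run.nxs
#   [Data]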
def find_peaks(workspace, x_min=50, x_max=250):
""" Find reflectivity peaks """
roi=RefRoi(InputWorkspace=workspace, NXPixel=304, NYPixel=256, XPixelMin=50, XPixelMax=303,
YPixelMin=0, YPixelMax=255, IntegrateY=True, ConvertToQ=False)
peaks = Transpose(InputWorkspace=roi)
peaks = CropWorkspace(InputWorkspace=peaks, XMin=x_min, XMax=x_max)
output = LRPeakSelection(InputWorkspace=peaks)
x_peak = (output[0][0]+x_min, output[0][1]+x_min)
roi=RefRoi(InputWorkspace=workspace, NXPixel=304, NYPixel=256, XPixelMin=0, XPixelMax=303,
YPixelMin=0, YPixelMax=255, IntegrateY=False, ConvertToQ=False)
peaks2 = Transpose(InputWorkspace=roi)
peaks2 = CropWorkspace(InputWorkspace=peaks2, XMin=0, XMax=250)
output = LRPeakSelection(InputWorkspace=peaks2)
y_peak = output[1]
return x_peak, y_peak
def process_run(run_number, settings, direct_beam=True):
""" Process a run """
ws = LoadEventNexus(Filename="REF_M%s" % run_number, NXentryName="entry-Off_Off",
OutputWorkspace="%s_%s" % ("REF_M", run_number))
dirpix = ws.getRun()['DIRPIX'].value[0]
x_max = 250 if direct_beam else dirpix - 30
x_peak, y_peak = find_peaks(ws, x_max=x_max)
r_max = settings['x_pos'] + settings['x_width']/2.0
r_min = settings['x_pos'] - settings['x_width']/2.0
low_max = settings['y_pos'] + settings['y_width']/2.0
low_min = settings['y_pos'] - settings['y_width']/2.0
print("r%s - PEAK: [%s %s] Input: [%s %s]" % (run_number, x_peak[0], x_peak[1], r_min, r_max))
print("r%s - LOW: [%s %s] Input: [%s %s]" % (run_number, y_peak[0], y_peak[1], low_min, low_max))
|
|
# This file is part of beets.
# Copyright 2016
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Open metadata information in a text editor to let the user edit it.
"""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
from beets import plugins
from beets import util
from beets import ui
from beets.dbcore import types
from beets.ui.commands import _do_query
import subprocess
import yaml
from tempfile import NamedTemporaryFile
import os
# These "safe" types can avoid the format/parse cycle that most fields go
# through: they are safe to edit with native YAML types.
SAFE_TYPES = (types.Float, types.Integer, types.Boolean)
class ParseError(Exception):
"""The modified file is unreadable. The user should be offered a chance to
fix the error.
"""
def edit(filename):
"""Open `filename` in a text editor.
"""
cmd = util.shlex_split(util.editor_command())
cmd.append(filename)
subprocess.call(cmd)
def dump(arg):
"""Dump a sequence of dictionaries as YAML for editing.
"""
return yaml.safe_dump_all(
arg,
allow_unicode=True,
default_flow_style=False,
)
def load(s):
"""Read a sequence of YAML documents back to a list of dictionaries
with string keys.
Can raise a `ParseError`.
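For example, `load(dump([{u'title': u'x'}]))` returns a one-element list whose
dictionary has the key `title`.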
"""
try:
out = []
for d in yaml.load_all(s):
if not isinstance(d, dict):
raise ParseError(
'each entry must be a dictionary; found {}'.format(
type(d).__name__
)
)
# Convert all keys to strings. They started out as strings,
# but the user may have inadvertently messed this up.
out.append({unicode(k): v for k, v in d.items()})
except yaml.YAMLError as e:
raise ParseError('invalid YAML: {}'.format(e))
return out
def _safe_value(obj, key, value):
"""Check whether the `value` is safe to represent in YAML and trust as
returned from parsed YAML.
This ensures that values do not change their type when the user edits their
YAML representation.
"""
typ = obj._type(key)
return isinstance(typ, SAFE_TYPES) and isinstance(value, typ.model_type)
def flatten(obj, fields):
"""Represent `obj`, a `dbcore.Model` object, as a dictionary for
serialization. Only include the given `fields` if provided;
otherwise, include everything.
The resulting dictionary's keys are strings and the values are
safely YAML-serializable types.
"""
# Format each value.
d = {}
for key in obj.keys():
value = obj[key]
if _safe_value(obj, key, value):
# A safe value that is faithfully representable in YAML.
d[key] = value
else:
# A value that should be edited as a string.
d[key] = obj.formatted()[key]
# Possibly filter field names.
if fields:
return {k: v for k, v in d.items() if k in fields}
else:
return d
def apply(obj, data):
"""Set the fields of a `dbcore.Model` object according to a
dictionary.
This is the opposite of `flatten`. The `data` dictionary should have
strings as values.
"""
for key, value in data.items():
if _safe_value(obj, key, value):
# A safe value *stayed* represented as a safe type. Assign it
# directly.
obj[key] = value
else:
# Either the field was stringified originally or the user changed
# it from a safe type to an unsafe one. Parse it as a string.
obj.set_parse(key, unicode(value))
class EditPlugin(plugins.BeetsPlugin):
def __init__(self):
super(EditPlugin, self).__init__()
self.config.add({
# The default fields to edit.
'albumfields': 'album albumartist',
'itemfields': 'track title artist album',
# Silently ignore any changes to these fields.
'ignore_fields': 'id path',
})
def commands(self):
edit_command = ui.Subcommand(
'edit',
help='interactively edit metadata'
)
edit_command.parser.add_option(
'-f', '--field',
metavar='FIELD',
action='append',
help='edit this field also',
)
edit_command.parser.add_option(
'--all',
action='store_true', dest='all',
help='edit all fields',
)
edit_command.parser.add_album_option()
edit_command.func = self._edit_command
return [edit_command]
def _edit_command(self, lib, opts, args):
"""The CLI command function for the `beet edit` command.
"""
# Get the objects to edit.
query = ui.decargs(args)
items, albums = _do_query(lib, query, opts.album, False)
objs = albums if opts.album else items
if not objs:
ui.print_('Nothing to edit.')
return
# Get the fields to edit.
if opts.all:
fields = None
else:
fields = self._get_fields(opts.album, opts.field)
self.edit(opts.album, objs, fields)
def _get_fields(self, album, extra):
"""Get the set of fields to edit.
"""
# Start with the configured base fields.
if album:
fields = self.config['albumfields'].as_str_seq()
else:
fields = self.config['itemfields'].as_str_seq()
# Add the requested extra fields.
if extra:
fields += extra
# Ensure we always have the `id` field for identification.
fields.append('id')
return set(fields)
def edit(self, album, objs, fields):
"""The core editor function.
- `album`: A flag indicating whether we're editing Items or Albums.
- `objs`: The `Item`s or `Album`s to edit.
- `fields`: The set of field names to edit (or None to edit
everything).
"""
# Present the YAML to the user and let her change it.
success = self.edit_objects(objs, fields)
# Save the new data.
if success:
self.save_changes(objs)
def edit_objects(self, objs, fields):
"""Dump a set of Model objects to a file as text, ask the user
to edit it, and apply any changes to the objects.
Return a boolean indicating whether the edit succeeded.
"""
# Get the content to edit as raw data structures.
old_data = [flatten(o, fields) for o in objs]
# Set up a temporary file with the initial data for editing.
new = NamedTemporaryFile(suffix='.yaml', delete=False)
old_str = dump(old_data)
new.write(old_str)
new.close()
# Loop until we have parseable data and the user confirms.
try:
while True:
# Ask the user to edit the data.
edit(new.name)
# Read the data back after editing and check whether anything
# changed.
with open(new.name) as f:
new_str = f.read()
if new_str == old_str:
ui.print_("No changes; aborting.")
return False
# Parse the updated data.
try:
new_data = load(new_str)
except ParseError as e:
ui.print_("Could not read data: {}".format(e))
if ui.input_yn("Edit again to fix? (Y/n)", True):
continue
else:
return False
# Show the changes.
self.apply_data(objs, old_data, new_data)
changed = False
for obj in objs:
changed |= ui.show_model_changes(obj)
if not changed:
ui.print_('No changes to apply.')
return False
# Confirm the changes.
choice = ui.input_options(
('continue Editing', 'apply', 'cancel')
)
if choice == 'a': # Apply.
return True
elif choice == 'c': # Cancel.
return False
elif choice == 'e': # Keep editing.
# Reset the temporary changes to the objects.
for obj in objs:
obj.read()
continue
# Remove the temporary file before returning.
finally:
os.remove(new.name)
def apply_data(self, objs, old_data, new_data):
"""Take potentially-updated data and apply it to a set of Model
objects.
The objects are not written back to the database, so the changes
are temporary.
"""
if len(old_data) != len(new_data):
self._log.warn('number of objects changed from {} to {}',
len(old_data), len(new_data))
obj_by_id = {o.id: o for o in objs}
ignore_fields = self.config['ignore_fields'].as_str_seq()
for old_dict, new_dict in zip(old_data, new_data):
# Prohibit any changes to forbidden fields to avoid
# clobbering `id` and such by mistake.
forbidden = False
for key in ignore_fields:
if old_dict.get(key) != new_dict.get(key):
self._log.warn('ignoring object whose {} changed', key)
forbidden = True
break
if forbidden:
continue
id = int(old_dict['id'])
apply(obj_by_id[id], new_dict)
def save_changes(self, objs):
"""Save a list of updated Model objects to the database.
"""
# Save to the database and possibly write tags.
for ob in objs:
if ob._dirty:
self._log.debug('saving changes to {}', ob)
ob.try_sync(ui.should_write(), ui.should_move())
|
|
#!/usr/bin/env python
# pylint: disable=missing-docstring
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
# pylint: disable=wrong-import-order,wrong-import-position,unused-import
from __future__ import print_function # noqa: F401
import copy # noqa: F401
import json # noqa: F401
import os # noqa: F401
import re # noqa: F401
import shutil # noqa: F401
import tempfile # noqa: F401
import time # noqa: F401
try:
import ruamel.yaml as yaml # noqa: F401
except ImportError:
import yaml # noqa: F401
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/yedit -*- -*- -*-
DOCUMENTATION = '''
---
module: yedit
short_description: Create, modify, and idempotently manage yaml files.
description:
- Modify yaml files programmatically.
options:
state:
description:
- State represents whether to create, modify, delete, or list yaml
required: true
default: present
choices: ["present", "absent", "list"]
aliases: []
debug:
description:
- Turn on debug information.
required: false
default: false
aliases: []
src:
description:
- The file that is the target of the modifications.
required: false
default: None
aliases: []
content:
description:
- Content represents the yaml content you desire to work with. This
- could be the file contents to write or the in-memory data to modify.
required: false
default: None
aliases: []
content_type:
description:
- The python type of the content parameter.
required: false
default: 'dict'
aliases: []
key:
description:
- The path to the value you wish to modify. Empty string means the top of
- the document.
required: false
default: ''
aliases: []
value:
description:
- The incoming value of parameter 'key'.
required: false
default:
aliases: []
value_type:
description:
- The python type of the incoming value.
required: false
default: ''
aliases: []
update:
description:
- Whether the update should be performed on a dict/hash or list/array
- object.
required: false
default: false
aliases: []
append:
description:
- Whether to append to an array/list. When the key does not exist or is
- null, a new array is created. When the key is of a non-list type,
- nothing is done.
required: false
default: false
aliases: []
index:
description:
- Used in conjunction with the update parameter. This will update a
- specific index in an array/list.
required: false
default: false
aliases: []
curr_value:
description:
- Used in conjunction with the update parameter. This is the current
- value of 'key' in the yaml file.
required: false
default: false
aliases: []
curr_value_format:
description:
- Format of the incoming current value.
choices: ["yaml", "json", "str"]
required: false
default: false
aliases: []
backup:
description:
- Whether to make a backup copy of the current file when performing an
- edit.
required: false
default: true
aliases: []
separator:
description:
- The separator being used when parsing strings.
required: false
default: '.'
aliases: []
author:
- "Kenny Woodson <kwoodson@redhat.com>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
# Simple insert of key, value
- name: insert simple key, value
yedit:
src: somefile.yml
key: test
value: somevalue
state: present
# Results:
# test: somevalue
# Multilevel insert of key, value
- name: insert simple key, value
yedit:
src: somefile.yml
key: a#b#c
value: d
state: present
# Results:
# a:
# b:
# c: d
#
# multiple edits at the same time
- name: perform multiple edits
yedit:
src: somefile.yml
edits:
- key: a#b#c
value: d
- key: a#b#c#d
value: e
state: present
# Results:
# a:
# b:
# c:
# d: e
'''
# -*- -*- -*- End included fragment: doc/yedit -*- -*- -*-
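# Illustrative direct (non-Ansible) use of the Yedit class defined below; the
# file name, key, and value are placeholders:
#
#   yed = Yedit(filename='somefile.yml', separator='.')
#   yed.put('a.b.c', 'd')
#   yed.write()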
# -*- -*- -*- Begin included fragment: class/yedit.py -*- -*- -*-
class YeditException(Exception):
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
class Yedit(object):
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z{}/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
def __init__(self,
filename=None,
content=None,
content_type='yaml',
separator='.',
backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
self.load(content_type=self.content_type)
if self.__yaml_dict is None:
self.__yaml_dict = {}
@property
def separator(self):
''' getter method for separator '''
return self._separator
@separator.setter
def separator(self, inc_sep):
''' setter method for separator '''
self._separator = inc_sep
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
@staticmethod
def remove_entry(data, key, sep='.'):
''' remove data at location key '''
if key == '' and isinstance(data, dict):
data.clear()
return True
elif key == '' and isinstance(data, list):
del data[:]
return True
if not (key and Yedit.valid_key(key, sep)) and \
isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
''' Add an item to a dictionary with key notation a.b.c
d = {'a': {'b': {}}}
key = a#b#c, item = 'd'
results in {'a': {'b': {'c': 'd'}}}
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
data = data[dict_key]
continue
elif data and not isinstance(data, dict):
raise YeditException("Unexpected item type found while going through key " +
"path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
# process last index for add
# expected list entry
elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
# didn't add/update to an existing list, nor add/update key to a dict
# so we must have been provided some syntax like a.b.c[<int>] = "data" for a
# non-existent array
else:
raise YeditException("Error adding to object at path: {}".format(key))
return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
d = {'a': {'b': 'c'}}
key = a.b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
return data
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
tmp_filename = filename + '.yedit'
with open(tmp_filename, 'w') as yfd:
yfd.write(contents)
os.rename(tmp_filename, filename)
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripDumper if supported.
try:
Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
except AttributeError:
Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename is None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripLoader if supported.
try:
self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
except AttributeError:
self.yaml_dict = yaml.safe_load(contents)
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
''' check if value exists at path'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, list):
if value in entry:
return True
return False
elif isinstance(entry, dict):
if isinstance(value, dict):
rval = False
for key, val in value.items():
if entry[key] != val:
rval = False
break
else:
rval = True
return rval
return value in entry
return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# AUDIT:maybe-no-member makes sense due to loading data from
# a serialized format.
# pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
ind = entry.index(curr_value)
except ValueError:
return (False, self.yaml_dict)
elif index is not None:
ind = index
if ind is not None and entry[ind] != value:
entry[ind] = value
return (True, self.yaml_dict)
# see if it exists in the list
try:
ind = entry.index(value)
except ValueError:
# doesn't exist, append it
entry.append(value)
return (True, self.yaml_dict)
# already exists, return
if ind is not None:
return (False, self.yaml_dict)
return (False, self.yaml_dict)
def put(self, path, value):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry == value:
return (False, self.yaml_dict)
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is None:
return (False, self.yaml_dict)
# When path equals "" it is a special case.
# "" refers to the root of the document
# Only update the root path (entire document) when its a list or dict
if path == '':
if isinstance(result, list) or isinstance(result, dict):
self.yaml_dict = result
return (True, self.yaml_dict)
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
@staticmethod
def get_curr_value(invalue, val_type):
'''return the current value'''
if invalue is None:
return None
curr_value = invalue
if val_type == 'yaml':
curr_value = yaml.load(invalue)
elif val_type == 'json':
curr_value = json.loads(invalue)
return curr_value
@staticmethod
def parse_value(inc_value, vtype=''):
'''determine value type passed'''
true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
'on', 'On', 'ON', ]
false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
'off', 'Off', 'OFF']
# It came in as a string but you didn't specify value_type as string
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
# There is a special case where '' will turn into None after yaml loading it so skip
if isinstance(inc_value, str) and inc_value == '':
pass
# If vtype is not str then go ahead and attempt to yaml load it.
elif isinstance(inc_value, str) and 'str' not in vtype:
try:
inc_value = yaml.safe_load(inc_value)
except Exception:
raise YeditException('Could not determine type of incoming value. ' +
'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
@staticmethod
def process_edits(edits, yamlfile):
'''run through a list of edits and process them one-by-one'''
results = []
for edit in edits:
value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
if edit.get('action') == 'update':
# pylint: disable=line-too-long
curr_value = Yedit.get_curr_value(
Yedit.parse_value(edit.get('curr_value')),
edit.get('curr_value_format'))
rval = yamlfile.update(edit['key'],
value,
edit.get('index'),
curr_value)
elif edit.get('action') == 'append':
rval = yamlfile.append(edit['key'], value)
else:
rval = yamlfile.put(edit['key'], value)
if rval[0]:
results.append({'key': edit['key'], 'edit': rval[1]})
return {'changed': len(results) > 0, 'results': results}
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(params):
'''perform the idempotent crud operations'''
yamlfile = Yedit(filename=params['src'],
backup=params['backup'],
separator=params['separator'])
state = params['state']
if params['src']:
rval = yamlfile.load()
if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
'file exists, that it has correct permissions, and is valid yaml.'}
if state == 'list':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['key']:
rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
elif state == 'absent':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['update']:
rval = yamlfile.pop(params['key'], params['value'])
else:
rval = yamlfile.delete(params['key'])
if rval[0] and params['src']:
yamlfile.write()
return {'changed': rval[0], 'result': rval[1], 'state': state}
elif state == 'present':
# check if content is different than what is in the file
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
params['value'] is None:
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
# If we were passed a key, value then
# we encapsulate it in a list and process it
# Key, Value passed to the module : Converted to Edits list #
edits = []
_edit = {}
if params['value'] is not None:
_edit['value'] = params['value']
_edit['value_type'] = params['value_type']
_edit['key'] = params['key']
if params['update']:
_edit['action'] = 'update'
_edit['curr_value'] = params['curr_value']
_edit['curr_value_format'] = params['curr_value_format']
_edit['index'] = params['index']
elif params['append']:
_edit['action'] = 'append'
edits.append(_edit)
elif params['edits'] is not None:
edits = params['edits']
if edits:
results = Yedit.process_edits(edits, yamlfile)
# if there were changes and a src provided to us we need to write
if results['changed'] and params['src']:
yamlfile.write()
return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
'state': state}
# We were passed content but no src, key or value, or edits. Return contents in memory
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
return {'failed': True, 'msg': 'Unknown state passed'}
# -*- -*- -*- End included fragment: class/yedit.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: ansible/yedit.py -*- -*- -*-
# pylint: disable=too-many-branches
def main():
''' ansible oc module for secrets '''
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present', type='str',
choices=['present', 'absent', 'list']),
debug=dict(default=False, type='bool'),
src=dict(default=None, type='str'),
content=dict(default=None),
content_type=dict(default='dict', choices=['dict']),
key=dict(default='', type='str'),
value=dict(),
value_type=dict(default='', type='str'),
update=dict(default=False, type='bool'),
append=dict(default=False, type='bool'),
index=dict(default=None, type='int'),
curr_value=dict(default=None, type='str'),
curr_value_format=dict(default='yaml',
choices=['yaml', 'json', 'str'],
type='str'),
backup=dict(default=True, type='bool'),
separator=dict(default='.', type='str'),
edits=dict(default=None, type='list'),
),
mutually_exclusive=[["curr_value", "index"], ['update', "append"]],
required_one_of=[["content", "src"]],
)
# Verify we received either a valid key or edits with valid keys when receiving a src file.
# A valid key being not None or not ''.
if module.params['src'] is not None:
key_error = False
edit_error = False
if module.params['key'] in [None, '']:
key_error = True
if module.params['edits'] in [None, []]:
edit_error = True
else:
for edit in module.params['edits']:
if edit.get('key') in [None, '']:
edit_error = True
break
if key_error and edit_error:
module.fail_json(failed=True, msg='Empty value for parameter key not allowed.')
rval = Yedit.run_ansible(module.params)
if 'failed' in rval and rval['failed']:
module.fail_json(**rval)
module.exit_json(**rval)
if __name__ == '__main__':
main()
# -*- -*- -*- End included fragment: ansible/yedit.py -*- -*- -*-
|
|
import json
from tempodb.temporal.validate import convert_iso_stamp, check_time_param
from cursor import DataPointCursor, SeriesCursor, SingleValueCursor
class JSONSerializable(object):
"""Base class for objects that are serializable to and from JSON.
This class defines default methods for serializing each way that use
the class's "properties" class variable to determine what should be
serialized or deserialized. For example::
class MySerialized(JSONSerializable)
properties = ['foo', 'bar']
This would define a class that expects to have the 'foo' and 'bar'
keys in JSON data and would likewise serialize a JSON object with
those keys.
The base constructor calls the :meth:`from_json` method, which
enforces these constraints for object construction. If you override
this constructor (for example, to provide static initialization of
some variables), it is highly recommended that the subclass constructor
call this constructor at some point through super().
:param string json_text: the JSON string to deserialize from"""
properties = []
def __init__(self, json_text, response):
self.from_json(json_text)
self.response = response
def from_json(self, json_text):
"""Deserialize a JSON object into this object. This method will
check that the JSON object has the required keys and will set each
of the keys in that JSON object as an instance attribute of this
object.
:param json_text: the JSON text or object to deserialize from
:type json_text: dict or string
:raises ValueError: if the JSON object lacks an expected key
:rtype: None"""
#due to the architecture of response parsing, particularly
#where the API returns lists, the JSON might have already been
#parsed by the time it gets here
if type(json_text) in [str, unicode]:
j = json.loads(json_text)
else:
j = json_text
try:
for p in self.properties:
setattr(self, p, j[p])
except KeyError, e:
msg = 'Expected key %s in JSON object, found None' % str(e)
raise ValueError(msg)
def to_json(self):
"""Serialize an object to JSON based on its "properties" class
attribute.
:rtype: string"""
j = {}
for p in self.properties:
j[p] = getattr(self, p)
return json.dumps(j)
def to_dictionary(self):
"""Serialize an object into dictionary form. Useful if you have to
serialize an array of objects into JSON. Otherwise, if you call the
:meth:`to_json` method on each object in the list and then try to
dump the array, you end up with an array with one string."""
j = {}
for p in self.properties:
j[p] = getattr(self, p)
return j
#PLACEHOLDER FOR EMPTY RESPONSES
class Nothing(object):
"""Used to represent empty responses. This class should not be
used directly in user code."""
def __init__(self, *args, **kwargs):
pass
class SeriesSet(object):
"""Represents a set of Series objects as returned by the list series
TempoDB API endpoint. The SeriesSet exposes a cursor that can be iterated
over to examine each series return by the API."""
def __init__(self, json_text, response):
self.data = json.loads(json_text)
self.cursor = SeriesCursor(self.data, Series, response)
class Series(JSONSerializable):
"""Represents a Series object from the TempoDB API. Series objects
are serialized to and from JSON using the :meth:`to_json` and
:meth:`from_json` methods.
Domain object attributes:
* key: string
* name: string
* tags: list
* attributes: dictionary"""
properties = ['key', 'name', 'tags', 'attributes']
def __init__(self, json_text, response):
#the formatting of the series object returned from the series by key
#endpoint is slightly different
if isinstance(json_text, basestring):
j = json.loads(json_text)
else:
j = json_text
if 'series' in j:
self.from_json(j['series'])
else:
self.from_json(json_text)
self.response = response
class DataSet(JSONSerializable):
"""Represents a data set returned using the /data resource in the
TempoDB API. Depending on the original API call, some attributes of
this object (such as rollup) could be None."""
properties = ['data', 'rollup', 'tz']
def __init__(self, json_text, response):
#override to force the instantiation of a cursor
super(DataSet, self).__init__(json_text, response)
self.cursor = DataPointCursor(self.data, DataPoint, response)
if self.rollup is not None:
self.rollup = Rollup(self.rollup)
class SingleValue(JSONSerializable):
"""Represents a data set returned by calling the single value
endpoint of the TempoDB API. This domain object is not cursored, so
it is implemented separately from the more generic DataSet object.
Domain object attributes:
* series: :class:`Series` object
* data: :class:`DataPoint` object"""
properties = ['series', 'data']
def __init__(self, json_text, response):
#force conversion of the subobjects in this datatype after we get
#them
super(SingleValue, self).__init__(json_text, response)
self.series = Series(self.series, response)
if self.data is not None:
self.data = DataPoint(self.data, response, self.data.get('tz'))
def to_dictionary(self):
"""Serialize an object into dictionary form. Useful if you have to
serialize an array of objects into JSON. Otherwise, if you call the
:meth:`to_json` method on each object in the list and then try to
dump the array, you end up with an array with one string."""
j = {}
j['series'] = self.series.to_dictionary()
j['data'] = self.data.to_dictionary()
return j
def to_json(self):
"""Serialize an object to JSON based on its "properties" class
attribute.
:rtype: string"""
return json.dumps(self.to_dictionary())
class SeriesSummary(JSONSerializable):
properties = ['series', 'summary', 'tz', 'start', 'end']
def __init__(self, json_text, response, tz=None):
self.tz = tz
super(SeriesSummary, self).__init__(json_text, response)
self.series = Series(self.series, response)
self.summary = Summary(self.summary, response)
def from_json(self, json_text):
"""Deserialize a JSON object into this object. This method will
check that the JSON object has the required keys and will set each
of the keys in that JSON object as an instance attribute of this
object.
:param json_text: the JSON text or object to deserialize from
:type json_text: dict or string
:raises ValueError: if the JSON object lacks an expected key
:rtype: None"""
if type(json_text) in [str, unicode]:
j = json.loads(json_text)
else:
j = json_text
try:
for p in self.properties:
if p in ['start', 'end']:
val = convert_iso_stamp(j[p], self.tz)
setattr(self, p, val)
else:
setattr(self, p, j[p])
except KeyError:
pass
def to_json(self):
"""Serialize an object to JSON based on its "properties" class
attribute.
:rtype: string"""
return json.dumps(self.to_dictionary())
def to_dictionary(self):
"""Serialize an object into dictionary form. Useful if you have to
serialize an array of objects into JSON. Otherwise, if you call the
:meth:`to_json` method on each object in the list and then try to
dump the array, you end up with an array with one string."""
d = {'start': self.start.isoformat(),
'end': self.end.isoformat(),
'tz': self.tz,
'summary': self.summary.to_dictionary(),
'series': self.series.to_dictionary()
}
return d
class Summary(JSONSerializable):
"""Represents the summary received from the TempoDB API when a data read
request is sent. The properties are summary statistics for the dataset
returned."""
properties = ['mean', 'sum', 'min', 'max', 'stddev', 'count']
class Rollup(JSONSerializable):
"""Represents the rollup information returned from the TempoDB API when
the API calls demands it."""
properties = ['interval', 'function', 'tz']
class DataPoint(JSONSerializable):
"""Represents a single data point in a series. To construct these objects
in user code, use the class method :meth:`from_data`.
Domain object attributes:
* t: DateTime object
* v: int or float
* key: string (only present when writing DataPoints)
* id: string (only present when writing DataPoints)"""
properties = ['t', 'v', 'key', 'id']
def __init__(self, json_text, response, tz=None):
self.tz = tz
super(DataPoint, self).__init__(json_text, response)
@classmethod
def from_data(self, time, value, series_id=None, key=None, tz=None):
"""Create a DataPoint object from data, rather than a JSON object or
string. This should be used by user code to construct DataPoints from
Python-based data like Datetime objects and floats.
The series_id and key arguments are only necessary if you are doing a
multi write, in which case those arguments can be used to specify which
series the DataPoint belongs to.
If needed, the tz argument should be an Olson database compliant string
specifying the time zone for this DataPoint. This argument is most
often used internally when reading data from TempoDB.
:param time: the point in time for this reading
:type time: ISO8601 string or Datetime
:param value: the value for this reading
:type value: int or float
:param string series_id: (optional) a series ID for this point
:param string key: (optional) a key for this point
:param string tz: (optional) a timezone for this point
:rtype: :class:`DataPoint`"""
t = check_time_param(time)
if type(value) in [float, int]:
v = value
else:
raise ValueError('Values must be int or float. Got "%s".' %
str(value))
j = {
't': t,
'v': v,
'id': series_id,
'key': key
}
return DataPoint(j, None, tz=tz)
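#illustrative use (assumes datetime has been imported; values are placeholders):
#    DataPoint.from_data(datetime.datetime(2016, 1, 1), 12.34, key='my-series')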
def from_json(self, json_text):
"""Deserialize a JSON object into this object. This method will
check that the JSON object has the required keys and will set each
of the keys in that JSON object as an instance attribute of this
object.
:param json_text: the JSON text or object to deserialize from
:type json_text: dict or string
:raises ValueError: if the JSON object lacks an expected key
:rtype: None"""
if type(json_text) in [str, unicode]:
j = json.loads(json_text)
else:
j = json_text
try:
for p in self.properties:
if p == 't':
val = convert_iso_stamp(j[p], self.tz)
setattr(self, p, val)
else:
setattr(self, p, j[p])
#overriding this exception allows us to handle optional values like
#id and key which are only present during particular API calls like
#multi writes
except KeyError:
pass
def to_json(self):
"""Serialize an object to JSON based on its "properties" class
attribute.
:rtype: string"""
j = {}
for p in self.properties:
#this logic change allows us to work with optional values for
#this data type
try:
v = getattr(self, p)
except AttributeError:
continue
if v is not None:
if p == 't':
j[p] = getattr(self, p).isoformat()
else:
j[p] = getattr(self, p)
return json.dumps(j)
def to_dictionary(self):
"""Serialize an object into dictionary form. Useful if you have to
serialize an array of objects into JSON. Otherwise, if you call the
:meth:`to_json` method on each object in the list and then try to
dump the array, you end up with an array with one string."""
j = {}
for p in self.properties:
try:
v = getattr(self, p)
except AttributeError:
continue
if v is not None:
if p == 't':
j[p] = getattr(self, p).isoformat()
else:
j[p] = getattr(self, p)
return j
class DataPointFound(JSONSerializable):
"""Represents a specialized DataPoint returned by the the /find endpoint
of the TempoDB API. The start and end attributes indicate in what time
period the datapoint was found, the t attribute indicates the exact time
at which the point was found, and the v attribute indicates what the value
of the point was at that time.
Domain object attributes:
* start: DateTime object
* end: DateTime object
* v: int or long
* t: DateTime object"""
properties = ['interval', 'found']
def __init__(self, json_text, response, tz=None):
self.tz = tz
super(DataPointFound, self).__init__(json_text, response)
def from_json(self, json_text):
"""Deserialize a JSON object into this object. This method will
check that the JSON object has the required keys and will set each
of the keys in that JSON object as an instance attribute of this
object.
:param json_text: the JSON text or object to deserialize from
:type json_text: dict or string
:raises ValueError: if the JSON object lacks an expected key
:rtype: None"""
if type(json_text) in [str, unicode]:
j = json.loads(json_text)
else:
j = json_text
try:
for p in self.properties:
if p == 'interval':
self.start = convert_iso_stamp(j[p]['start'], self.tz)
self.end = convert_iso_stamp(j[p]['end'], self.tz)
elif p == 'found':
t = convert_iso_stamp(j[p]['t'], self.tz)
setattr(self, 't', t)
v = j[p]['v']
setattr(self, 'v', v)
#overriding this exception allows us to handle optional values like
#id and key which are only present during particular API calls like
#multi writes
except KeyError:
pass
def to_dictionary(self):
"""Serialize an object into dictionary form. Useful if you have to
serialize an array of objects into JSON. Otherwise, if you call the
:meth:`to_json` method on each object in the list and then try to
dump the array, you end up with an array with one string."""
j = {}
j['interval'] = {'start': self.start.isoformat(),
'end': self.end.isoformat()}
j['found'] = {'v': self.v, 't': self.t.isoformat()}
return j
def to_json(self):
"""Serialize an object to JSON based on its "properties" class
attribute.
:rtype: string"""
return json.dumps(self.to_dictionary())
class MultiPoint(JSONSerializable):
"""Represents a data point with values for multiple series at a single
timestamp. Returned when performing a multi-series query. The v attribute
is a dictionary mapping series key to value.
Domain object attributes:
* t: DateTime object
* v: dictionary"""
properties = ['t', 'v']
def __init__(self, json_text, response, tz=None):
self.tz = tz
super(MultiPoint, self).__init__(json_text, response)
def from_json(self, json_text):
"""Deserialize a JSON object into this object. This method will
check that the JSON object has the required keys and will set each
of the keys in that JSON object as an instance attribute of this
object.
:param json_text: the JSON text or object to deserialize from
:type json_text: dict or string
:raises ValueError: if the JSON object lacks an expected key
:rtype: None"""
if type(json_text) in [str, unicode]:
j = json.loads(json_text)
else:
j = json_text
try:
for p in self.properties:
if p == 't':
t = convert_iso_stamp(j[p], self.tz)
setattr(self, 't', t)
else:
setattr(self, p, j[p])
#overriding this exception allows us to handle optional values like
#id and key which are only present during particular API calls like
#multi writes
except KeyError:
pass
def to_json(self):
"""Serialize an object to JSON based on its "properties" class
attribute.
:rtype: string"""
j = {}
for p in self.properties:
try:
v = getattr(self, p)
except AttributeError:
continue
if v is not None:
if p == 't':
j[p] = getattr(self, p).isoformat()
else:
j[p] = getattr(self, p)
return json.dumps(j)
def get(self, k):
"""Convenience method for getting values for individual series out of
the MultiPoint. This is equivalent to calling::
>>> point.v.get('foo')
:param string k: the key to read
:rtype: number"""
return self.v.get(k)
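# Illustrative sketch (not part of the original module): the JSON shapes that
# DataPointFound.from_json and MultiPoint.from_json consume, and that
# to_dictionary/to_json reproduce. The timestamps and values below are
# hypothetical placeholder data; convert_iso_stamp turns the ISO-8601 strings
# into DateTime objects during deserialization.
def _example_payload_shapes():
    datapoint_found = {
        'interval': {'start': '2013-01-01T00:00:00',
                     'end': '2013-01-02T00:00:00'},
        'found': {'t': '2013-01-01T12:00:00', 'v': 42},
    }
    multipoint = {'t': '2013-01-01T12:00:00',
                  'v': {'series-1': 3.14, 'series-2': 2.72}}
    return datapoint_found, multipoint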
|
|
# Verify that gdb can pretty-print the various PyObject* types
#
# The code for testing gdb was adapted from similar work in Unladen Swallow's
# Lib/test/test_jit_gdb.py
import os
import re
import subprocess
import sys
import sysconfig
import unittest
import locale
# Is this Python configured to support threads?
try:
import _thread
except ImportError:
_thread = None
from test.support import run_unittest, findfile, python_is_optimized
try:
gdb_version, _ = subprocess.Popen(["gdb", "--version"],
stdout=subprocess.PIPE).communicate()
except OSError:
# This is what "no gdb" looks like. There may, however, be other
# errors that manifest this way too.
raise unittest.SkipTest("Couldn't find gdb on the path")
gdb_version_number = re.search(b"^GNU gdb [^\d]*(\d+)\.(\d)", gdb_version)
gdb_major_version = int(gdb_version_number.group(1))
gdb_minor_version = int(gdb_version_number.group(2))
if gdb_major_version < 7:
raise unittest.SkipTest("gdb versions before 7.0 didn't support python embedding"
" Saw:\n" + gdb_version.decode('ascii', 'replace'))
if not sysconfig.is_python_build():
raise unittest.SkipTest("test_gdb only works on source builds at the moment.")
# Location of custom hooks file in a repository checkout.
checkout_hook_path = os.path.join(os.path.dirname(sys.executable),
'python-gdb.py')
def run_gdb(*args, **env_vars):
"""Runs gdb in --batch mode with the additional arguments given by *args.
Returns its (stdout, stderr) decoded from utf-8 using the replace handler.
"""
if env_vars:
env = os.environ.copy()
env.update(env_vars)
else:
env = None
base_cmd = ('gdb', '--batch')
if (gdb_major_version, gdb_minor_version) >= (7, 4):
base_cmd += ('-iex', 'add-auto-load-safe-path ' + checkout_hook_path)
out, err = subprocess.Popen(base_cmd + args,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env,
).communicate()
return out.decode('utf-8', 'replace'), err.decode('utf-8', 'replace')
# Verify that "gdb" was built with the embedded python support enabled:
gdbpy_version, _ = run_gdb("--eval-command=python import sys; print(sys.version_info)")
if not gdbpy_version:
raise unittest.SkipTest("gdb not built with embedded python support")
# Verify that "gdb" can load our custom hooks, as OS security settings may
# disallow this without a customised .gdbinit.
cmd = ['--args', sys.executable]
_, gdbpy_errors = run_gdb('--args', sys.executable)
if "auto-loading has been declined" in gdbpy_errors:
msg = "gdb security settings prevent use of custom hooks: "
raise unittest.SkipTest(msg + gdbpy_errors.rstrip())
def gdb_has_frame_select():
# Does this build of gdb have gdb.Frame.select ?
stdout, _ = run_gdb("--eval-command=python print(dir(gdb.Frame))")
m = re.match(r'.*\[(.*)\].*', stdout)
if not m:
raise unittest.SkipTest("Unable to parse output from gdb.Frame.select test")
gdb_frame_dir = m.group(1).split(', ')
return "'select'" in gdb_frame_dir
HAS_PYUP_PYDOWN = gdb_has_frame_select()
BREAKPOINT_FN='builtin_id'
class DebuggerTests(unittest.TestCase):
"""Test that the debugger can debug Python."""
def get_stack_trace(self, source=None, script=None,
breakpoint=BREAKPOINT_FN,
cmds_after_breakpoint=None,
import_site=False):
'''
Run 'python -c SOURCE' under gdb with a breakpoint.
Support injecting commands after the breakpoint is reached
Returns the stdout from gdb
cmds_after_breakpoint: if provided, a list of strings: gdb commands
'''
# We use "set breakpoint pending yes" to avoid blocking with a:
# Function "foo" not defined.
# Make breakpoint pending on future shared library load? (y or [n])
        # error, which typically happens when python is dynamically linked (the
# breakpoints of interest are to be found in the shared library)
# When this happens, we still get:
# Function "textiowrapper_write" not defined.
# emitted to stderr each time, alas.
# Initially I had "--eval-command=continue" here, but removed it to
# avoid repeated print breakpoints when traversing hierarchical data
# structures
# Generate a list of commands in gdb's language:
commands = ['set breakpoint pending yes',
'break %s' % breakpoint,
'run']
if cmds_after_breakpoint:
commands += cmds_after_breakpoint
else:
commands += ['backtrace']
# print commands
# Use "commands" to generate the arguments with which to invoke "gdb":
args = ["gdb", "--batch"]
args += ['--eval-command=%s' % cmd for cmd in commands]
args += ["--args",
sys.executable]
if not import_site:
# -S suppresses the default 'import site'
args += ["-S"]
if source:
args += ["-c", source]
elif script:
args += [script]
# print args
# print (' '.join(args))
# Use "args" to invoke gdb, capturing stdout, stderr:
out, err = run_gdb(*args, PYTHONHASHSEED='0')
errlines = err.splitlines()
unexpected_errlines = []
# Ignore some benign messages on stderr.
ignore_patterns = (
'Function "%s" not defined.' % breakpoint,
"warning: no loadable sections found in added symbol-file"
" system-supplied DSO",
"warning: Unable to find libthread_db matching"
" inferior's thread library, thread debugging will"
" not be available.",
"warning: Cannot initialize thread debugging"
" library: Debugger service failed",
'warning: Could not load shared library symbols for '
'linux-vdso.so',
'warning: Could not load shared library symbols for '
'linux-gate.so',
'Do you need "set solib-search-path" or '
'"set sysroot"?',
)
for line in errlines:
if not line.startswith(ignore_patterns):
unexpected_errlines.append(line)
# Ensure no unexpected error messages:
self.assertEqual(unexpected_errlines, [])
return out
def get_gdb_repr(self, source,
cmds_after_breakpoint=None,
import_site=False):
        # Given an input python source representation of data,
        # run "python -c'id(DATA)'" under gdb with a breakpoint on
        # builtin_id, scrape out gdb's representation of the "op"
        # parameter, and verify that gdb displays the expected string.
        #
# For a nested structure, the first time we hit the breakpoint will
# give us the top-level structure
# NOTE: avoid decoding too much of the traceback as some
# undecodable characters may lurk there in optimized mode
# (issue #19743).
cmds_after_breakpoint = cmds_after_breakpoint or ["backtrace 1"]
gdb_output = self.get_stack_trace(source, breakpoint=BREAKPOINT_FN,
cmds_after_breakpoint=cmds_after_breakpoint,
import_site=import_site)
# gdb can insert additional '\n' and space characters in various places
# in its output, depending on the width of the terminal it's connected
# to (using its "wrap_here" function)
m = re.match('.*#0\s+builtin_id\s+\(self\=.*,\s+v=\s*(.*?)\)\s+at\s+\S*Python/bltinmodule.c.*',
gdb_output, re.DOTALL)
if not m:
self.fail('Unexpected gdb output: %r\n%s' % (gdb_output, gdb_output))
return m.group(1), gdb_output
def assertEndsWith(self, actual, exp_end):
'''Ensure that the given "actual" string ends with "exp_end"'''
self.assertTrue(actual.endswith(exp_end),
msg='%r did not end with %r' % (actual, exp_end))
def assertMultilineMatches(self, actual, pattern):
m = re.match(pattern, actual, re.DOTALL)
if not m:
self.fail(msg='%r did not match %r' % (actual, pattern))
def get_sample_script(self):
return findfile('gdb_sample.py')
class PrettyPrintTests(DebuggerTests):
def test_getting_backtrace(self):
gdb_output = self.get_stack_trace('id(42)')
self.assertTrue(BREAKPOINT_FN in gdb_output)
def assertGdbRepr(self, val, exp_repr=None):
# Ensure that gdb's rendering of the value in a debugged process
# matches repr(value) in this process:
gdb_repr, gdb_output = self.get_gdb_repr('id(' + ascii(val) + ')')
if not exp_repr:
exp_repr = repr(val)
self.assertEqual(gdb_repr, exp_repr,
('%r did not equal expected %r; full output was:\n%s'
% (gdb_repr, exp_repr, gdb_output)))
def test_int(self):
'Verify the pretty-printing of various int values'
self.assertGdbRepr(42)
self.assertGdbRepr(0)
self.assertGdbRepr(-7)
self.assertGdbRepr(1000000000000)
self.assertGdbRepr(-1000000000000000)
def test_singletons(self):
'Verify the pretty-printing of True, False and None'
self.assertGdbRepr(True)
self.assertGdbRepr(False)
self.assertGdbRepr(None)
def test_dicts(self):
'Verify the pretty-printing of dictionaries'
self.assertGdbRepr({})
self.assertGdbRepr({'foo': 'bar'})
self.assertGdbRepr({'foo': 'bar', 'douglas': 42},
"{'foo': 'bar', 'douglas': 42}")
def test_lists(self):
'Verify the pretty-printing of lists'
self.assertGdbRepr([])
self.assertGdbRepr(list(range(5)))
def test_bytes(self):
'Verify the pretty-printing of bytes'
self.assertGdbRepr(b'')
self.assertGdbRepr(b'And now for something hopefully the same')
self.assertGdbRepr(b'string with embedded NUL here \0 and then some more text')
self.assertGdbRepr(b'this is a tab:\t'
b' this is a slash-N:\n'
b' this is a slash-R:\r'
)
self.assertGdbRepr(b'this is byte 255:\xff and byte 128:\x80')
self.assertGdbRepr(bytes([b for b in range(255)]))
def test_strings(self):
'Verify the pretty-printing of unicode strings'
encoding = locale.getpreferredencoding()
def check_repr(text):
try:
text.encode(encoding)
printable = True
except UnicodeEncodeError:
self.assertGdbRepr(text, ascii(text))
else:
self.assertGdbRepr(text)
self.assertGdbRepr('')
self.assertGdbRepr('And now for something hopefully the same')
self.assertGdbRepr('string with embedded NUL here \0 and then some more text')
# Test printing a single character:
# U+2620 SKULL AND CROSSBONES
check_repr('\u2620')
# Test printing a Japanese unicode string
# (I believe this reads "mojibake", using 3 characters from the CJK
# Unified Ideographs area, followed by U+3051 HIRAGANA LETTER KE)
check_repr('\u6587\u5b57\u5316\u3051')
# Test a character outside the BMP:
# U+1D121 MUSICAL SYMBOL C CLEF
# This is:
# UTF-8: 0xF0 0x9D 0x84 0xA1
# UTF-16: 0xD834 0xDD21
check_repr(chr(0x1D121))
def test_tuples(self):
'Verify the pretty-printing of tuples'
self.assertGdbRepr(tuple())
self.assertGdbRepr((1,), '(1,)')
self.assertGdbRepr(('foo', 'bar', 'baz'))
def test_sets(self):
'Verify the pretty-printing of sets'
self.assertGdbRepr(set())
self.assertGdbRepr(set(['a', 'b']), "{'a', 'b'}")
self.assertGdbRepr(set([4, 5, 6]), "{4, 5, 6}")
# Ensure that we handle sets containing the "dummy" key value,
# which happens on deletion:
gdb_repr, gdb_output = self.get_gdb_repr('''s = set(['a','b'])
s.pop()
id(s)''')
self.assertEqual(gdb_repr, "{'b'}")
def test_frozensets(self):
'Verify the pretty-printing of frozensets'
self.assertGdbRepr(frozenset())
self.assertGdbRepr(frozenset(['a', 'b']), "frozenset({'a', 'b'})")
self.assertGdbRepr(frozenset([4, 5, 6]), "frozenset({4, 5, 6})")
def test_exceptions(self):
# Test a RuntimeError
gdb_repr, gdb_output = self.get_gdb_repr('''
try:
raise RuntimeError("I am an error")
except RuntimeError as e:
id(e)
''')
self.assertEqual(gdb_repr,
"RuntimeError('I am an error',)")
# Test division by zero:
gdb_repr, gdb_output = self.get_gdb_repr('''
try:
a = 1 / 0
except ZeroDivisionError as e:
id(e)
''')
self.assertEqual(gdb_repr,
"ZeroDivisionError('division by zero',)")
def test_modern_class(self):
'Verify the pretty-printing of new-style class instances'
gdb_repr, gdb_output = self.get_gdb_repr('''
class Foo:
pass
foo = Foo()
foo.an_int = 42
id(foo)''')
m = re.match(r'<Foo\(an_int=42\) at remote 0x-?[0-9a-f]+>', gdb_repr)
self.assertTrue(m,
msg='Unexpected new-style class rendering %r' % gdb_repr)
def test_subclassing_list(self):
'Verify the pretty-printing of an instance of a list subclass'
gdb_repr, gdb_output = self.get_gdb_repr('''
class Foo(list):
pass
foo = Foo()
foo += [1, 2, 3]
foo.an_int = 42
id(foo)''')
m = re.match(r'<Foo\(an_int=42\) at remote 0x-?[0-9a-f]+>', gdb_repr)
self.assertTrue(m,
msg='Unexpected new-style class rendering %r' % gdb_repr)
def test_subclassing_tuple(self):
'Verify the pretty-printing of an instance of a tuple subclass'
# This should exercise the negative tp_dictoffset code in the
# new-style class support
gdb_repr, gdb_output = self.get_gdb_repr('''
class Foo(tuple):
pass
foo = Foo((1, 2, 3))
foo.an_int = 42
id(foo)''')
m = re.match(r'<Foo\(an_int=42\) at remote 0x-?[0-9a-f]+>', gdb_repr)
self.assertTrue(m,
msg='Unexpected new-style class rendering %r' % gdb_repr)
def assertSane(self, source, corruption, exprepr=None):
'''Run Python under gdb, corrupting variables in the inferior process
immediately before taking a backtrace.
Verify that the variable's representation is the expected failsafe
representation'''
if corruption:
cmds_after_breakpoint=[corruption, 'backtrace']
else:
cmds_after_breakpoint=['backtrace']
gdb_repr, gdb_output = \
self.get_gdb_repr(source,
cmds_after_breakpoint=cmds_after_breakpoint)
if exprepr:
if gdb_repr == exprepr:
# gdb managed to print the value in spite of the corruption;
# this is good (see http://bugs.python.org/issue8330)
return
# Match anything for the type name; 0xDEADBEEF could point to
# something arbitrary (see http://bugs.python.org/issue8330)
pattern = '<.* at remote 0x-?[0-9a-f]+>'
m = re.match(pattern, gdb_repr)
if not m:
self.fail('Unexpected gdb representation: %r\n%s' % \
(gdb_repr, gdb_output))
def test_NULL_ptr(self):
'Ensure that a NULL PyObject* is handled gracefully'
gdb_repr, gdb_output = (
self.get_gdb_repr('id(42)',
cmds_after_breakpoint=['set variable v=0',
'backtrace'])
)
self.assertEqual(gdb_repr, '0x0')
def test_NULL_ob_type(self):
'Ensure that a PyObject* with NULL ob_type is handled gracefully'
self.assertSane('id(42)',
'set v->ob_type=0')
def test_corrupt_ob_type(self):
'Ensure that a PyObject* with a corrupt ob_type is handled gracefully'
self.assertSane('id(42)',
'set v->ob_type=0xDEADBEEF',
exprepr='42')
def test_corrupt_tp_flags(self):
'Ensure that a PyObject* with a type with corrupt tp_flags is handled'
self.assertSane('id(42)',
'set v->ob_type->tp_flags=0x0',
exprepr='42')
def test_corrupt_tp_name(self):
'Ensure that a PyObject* with a type with corrupt tp_name is handled'
self.assertSane('id(42)',
'set v->ob_type->tp_name=0xDEADBEEF',
exprepr='42')
def test_builtins_help(self):
'Ensure that the new-style class _Helper in site.py can be handled'
# (this was the issue causing tracebacks in
# http://bugs.python.org/issue8032#msg100537 )
gdb_repr, gdb_output = self.get_gdb_repr('id(__builtins__.help)', import_site=True)
m = re.match(r'<_Helper at remote 0x-?[0-9a-f]+>', gdb_repr)
self.assertTrue(m,
msg='Unexpected rendering %r' % gdb_repr)
def test_selfreferential_list(self):
'''Ensure that a reference loop involving a list doesn't lead proxyval
into an infinite loop:'''
gdb_repr, gdb_output = \
self.get_gdb_repr("a = [3, 4, 5] ; a.append(a) ; id(a)")
self.assertEqual(gdb_repr, '[3, 4, 5, [...]]')
gdb_repr, gdb_output = \
self.get_gdb_repr("a = [3, 4, 5] ; b = [a] ; a.append(b) ; id(a)")
self.assertEqual(gdb_repr, '[3, 4, 5, [[...]]]')
def test_selfreferential_dict(self):
'''Ensure that a reference loop involving a dict doesn't lead proxyval
into an infinite loop:'''
gdb_repr, gdb_output = \
self.get_gdb_repr("a = {} ; b = {'bar':a} ; a['foo'] = b ; id(a)")
self.assertEqual(gdb_repr, "{'foo': {'bar': {...}}}")
def test_selfreferential_old_style_instance(self):
gdb_repr, gdb_output = \
self.get_gdb_repr('''
class Foo:
pass
foo = Foo()
foo.an_attr = foo
id(foo)''')
self.assertTrue(re.match('<Foo\(an_attr=<\.\.\.>\) at remote 0x-?[0-9a-f]+>',
gdb_repr),
'Unexpected gdb representation: %r\n%s' % \
(gdb_repr, gdb_output))
def test_selfreferential_new_style_instance(self):
gdb_repr, gdb_output = \
self.get_gdb_repr('''
class Foo(object):
pass
foo = Foo()
foo.an_attr = foo
id(foo)''')
self.assertTrue(re.match('<Foo\(an_attr=<\.\.\.>\) at remote 0x-?[0-9a-f]+>',
gdb_repr),
'Unexpected gdb representation: %r\n%s' % \
(gdb_repr, gdb_output))
gdb_repr, gdb_output = \
self.get_gdb_repr('''
class Foo(object):
pass
a = Foo()
b = Foo()
a.an_attr = b
b.an_attr = a
id(a)''')
self.assertTrue(re.match('<Foo\(an_attr=<Foo\(an_attr=<\.\.\.>\) at remote 0x-?[0-9a-f]+>\) at remote 0x-?[0-9a-f]+>',
gdb_repr),
'Unexpected gdb representation: %r\n%s' % \
(gdb_repr, gdb_output))
def test_truncation(self):
'Verify that very long output is truncated'
gdb_repr, gdb_output = self.get_gdb_repr('id(list(range(1000)))')
self.assertEqual(gdb_repr,
"[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, "
"14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, "
"27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, "
"40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, "
"53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, "
"66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, "
"79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, "
"92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, "
"104, 105, 106, 107, 108, 109, 110, 111, 112, 113, "
"114, 115, 116, 117, 118, 119, 120, 121, 122, 123, "
"124, 125, 126, 127, 128, 129, 130, 131, 132, 133, "
"134, 135, 136, 137, 138, 139, 140, 141, 142, 143, "
"144, 145, 146, 147, 148, 149, 150, 151, 152, 153, "
"154, 155, 156, 157, 158, 159, 160, 161, 162, 163, "
"164, 165, 166, 167, 168, 169, 170, 171, 172, 173, "
"174, 175, 176, 177, 178, 179, 180, 181, 182, 183, "
"184, 185, 186, 187, 188, 189, 190, 191, 192, 193, "
"194, 195, 196, 197, 198, 199, 200, 201, 202, 203, "
"204, 205, 206, 207, 208, 209, 210, 211, 212, 213, "
"214, 215, 216, 217, 218, 219, 220, 221, 222, 223, "
"224, 225, 226...(truncated)")
self.assertEqual(len(gdb_repr),
1024 + len('...(truncated)'))
def test_builtin_method(self):
gdb_repr, gdb_output = self.get_gdb_repr('import sys; id(sys.stdout.readlines)')
self.assertTrue(re.match('<built-in method readlines of _io.TextIOWrapper object at remote 0x-?[0-9a-f]+>',
gdb_repr),
'Unexpected gdb representation: %r\n%s' % \
(gdb_repr, gdb_output))
def test_frames(self):
gdb_output = self.get_stack_trace('''
def foo(a, b, c):
pass
foo(3, 4, 5)
id(foo.__code__)''',
breakpoint='builtin_id',
cmds_after_breakpoint=['print (PyFrameObject*)(((PyCodeObject*)v)->co_zombieframe)']
)
self.assertTrue(re.match('.*\s+\$1 =\s+Frame 0x-?[0-9a-f]+, for file <string>, line 3, in foo \(\)\s+.*',
gdb_output,
re.DOTALL),
'Unexpected gdb representation: %r\n%s' % (gdb_output, gdb_output))
@unittest.skipIf(python_is_optimized(),
"Python was compiled with optimizations")
class PyListTests(DebuggerTests):
def assertListing(self, expected, actual):
self.assertEndsWith(actual, expected)
def test_basic_command(self):
'Verify that the "py-list" command works'
bt = self.get_stack_trace(script=self.get_sample_script(),
cmds_after_breakpoint=['py-list'])
self.assertListing(' 5 \n'
' 6 def bar(a, b, c):\n'
' 7 baz(a, b, c)\n'
' 8 \n'
' 9 def baz(*args):\n'
' >10 id(42)\n'
' 11 \n'
' 12 foo(1, 2, 3)\n',
bt)
def test_one_abs_arg(self):
'Verify the "py-list" command with one absolute argument'
bt = self.get_stack_trace(script=self.get_sample_script(),
cmds_after_breakpoint=['py-list 9'])
self.assertListing(' 9 def baz(*args):\n'
' >10 id(42)\n'
' 11 \n'
' 12 foo(1, 2, 3)\n',
bt)
def test_two_abs_args(self):
'Verify the "py-list" command with two absolute arguments'
bt = self.get_stack_trace(script=self.get_sample_script(),
cmds_after_breakpoint=['py-list 1,3'])
self.assertListing(' 1 # Sample script for use by test_gdb.py\n'
' 2 \n'
' 3 def foo(a, b, c):\n',
bt)
class StackNavigationTests(DebuggerTests):
@unittest.skipUnless(HAS_PYUP_PYDOWN, "test requires py-up/py-down commands")
@unittest.skipIf(python_is_optimized(),
"Python was compiled with optimizations")
def test_pyup_command(self):
'Verify that the "py-up" command works'
bt = self.get_stack_trace(script=self.get_sample_script(),
cmds_after_breakpoint=['py-up'])
self.assertMultilineMatches(bt,
r'''^.*
#[0-9]+ Frame 0x-?[0-9a-f]+, for file .*gdb_sample.py, line 7, in bar \(a=1, b=2, c=3\)
baz\(a, b, c\)
$''')
@unittest.skipUnless(HAS_PYUP_PYDOWN, "test requires py-up/py-down commands")
def test_down_at_bottom(self):
'Verify handling of "py-down" at the bottom of the stack'
bt = self.get_stack_trace(script=self.get_sample_script(),
cmds_after_breakpoint=['py-down'])
self.assertEndsWith(bt,
'Unable to find a newer python frame\n')
@unittest.skipUnless(HAS_PYUP_PYDOWN, "test requires py-up/py-down commands")
def test_up_at_top(self):
'Verify handling of "py-up" at the top of the stack'
bt = self.get_stack_trace(script=self.get_sample_script(),
cmds_after_breakpoint=['py-up'] * 4)
self.assertEndsWith(bt,
'Unable to find an older python frame\n')
@unittest.skipUnless(HAS_PYUP_PYDOWN, "test requires py-up/py-down commands")
@unittest.skipIf(python_is_optimized(),
"Python was compiled with optimizations")
def test_up_then_down(self):
'Verify "py-up" followed by "py-down"'
bt = self.get_stack_trace(script=self.get_sample_script(),
cmds_after_breakpoint=['py-up', 'py-down'])
self.assertMultilineMatches(bt,
r'''^.*
#[0-9]+ Frame 0x-?[0-9a-f]+, for file .*gdb_sample.py, line 7, in bar \(a=1, b=2, c=3\)
baz\(a, b, c\)
#[0-9]+ Frame 0x-?[0-9a-f]+, for file .*gdb_sample.py, line 10, in baz \(args=\(1, 2, 3\)\)
id\(42\)
$''')
class PyBtTests(DebuggerTests):
@unittest.skipIf(python_is_optimized(),
"Python was compiled with optimizations")
def test_bt(self):
'Verify that the "py-bt" command works'
bt = self.get_stack_trace(script=self.get_sample_script(),
cmds_after_breakpoint=['py-bt'])
self.assertMultilineMatches(bt,
r'''^.*
Traceback \(most recent call first\):
File ".*gdb_sample.py", line 10, in baz
id\(42\)
File ".*gdb_sample.py", line 7, in bar
baz\(a, b, c\)
File ".*gdb_sample.py", line 4, in foo
bar\(a, b, c\)
File ".*gdb_sample.py", line 12, in <module>
foo\(1, 2, 3\)
''')
@unittest.skipIf(python_is_optimized(),
"Python was compiled with optimizations")
def test_bt_full(self):
'Verify that the "py-bt-full" command works'
bt = self.get_stack_trace(script=self.get_sample_script(),
cmds_after_breakpoint=['py-bt-full'])
self.assertMultilineMatches(bt,
r'''^.*
#[0-9]+ Frame 0x-?[0-9a-f]+, for file .*gdb_sample.py, line 7, in bar \(a=1, b=2, c=3\)
baz\(a, b, c\)
#[0-9]+ Frame 0x-?[0-9a-f]+, for file .*gdb_sample.py, line 4, in foo \(a=1, b=2, c=3\)
bar\(a, b, c\)
#[0-9]+ Frame 0x-?[0-9a-f]+, for file .*gdb_sample.py, line 12, in <module> \(\)
foo\(1, 2, 3\)
''')
@unittest.skipUnless(_thread,
"Python was compiled without thread support")
def test_threads(self):
'Verify that "py-bt" indicates threads that are waiting for the GIL'
cmd = '''
from threading import Thread
class TestThread(Thread):
# These threads would run forever, but we'll interrupt things with the
# debugger
def run(self):
i = 0
while 1:
i += 1
t = {}
for i in range(4):
t[i] = TestThread()
t[i].start()
# Trigger a breakpoint on the main thread
id(42)
'''
# Verify with "py-bt":
gdb_output = self.get_stack_trace(cmd,
cmds_after_breakpoint=['thread apply all py-bt'])
self.assertIn('Waiting for the GIL', gdb_output)
# Verify with "py-bt-full":
gdb_output = self.get_stack_trace(cmd,
cmds_after_breakpoint=['thread apply all py-bt-full'])
self.assertIn('Waiting for the GIL', gdb_output)
@unittest.skipIf(python_is_optimized(),
"Python was compiled with optimizations")
# Some older versions of gdb will fail with
# "Cannot find new threads: generic error"
# unless we add LD_PRELOAD=PATH-TO-libpthread.so.1 as a workaround
@unittest.skipUnless(_thread,
"Python was compiled without thread support")
def test_gc(self):
'Verify that "py-bt" indicates if a thread is garbage-collecting'
cmd = ('from gc import collect\n'
'id(42)\n'
'def foo():\n'
' collect()\n'
'def bar():\n'
' foo()\n'
'bar()\n')
# Verify with "py-bt":
gdb_output = self.get_stack_trace(cmd,
cmds_after_breakpoint=['break update_refs', 'continue', 'py-bt'],
)
self.assertIn('Garbage-collecting', gdb_output)
# Verify with "py-bt-full":
gdb_output = self.get_stack_trace(cmd,
cmds_after_breakpoint=['break update_refs', 'continue', 'py-bt-full'],
)
self.assertIn('Garbage-collecting', gdb_output)
@unittest.skipIf(python_is_optimized(),
"Python was compiled with optimizations")
# Some older versions of gdb will fail with
# "Cannot find new threads: generic error"
# unless we add LD_PRELOAD=PATH-TO-libpthread.so.1 as a workaround
@unittest.skipUnless(_thread,
"Python was compiled without thread support")
def test_pycfunction(self):
'Verify that "py-bt" displays invocations of PyCFunction instances'
cmd = ('from time import sleep\n'
'def foo():\n'
' sleep(1)\n'
'def bar():\n'
' foo()\n'
'bar()\n')
# Verify with "py-bt":
gdb_output = self.get_stack_trace(cmd,
breakpoint='time_sleep',
cmds_after_breakpoint=['bt', 'py-bt'],
)
self.assertIn('<built-in method sleep', gdb_output)
# Verify with "py-bt-full":
gdb_output = self.get_stack_trace(cmd,
breakpoint='time_sleep',
cmds_after_breakpoint=['py-bt-full'],
)
self.assertIn('#0 <built-in method sleep', gdb_output)
class PyPrintTests(DebuggerTests):
@unittest.skipIf(python_is_optimized(),
"Python was compiled with optimizations")
def test_basic_command(self):
'Verify that the "py-print" command works'
bt = self.get_stack_trace(script=self.get_sample_script(),
cmds_after_breakpoint=['py-print args'])
self.assertMultilineMatches(bt,
r".*\nlocal 'args' = \(1, 2, 3\)\n.*")
@unittest.skipIf(python_is_optimized(),
"Python was compiled with optimizations")
@unittest.skipUnless(HAS_PYUP_PYDOWN, "test requires py-up/py-down commands")
def test_print_after_up(self):
bt = self.get_stack_trace(script=self.get_sample_script(),
cmds_after_breakpoint=['py-up', 'py-print c', 'py-print b', 'py-print a'])
self.assertMultilineMatches(bt,
r".*\nlocal 'c' = 3\nlocal 'b' = 2\nlocal 'a' = 1\n.*")
@unittest.skipIf(python_is_optimized(),
"Python was compiled with optimizations")
def test_printing_global(self):
bt = self.get_stack_trace(script=self.get_sample_script(),
cmds_after_breakpoint=['py-print __name__'])
self.assertMultilineMatches(bt,
r".*\nglobal '__name__' = '__main__'\n.*")
@unittest.skipIf(python_is_optimized(),
"Python was compiled with optimizations")
def test_printing_builtin(self):
bt = self.get_stack_trace(script=self.get_sample_script(),
cmds_after_breakpoint=['py-print len'])
self.assertMultilineMatches(bt,
r".*\nbuiltin 'len' = <built-in method len of module object at remote 0x-?[0-9a-f]+>\n.*")
class PyLocalsTests(DebuggerTests):
@unittest.skipIf(python_is_optimized(),
"Python was compiled with optimizations")
def test_basic_command(self):
bt = self.get_stack_trace(script=self.get_sample_script(),
cmds_after_breakpoint=['py-locals'])
self.assertMultilineMatches(bt,
r".*\nargs = \(1, 2, 3\)\n.*")
@unittest.skipUnless(HAS_PYUP_PYDOWN, "test requires py-up/py-down commands")
@unittest.skipIf(python_is_optimized(),
"Python was compiled with optimizations")
def test_locals_after_up(self):
bt = self.get_stack_trace(script=self.get_sample_script(),
cmds_after_breakpoint=['py-up', 'py-locals'])
self.assertMultilineMatches(bt,
r".*\na = 1\nb = 2\nc = 3\n.*")
def test_main():
run_unittest(PrettyPrintTests,
PyListTests,
StackNavigationTests,
PyBtTests,
PyPrintTests,
PyLocalsTests
)
if __name__ == "__main__":
test_main()
|
|
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
"""
This module contains a collection of functions for calculating metrics
(distance measures) between states and operators.
"""
__all__ = ['fidelity', 'tracedist', 'bures_dist', 'bures_angle',
'hilbert_dist', 'average_gate_fidelity', 'process_fidelity']
import numpy as np
from qutip.sparse import sp_eigs
from qutip.states import ket2dm
from qutip.superop_reps import to_kraus
def fidelity(A, B):
"""
Calculates the fidelity (pseudo-metric) between two density matrices.
See: Nielsen & Chuang, "Quantum Computation and Quantum Information"
Parameters
----------
A : qobj
Density matrix or state vector.
B : qobj
Density matrix or state vector with same dimensions as A.
Returns
-------
fid : float
Fidelity pseudo-metric between A and B.
Examples
--------
>>> x = fock_dm(5,3)
>>> y = coherent_dm(5,1)
>>> fidelity(x,y)
0.24104350624628332
"""
if A.isket or A.isbra:
A = ket2dm(A)
if B.isket or B.isbra:
B = ket2dm(B)
if A.dims != B.dims:
raise TypeError('Density matrices do not have same dimensions.')
A = A.sqrtm()
return float(np.real((A * (B * A)).sqrtm().tr()))
def process_fidelity(U1, U2, normalize=True):
"""
Calculate the process fidelity given two process operators.
"""
if normalize:
return (U1 * U2).tr() / (U1.tr() * U2.tr())
else:
return (U1 * U2).tr()
def average_gate_fidelity(oper):
"""
Given a Qobj representing the supermatrix form of a map, returns the
average gate fidelity (pseudo-metric) of that map.
Parameters
----------
A : Qobj
Quantum object representing a superoperator.
Returns
-------
fid : float
Fidelity pseudo-metric between A and the identity superoperator.
"""
kraus_form = to_kraus(oper)
d = kraus_form[0].shape[0]
if kraus_form[0].shape[1] != d:
return TypeError("Average gate fielity only implemented for square "
"superoperators.")
return (d + np.sum([np.abs(A_k.tr())**2
for A_k in kraus_form])) / (d**2 + d)
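# Illustrative sketch (not part of the original module): for the identity
# channel on a qubit there is a single Kraus operator proportional to the
# identity, so the sum above is d**2 and the average gate fidelity is
# (d + d**2) / (d**2 + d) = 1. `qeye` and `to_super` are assumed to be
# importable from qutip.operators and qutip.superop_reps respectively.
def _example_identity_average_gate_fidelity():
    from qutip.operators import qeye
    from qutip.superop_reps import to_super
    return average_gate_fidelity(to_super(qeye(2)))  # expected: 1.0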
def tracedist(A, B, sparse=False, tol=0):
"""
    Calculates the trace distance between two density matrices.
See: Nielsen & Chuang, "Quantum Computation and Quantum Information"
Parameters
    ----------
A : qobj
Density matrix or state vector.
B : qobj
Density matrix or state vector with same dimensions as A.
tol : float
Tolerance used by sparse eigensolver, if used. (0=Machine precision)
sparse : {False, True}
Use sparse eigensolver.
Returns
-------
tracedist : float
Trace distance between A and B.
Examples
--------
>>> x=fock_dm(5,3)
>>> y=coherent_dm(5,1)
>>> tracedist(x,y)
0.9705143161472971
"""
if A.isket or A.isbra:
A = ket2dm(A)
if B.isket or B.isbra:
B = ket2dm(B)
if A.dims != B.dims:
raise TypeError("A and B do not have same dimensions.")
diff = A - B
diff = diff.dag() * diff
vals = sp_eigs(diff.data, diff.isherm, vecs=False, sparse=sparse, tol=tol)
return float(np.real(0.5 * np.sum(np.sqrt(np.abs(vals)))))
def hilbert_dist(A, B):
"""
Returns the Hilbert-Schmidt distance between two density matrices A & B.
Parameters
----------
A : qobj
Density matrix or state vector.
B : qobj
Density matrix or state vector with same dimensions as A.
Returns
-------
dist : float
Hilbert-Schmidt distance between density matrices.
Notes
-----
See V. Vedral and M. B. Plenio, Phys. Rev. A 57, 1619 (1998).
"""
if A.isket or A.isbra:
A = ket2dm(A)
if B.isket or B.isbra:
B = ket2dm(B)
if A.dims != B.dims:
raise TypeError('A and B do not have same dimensions.')
return ((A - B)**2).tr()
def bures_dist(A, B):
"""
Returns the Bures distance between two density matrices A & B.
The Bures distance ranges from 0, for states with unit fidelity,
to sqrt(2).
Parameters
----------
A : qobj
Density matrix or state vector.
B : qobj
Density matrix or state vector with same dimensions as A.
Returns
-------
dist : float
Bures distance between density matrices.
"""
if A.isket or A.isbra:
A = ket2dm(A)
if B.isket or B.isbra:
B = ket2dm(B)
if A.dims != B.dims:
raise TypeError('A and B do not have same dimensions.')
dist = np.sqrt(2.0 * (1.0 - fidelity(A, B)))
return dist
def bures_angle(A, B):
"""
Returns the Bures Angle between two density matrices A & B.
The Bures angle ranges from 0, for states with unit fidelity, to pi/2.
Parameters
----------
A : qobj
Density matrix or state vector.
B : qobj
Density matrix or state vector with same dimensions as A.
Returns
-------
angle : float
Bures angle between density matrices.
"""
if A.isket or A.isbra:
A = ket2dm(A)
if B.isket or B.isbra:
B = ket2dm(B)
if A.dims != B.dims:
raise TypeError('A and B do not have same dimensions.')
return np.arccos(fidelity(A, B))
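# Illustrative sketch (not part of the original module): the measures above
# evaluated on the states used in the docstring examples. `fock_dm` and
# `coherent_dm` are assumed to be importable from qutip.states.
def _example_compare_metrics():
    from qutip.states import fock_dm, coherent_dm
    x = fock_dm(5, 3)
    y = coherent_dm(5, 1)
    return {
        'fidelity': fidelity(x, y),
        'trace distance': tracedist(x, y),
        'Bures distance': bures_dist(x, y),   # sqrt(2 * (1 - fidelity))
        'Bures angle': bures_angle(x, y),     # arccos(fidelity)
        'Hilbert-Schmidt distance': hilbert_dist(x, y),
    }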
|
|
import logbook
import logbook.queues
from jsonrpc.exceptions import JSONRPCError
from hologram import JsonSchemaMixin
from hologram.helpers import StrEnum
from dataclasses import dataclass, field
from datetime import datetime, timedelta
from queue import Empty
from typing import Optional, Any
from dbt.contracts.rpc import (
RemoteResult,
)
from dbt.exceptions import InternalException
from dbt.utils import restrict_to
class QueueMessageType(StrEnum):
Error = 'error'
Result = 'result'
Timeout = 'timeout'
Log = 'log'
terminating = frozenset((Error, Result, Timeout))
@dataclass
class QueueMessage(JsonSchemaMixin):
message_type: QueueMessageType
@dataclass
class QueueLogMessage(QueueMessage):
message_type: QueueMessageType = field(
metadata=restrict_to(QueueMessageType.Log)
)
record: logbook.LogRecord
@classmethod
def from_record(cls, record: logbook.LogRecord):
return QueueLogMessage(
message_type=QueueMessageType.Log,
record=record,
)
@dataclass
class QueueErrorMessage(QueueMessage):
message_type: QueueMessageType = field(
metadata=restrict_to(QueueMessageType.Error)
)
error: JSONRPCError
@classmethod
def from_error(cls, error: JSONRPCError):
return QueueErrorMessage(
message_type=QueueMessageType.Error,
error=error,
)
@dataclass
class QueueResultMessage(QueueMessage):
message_type: QueueMessageType = field(
metadata=restrict_to(QueueMessageType.Result)
)
result: RemoteResult
@classmethod
def from_result(cls, result: RemoteResult):
return cls(
message_type=QueueMessageType.Result,
result=result,
)
@dataclass
class QueueTimeoutMessage(QueueMessage):
message_type: QueueMessageType = field(
metadata=restrict_to(QueueMessageType.Timeout),
)
@classmethod
def create(cls):
return cls(message_type=QueueMessageType.Timeout)
class QueueLogHandler(logbook.queues.MultiProcessingHandler):
def emit(self, record: logbook.LogRecord):
        # trigger the cached properties here
record.pull_information()
self.queue.put_nowait(QueueLogMessage.from_record(record))
def emit_error(self, error: JSONRPCError):
self.queue.put_nowait(QueueErrorMessage.from_error(error))
def emit_result(self, result: RemoteResult):
self.queue.put_nowait(QueueResultMessage.from_result(result))
def _next_timeout(
started: datetime,
timeout: Optional[float],
) -> Optional[float]:
if timeout is None:
return None
end = started + timedelta(seconds=timeout)
message_timeout = end - datetime.utcnow()
return message_timeout.total_seconds()
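# Illustrative sketch (not part of the original module): _next_timeout shrinks
# the per-read timeout as wall-clock time passes. With an overall timeout of
# 10 seconds and roughly 3 seconds already elapsed, about 7 seconds remain;
# once the deadline has passed the result goes negative and _recv_raw returns
# a QueueTimeoutMessage immediately.
def _example_next_timeout():
    started = datetime.utcnow() - timedelta(seconds=3)
    return _next_timeout(started, 10.0)  # approximately 7.0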
class QueueSubscriber(logbook.queues.MultiProcessingSubscriber):
def _recv_raw(self, timeout: Optional[float]) -> Any:
if timeout is None:
return self.queue.get()
if timeout < 0:
return QueueTimeoutMessage.create()
try:
return self.queue.get(block=True, timeout=timeout)
except Empty:
return QueueTimeoutMessage.create()
def recv(
self,
timeout: Optional[float] = None
) -> QueueMessage:
"""Receives one record from the socket, loads it and dispatches it.
Returns the message type if something was dispatched or `None` if it
timed out.
"""
rv = self._recv_raw(timeout)
if not isinstance(rv, QueueMessage):
raise InternalException(
'Got invalid queue message: {}'.format(rv)
)
return rv
def handle_message(
self,
timeout: Optional[float]
) -> QueueMessage:
msg = self.recv(timeout)
if isinstance(msg, QueueLogMessage):
logbook.dispatch_record(msg.record)
return msg
elif msg.message_type in QueueMessageType.terminating:
return msg
else:
raise InternalException(
'Got invalid queue message type {}'.format(msg.message_type)
)
def dispatch_until_exit(
self,
started: datetime,
timeout: Optional[float] = None
) -> QueueMessage:
while True:
message_timeout = _next_timeout(started, timeout)
msg = self.handle_message(message_timeout)
if msg.message_type in QueueMessageType.terminating:
return msg
# a bunch of processors to push/pop that set various rpc-related extras
class ServerContext(logbook.Processor):
def process(self, record):
# the server context is the last processor in the stack, so it should
# not overwrite a context if it's already been set.
if not record.extra['context']:
record.extra['context'] = 'server'
class HTTPRequest(logbook.Processor):
    def __init__(self, request):
        self.request = request
        super().__init__()
def process(self, record):
record.extra['addr'] = self.request.remote_addr
record.extra['http_method'] = self.request.method
class RPCRequest(logbook.Processor):
def __init__(self, request):
self.request = request
super().__init__()
def process(self, record):
record.extra['request_id'] = self.request._id
record.extra['method'] = self.request.method
class RPCResponse(logbook.Processor):
def __init__(self, response):
self.response = response
super().__init__()
def process(self, record):
record.extra['response_code'] = 200
# the request_id could be None if the request was bad
record.extra['request_id'] = getattr(
self.response.request, '_id', None
)
class RequestContext(RPCRequest):
def process(self, record):
super().process(record)
record.extra['context'] = 'request'
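# Illustrative sketch (not part of the original module): processors are meant
# to be pushed onto logbook's stack (applicationbound() comes from logbook's
# StackedObject API; how the surrounding server binds them is an assumption
# here) so that records logged inside the bound block pick up the rpc-related
# extras set in process().
def _example_bind_server_context():
    with ServerContext().applicationbound():
        logbook.Logger('rpc').info('handling request')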
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from desktop.lib.exceptions_renderable import PopupException
from django.utils.translation import ugettext as _
from kazoo.client import KazooClient
from libsentry.client import SentryClient
from libsentry.conf import HOSTNAME, PORT
from libsentry.sentry_site import get_sentry_server_ha_enabled, get_sentry_server_ha_has_security, get_sentry_server_ha_zookeeper_quorum, get_sentry_server_ha_zookeeper_namespace
import logging
import json
import random
import threading
import time
LOG = logging.getLogger(__name__)
_api_cache = None
_api_cache_lock = threading.Lock()
def ha_error_handler(func):
def decorator(*args, **kwargs):
retries = 15
while retries > 0:
try:
return func(*args, **kwargs)
except SentryException, e:
raise e
except Exception, e:
retries -= 1
if not get_sentry_server_ha_enabled() or retries == 0:
raise e
else:
          # For now we retry on any error and pull a fresh list of servers from ZooKeeper
LOG.info('Retrying fetching an available client in ZooKeeper.')
global _api_cache
_api_cache = None
time.sleep(1)
args[0].client = _get_client(args[0].client.username)
LOG.info('Picked %s' % args[0].client)
return decorator
def get_api(user):
client = _get_client(user.username)
return SentryApi(client)
class SentryApi(object):
def __init__(self, client):
self.client = client
@ha_error_handler
def create_sentry_role(self, roleName):
response = self.client.create_sentry_role(roleName)
if response.status.value == 0:
return response
else:
raise SentryException(response)
@ha_error_handler
def drop_sentry_role(self, roleName):
response = self.client.drop_sentry_role(roleName)
if response.status.value == 0:
return response
else:
raise SentryException(response)
@ha_error_handler
def alter_sentry_role_grant_privilege(self, roleName, tSentryPrivilege):
response = self.client.alter_sentry_role_grant_privilege(roleName, tSentryPrivilege)
if response.status.value == 0:
return response
else:
raise SentryException(response)
@ha_error_handler
def alter_sentry_role_revoke_privilege(self, roleName, tSentryPrivilege):
response = self.client.alter_sentry_role_revoke_privilege(roleName, tSentryPrivilege)
if response.status.value == 0:
return response
else:
raise SentryException(response)
@ha_error_handler
def alter_sentry_role_add_groups(self, roleName, groups):
response = self.client.alter_sentry_role_add_groups(roleName, groups)
if response.status.value == 0:
return response
else:
raise SentryException(response)
@ha_error_handler
def alter_sentry_role_delete_groups(self, roleName, groups):
response = self.client.alter_sentry_role_delete_groups(roleName, groups)
if response.status.value == 0:
return response
else:
raise SentryException(response)
@ha_error_handler
def list_sentry_roles_by_group(self, groupName=None):
response = self.client.list_sentry_roles_by_group(groupName)
if response.status.value == 0:
roles = []
for role in response.roles:
roles.append({
'name': role.roleName,
'groups': [group.groupName for group in role.groups]
})
return roles
else:
raise SentryException(response)
@ha_error_handler
def list_sentry_privileges_by_role(self, roleName, authorizableHierarchy=None):
response = self.client.list_sentry_privileges_by_role(roleName, authorizableHierarchy)
if response.status.value == 0:
return [self._massage_priviledge(privilege) for privilege in response.privileges]
else:
raise SentryException(response)
@ha_error_handler
def list_sentry_privileges_for_provider(self, groups, roleSet=None, authorizableHierarchy=None):
response = self.client.list_sentry_privileges_for_provider(groups, roleSet, authorizableHierarchy)
if response.status.value == 0:
return response
else:
raise SentryException(response)
@ha_error_handler
def list_sentry_privileges_by_authorizable(self, authorizableSet, groups=None, roleSet=None):
response = self.client.list_sentry_privileges_by_authorizable(authorizableSet, groups, roleSet)
_privileges = []
for authorizable, roles in response.privilegesMapByAuth.iteritems():
_roles = {}
for role, privileges in roles.privilegeMap.iteritems():
_roles[role] = [self._massage_priviledge(privilege) for privilege in privileges]
_privileges.append((self._massage_authorizable(authorizable), _roles))
if response.status.value == 0:
return _privileges
else:
raise SentryException(response)
@ha_error_handler
def drop_sentry_privileges(self, authorizableHierarchy):
response = self.client.drop_sentry_privilege(authorizableHierarchy)
if response.status.value == 0:
return response
else:
raise SentryException(response)
@ha_error_handler
def rename_sentry_privileges(self, oldAuthorizable, newAuthorizable):
response = self.client.rename_sentry_privilege(oldAuthorizable, newAuthorizable)
if response.status.value == 0:
return response
else:
raise SentryException(response)
def _massage_priviledge(self, privilege):
return {
'scope': privilege.privilegeScope,
'server': privilege.serverName,
'database': privilege.dbName,
'table': privilege.tableName,
'URI': privilege.URI,
'action': 'ALL' if privilege.action == '*' else privilege.action.upper(),
'timestamp': privilege.createTime,
'grantOption': privilege.grantOption == 1,
}
def _massage_authorizable(self, authorizable):
return {
'server': authorizable.server,
'database': authorizable.db,
'table': authorizable.table,
'URI': authorizable.uri,
}
class SentryException(Exception):
def __init__(self, e):
super(SentryException, self).__init__(e)
self.message = e.status.message
def __str__(self):
return self.message
def _get_client(username):
if get_sentry_server_ha_enabled():
servers = _get_server_properties()
if servers:
server = random.choice(servers)
else:
raise PopupException(_('No Sentry servers are available.'))
else:
server = {
'hostname': HOSTNAME.get(),
'port': PORT.get()
}
return SentryClient(server['hostname'], server['port'], username)
# To be moved into a libzookeeper module, with a decorator.
def _get_server_properties():
global _api_cache
if _api_cache is None:
_api_cache_lock.acquire()
try:
if _api_cache is None:
if get_sentry_server_ha_has_security():
try:
from zookeeper.conf import CLUSTERS
sasl_server_principal = CLUSTERS.get()['default'].PRINCIPAL_NAME.get()
except Exception, e:
LOG.error("Could not get principal name from ZooKeeper app config: %s. Using 'zookeeper' as principal name." % e)
sasl_server_principal = 'zookeeper'
else:
sasl_server_principal = None
zk = KazooClient(hosts=get_sentry_server_ha_zookeeper_quorum(), read_only=True, sasl_server_principal=sasl_server_principal)
zk.start()
servers = []
namespace = get_sentry_server_ha_zookeeper_namespace()
children = zk.get_children("/%s/sentry-service/sentry-service/" % namespace)
for node in children:
data, stat = zk.get("/%s/sentry-service/sentry-service/%s" % (namespace, node))
server = json.loads(data.decode("utf-8"))
servers.append({'hostname': server['address'], 'port': server['sslPort'] if server['sslPort'] else server['port']})
zk.stop()
_api_cache = servers
finally:
_api_cache_lock.release()
return _api_cache
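# Illustrative sketch (not part of the original module): a typical read path.
# `user` is assumed to be a Django user object exposing a `username`
# attribute; get_api() picks a Sentry server (via ZooKeeper when HA is
# enabled) and @ha_error_handler retries the call on transient errors.
def _example_list_roles(user):
  api = get_api(user)
  return api.list_sentry_roles_by_group(groupName=None)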
|
|
#!/usr/bin/env python
"""Tests for export utils functions."""
import os
import stat
from grr.lib import aff4
from grr.lib import export_utils
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import sequential_collection
from grr.lib import test_lib
from grr.lib import utils
from grr.lib.aff4_objects import aff4_grr
from grr.lib.aff4_objects import collects
from grr.lib.aff4_objects import standard
from grr.lib.flows.general import collectors
from grr.lib.hunts import results
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import file_finder as rdf_file_finder
from grr.lib.rdfvalues import paths as rdf_paths
class TestExports(test_lib.FlowTestsBaseclass):
"""Tests exporting of data."""
def setUp(self):
super(TestExports, self).setUp()
self.out = self.client_id.Add("fs/os")
self.CreateFile("testfile1")
self.CreateFile("testfile2")
self.CreateFile("testfile5")
self.CreateFile("testfile6")
self.CreateDir("testdir1")
self.CreateFile("testdir1/testfile3")
self.CreateDir("testdir1/testdir2")
self.CreateFile("testdir1/testdir2/testfile4")
self.collection_urn = self.client_id.Add("testcoll")
def CreateDir(self, dirpath):
path = self.out.Add(*dirpath.split("/"))
fd = aff4.FACTORY.Create(path, standard.VFSDirectory, token=self.token)
fd.Close()
def CreateFile(self, filepath):
path = self.out.Add(filepath)
fd = aff4.FACTORY.Create(path, aff4_grr.VFSMemoryFile, token=self.token)
fd.Write("some data")
fd.Close()
def testExportFile(self):
"""Check we can export a file without errors."""
with utils.TempDirectory() as tmpdir:
export_utils.CopyAFF4ToLocal(
self.out.Add("testfile1"), tmpdir, overwrite=True, token=self.token)
expected_outdir = os.path.join(tmpdir, self.out.Path()[1:])
self.assertTrue("testfile1" in os.listdir(expected_outdir))
def _VerifyDownload(self):
with utils.TempDirectory() as tmpdir:
export_utils.DownloadCollection(
self.collection_urn,
tmpdir,
overwrite=True,
dump_client_info=True,
token=self.token,
max_threads=2)
expected_outdir = os.path.join(tmpdir, self.out.Path()[1:])
# Check we found both files.
self.assertTrue("testfile1" in os.listdir(expected_outdir))
self.assertTrue("testfile2" in os.listdir(expected_outdir))
self.assertTrue("testfile5" in os.listdir(expected_outdir))
self.assertTrue("testfile6" in os.listdir(expected_outdir))
# Check we dumped a YAML file to the root of the client.
expected_rootdir = os.path.join(tmpdir, self.client_id.Basename())
self.assertTrue("client_info.yaml" in os.listdir(expected_rootdir))
def testDownloadHuntResultCollection(self):
"""Check we can download files references in HuntResultCollection."""
# Create a collection with URNs to some files.
fd = results.HuntResultCollection(self.collection_urn, token=self.token)
fd.AddAsMessage(rdfvalue.RDFURN(self.out.Add("testfile1")), self.client_id)
fd.AddAsMessage(
rdf_client.StatEntry(pathspec=rdf_paths.PathSpec(
path="testfile2", pathtype="OS")),
self.client_id)
fd.AddAsMessage(
rdf_file_finder.FileFinderResult(stat_entry=rdf_client.StatEntry(
pathspec=rdf_paths.PathSpec(path="testfile5", pathtype="OS"))),
self.client_id)
fd.AddAsMessage(
collectors.ArtifactFilesDownloaderResult(
downloaded_file=rdf_client.StatEntry(pathspec=rdf_paths.PathSpec(
path="testfile6", pathtype="OS"))),
self.client_id)
self._VerifyDownload()
def testDownloadCollection(self):
"""Check we can download files references in RDFValueCollection."""
fd = aff4.FACTORY.Create(
self.collection_urn, collects.RDFValueCollection, token=self.token)
self._AddTestData(fd)
fd.Close()
self._VerifyDownload()
def testDownloadGeneralIndexedCollection(self):
"""Check we can download files references in GeneralIndexedCollection."""
fd = sequential_collection.GeneralIndexedCollection(
self.collection_urn, token=self.token)
self._AddTestData(fd)
self._VerifyDownload()
def _AddTestData(self, fd):
fd.Add(rdfvalue.RDFURN(self.out.Add("testfile1")))
fd.Add(
rdf_client.StatEntry(pathspec=rdf_paths.PathSpec(
path="testfile2", pathtype="OS")))
fd.Add(
rdf_file_finder.FileFinderResult(stat_entry=rdf_client.StatEntry(
pathspec=rdf_paths.PathSpec(path="testfile5", pathtype="OS"))))
fd.Add(
collectors.ArtifactFilesDownloaderResult(
downloaded_file=rdf_client.StatEntry(pathspec=rdf_paths.PathSpec(
path="testfile6", pathtype="OS"))))
def testDownloadCollectionIgnoresArtifactResultsWithoutFiles(self):
# Create a collection with URNs to some files.
fd = aff4.FACTORY.Create(
self.collection_urn, collects.RDFValueCollection, token=self.token)
fd.Add(collectors.ArtifactFilesDownloaderResult())
fd.Close()
with utils.TempDirectory() as tmpdir:
export_utils.DownloadCollection(
self.collection_urn,
tmpdir,
overwrite=True,
dump_client_info=True,
token=self.token,
max_threads=2)
expected_outdir = os.path.join(tmpdir, self.out.Path()[1:])
self.assertFalse(os.path.exists(expected_outdir))
def testDownloadCollectionWithFlattenOption(self):
"""Check we can download files references in RDFValueCollection."""
# Create a collection with URNs to some files.
fd = aff4.FACTORY.Create(
self.collection_urn, collects.RDFValueCollection, token=self.token)
fd.Add(rdfvalue.RDFURN(self.out.Add("testfile1")))
fd.Add(
rdf_client.StatEntry(pathspec=rdf_paths.PathSpec(
path="testfile2", pathtype="OS")))
fd.Add(
rdf_file_finder.FileFinderResult(stat_entry=rdf_client.StatEntry(
pathspec=rdf_paths.PathSpec(path="testfile5", pathtype="OS"))))
fd.Close()
with utils.TempDirectory() as tmpdir:
export_utils.DownloadCollection(
self.collection_urn,
tmpdir,
overwrite=True,
dump_client_info=True,
flatten=True,
token=self.token,
max_threads=2)
# Check that "files" folder is filled with symlinks to downloaded files.
symlinks = os.listdir(os.path.join(tmpdir, "files"))
self.assertEqual(len(symlinks), 3)
self.assertListEqual(
sorted(symlinks), [
"C.1000000000000000_fs_os_testfile1",
"C.1000000000000000_fs_os_testfile2",
"C.1000000000000000_fs_os_testfile5"
])
self.assertEqual(
os.readlink(
os.path.join(tmpdir, "files",
"C.1000000000000000_fs_os_testfile1")),
os.path.join(tmpdir, "C.1000000000000000", "fs", "os", "testfile1"))
def testDownloadCollectionWithFoldersEntries(self):
"""Check we can download RDFValueCollection that also references folders."""
fd = aff4.FACTORY.Create(
self.collection_urn, collects.RDFValueCollection, token=self.token)
fd.Add(
rdf_file_finder.FileFinderResult(stat_entry=rdf_client.StatEntry(
pathspec=rdf_paths.PathSpec(path="testfile5", pathtype="OS"))))
fd.Add(
rdf_file_finder.FileFinderResult(stat_entry=rdf_client.StatEntry(
pathspec=rdf_paths.PathSpec(path="testdir1", pathtype="OS"),
st_mode=stat.S_IFDIR)))
fd.Close()
with utils.TempDirectory() as tmpdir:
export_utils.DownloadCollection(
self.collection_urn,
tmpdir,
overwrite=True,
dump_client_info=True,
token=self.token,
max_threads=2)
expected_outdir = os.path.join(tmpdir, self.out.Path()[1:])
# Check we found both files.
self.assertTrue("testfile5" in os.listdir(expected_outdir))
self.assertTrue("testdir1" in os.listdir(expected_outdir))
def testRecursiveDownload(self):
"""Check we can export a file without errors."""
with utils.TempDirectory() as tmpdir:
export_utils.RecursiveDownload(
aff4.FACTORY.Open(self.out, token=self.token), tmpdir, overwrite=True)
expected_outdir = os.path.join(tmpdir, self.out.Path()[1:])
self.assertTrue("testfile1" in os.listdir(expected_outdir))
full_outdir = os.path.join(expected_outdir, "testdir1", "testdir2")
self.assertTrue("testfile4" in os.listdir(full_outdir))
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
|
|
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import json
import os
import sys
from argparse import ArgumentTypeError
from datetime import datetime, timedelta
from c7n import cli, version, commands
from c7n.resolver import ValuesFrom
from c7n.resources import aws
from c7n.schema import ElementSchema, generate
from c7n.utils import yaml_dump, yaml_load
from .common import BaseTest, TextTestIO
class CliTest(BaseTest):
""" A subclass of BaseTest with some handy functions for CLI related tests. """
def patch_account_id(self):
def test_account_id(options):
options.account_id = self.account_id
self.patch(aws, "_default_account_id", test_account_id)
def get_output(self, argv):
""" Run cli.main with the supplied argv and return the output. """
out, err = self.run_and_expect_success(argv)
return out
def capture_output(self):
out = TextTestIO()
err = TextTestIO()
self.patch(sys, "stdout", out)
self.patch(sys, "stderr", err)
return out, err
def run_and_expect_success(self, argv):
""" Run cli.main() with supplied argv and expect normal execution. """
self.patch_account_id()
self.patch(sys, "argv", argv)
out, err = self.capture_output()
try:
cli.main()
except SystemExit as e:
self.fail(
"Expected sys.exit would not be called. Exit code was ({})".format(
e.code
)
)
return out.getvalue(), err.getvalue()
def run_and_expect_failure(self, argv, exit_code):
""" Run cli.main() with supplied argv and expect exit_code. """
self.patch_account_id()
self.patch(sys, "argv", argv)
out, err = self.capture_output()
# clear_resources()
with self.assertRaises(SystemExit) as cm:
cli.main()
self.assertEqual(cm.exception.code, exit_code)
return out.getvalue(), err.getvalue()
def run_and_expect_exception(self, argv, exception):
""" Run cli.main() with supplied argv and expect supplied exception. """
self.patch_account_id()
self.patch(sys, "argv", argv)
# clear_resources()
try:
cli.main()
except exception:
return
self.fail("Error: did not raise {}.".format(exception))
class UtilsTest(BaseTest):
def test_key_val_pair(self):
self.assertRaises(ArgumentTypeError, cli._key_val_pair, "invalid option")
param = "day=today"
self.assertIs(cli._key_val_pair(param), param)
class VersionTest(CliTest):
def test_version(self):
output = self.get_output(["custodian", "version"])
self.assertEqual(output.strip(), version.version)
def test_debug_version(self):
output = self.get_output(["custodian", "version", "--debug"])
self.assertIn(version.version, output)
self.assertIn('botocore==', output)
self.assertIn('python-dateutil==', output)
class ValidateTest(CliTest):
def test_invalidate_structure_exit(self):
invalid_policies = {"policies": [{"name": "foo"}]}
yaml_file = self.write_policy_file(invalid_policies)
self.run_and_expect_failure(["custodian", "validate", yaml_file], 1)
def test_validate(self):
invalid_policies = {
"policies": [
{
"name": "foo",
"resource": "s3",
"filters": [{"tag:custodian_tagging": "not-null"}],
"actions": [
{"type": "untag", "tags": {"custodian_cleanup": "yes"}}
],
}
]
}
yaml_file = self.write_policy_file(invalid_policies)
json_file = self.write_policy_file(invalid_policies, format="json")
# YAML validation
self.run_and_expect_exception(["custodian", "validate", yaml_file], SystemExit)
# JSON validation
self.run_and_expect_failure(["custodian", "validate", json_file], 1)
# no config files given
self.run_and_expect_failure(["custodian", "validate"], 1)
# nonexistent file given
self.run_and_expect_exception(
["custodian", "validate", "fake.yaml"], ValueError
)
valid_policies = {
"policies": [
{
"name": "foo",
"resource": "s3",
"filters": [{"tag:custodian_tagging": "not-null"}],
"actions": [{"type": "tag", "tags": {"custodian_cleanup": "yes"}}],
}
]
}
yaml_file = self.write_policy_file(valid_policies)
self.run_and_expect_success(["custodian", "validate", yaml_file])
# legacy -c option
self.run_and_expect_success(["custodian", "validate", "-c", yaml_file])
# duplicate policy names
self.run_and_expect_failure(["custodian", "validate", yaml_file, yaml_file], 1)
def test_deprecated(self):
deprecated = {
"policies": [
{
"name": "foo",
"resource": "ec2",
"filters": [{"tag:custodian_tagging": "not-null"}],
"actions": [{"type": "unmark", "tags": ["custodian_cleanup"]}],
}
]
}
yaml_file = self.write_policy_file(deprecated)
self.run_and_expect_success(["custodian", "validate", yaml_file])
# strict checking should fail as unmark is deprecated
self.run_and_expect_failure(["custodian", "validate", "--strict", yaml_file], 1)
class SchemaTest(CliTest):
def test_schema_outline(self):
stdout, stderr = self.run_and_expect_success([
"custodian", "schema", "--outline", "--json", "aws"])
data = json.loads(stdout)
self.assertEqual(list(data.keys()), ["aws"])
self.assertTrue(len(data['aws']) > 100)
self.assertEqual(
sorted(data['aws']['aws.ec2'].keys()), ['actions', 'filters'])
self.assertTrue(len(data['aws']['aws.ec2']['actions']) > 10)
def test_schema_alias(self):
stdout, stderr = self.run_and_expect_success([
"custodian", "schema", "aws.network-addr"])
self.assertIn("aws.elastic-ip:", stdout)
def test_schema_alias_unqualified(self):
stdout, stderr = self.run_and_expect_success([
"custodian", "schema", "network-addr"])
self.assertIn("aws.elastic-ip:", stdout)
def test_schema(self):
# no options
stdout, stderr = self.run_and_expect_success(["custodian", "schema"])
data = yaml_load(stdout)
assert data['resources']
# summary option
self.run_and_expect_success(["custodian", "schema", "--summary"])
# json option
self.run_and_expect_success(["custodian", "schema", "--json"])
# with just a cloud
self.run_and_expect_success(["custodian", "schema", "aws"])
# with just a resource
self.run_and_expect_success(["custodian", "schema", "ec2"])
# with just a mode
self.run_and_expect_success(["custodian", "schema", "mode"])
# mode.type
self.run_and_expect_success(["custodian", "schema", "mode.phd"])
# resource.actions
self.run_and_expect_success(["custodian", "schema", "ec2.actions"])
# resource.filters
self.run_and_expect_success(["custodian", "schema", "ec2.filters"])
# specific item
self.run_and_expect_success(["custodian", "schema", "ec2.filters.tag-count"])
def test_invalid_options(self):
# invalid resource
self.run_and_expect_failure(["custodian", "schema", "fakeresource"], 1)
# invalid category
self.run_and_expect_failure(["custodian", "schema", "ec2.arglbargle"], 1)
# invalid item
self.run_and_expect_failure(
["custodian", "schema", "ec2.filters.nonexistent"], 1
)
# invalid number of selectors
self.run_and_expect_failure(["custodian", "schema", "ec2.filters.and.foo"], 1)
def test_schema_output(self):
output = self.get_output(["custodian", "schema"])
self.assertIn("aws.ec2", output)
# self.assertIn("azure.vm", output)
# self.assertIn("gcp.instance", output)
output = self.get_output(["custodian", "schema", "aws"])
self.assertIn("aws.ec2", output)
self.assertNotIn("azure.vm", output)
self.assertNotIn("gcp.instance", output)
output = self.get_output(["custodian", "schema", "aws.ec2"])
self.assertIn("actions:", output)
self.assertIn("filters:", output)
output = self.get_output(["custodian", "schema", "ec2"])
self.assertIn("actions:", output)
self.assertIn("filters:", output)
output = self.get_output(["custodian", "schema", "ec2.filters"])
self.assertNotIn("actions:", output)
self.assertIn("filters:", output)
output = self.get_output(["custodian", "schema", "ec2.filters.image"])
self.assertIn("Help", output)
def test_schema_expand(self):
# refs should only ever exist in a dictionary by itself
test_schema = {
'$ref': '#/definitions/filters_common/value_from'
}
result = ElementSchema.schema(generate()['definitions'], test_schema)
self.assertEqual(result, ValuesFrom.schema)
def test_schema_multi_expand(self):
test_schema = {
'schema1': {
'$ref': '#/definitions/filters_common/value_from'
},
'schema2': {
'$ref': '#/definitions/filters_common/value_from'
}
}
expected = yaml_dump({
'schema1': {
'type': 'object',
'additionalProperties': 'False',
'required': ['url'],
'properties': {
'url': {'type': 'string'},
'format': {'enum': ['csv', 'json', 'txt', 'csv2dict']},
'expr': {'oneOf': [
{'type': 'integer'},
{'type': 'string'}]}
}
},
'schema2': {
'type': 'object',
'additionalProperties': 'False',
'required': ['url'],
'properties': {
'url': {'type': 'string'},
'format': {'enum': ['csv', 'json', 'txt', 'csv2dict']},
'expr': {'oneOf': [
{'type': 'integer'},
{'type': 'string'}]}
}
}
})
result = yaml_dump(ElementSchema.schema(generate()['definitions'], test_schema))
self.assertEqual(result, expected)
def test_schema_expand_not_found(self):
test_schema = {
'$ref': '#/definitions/filters_common/invalid_schema'
}
result = ElementSchema.schema(generate()['definitions'], test_schema)
self.assertEqual(result, None)
class ReportTest(CliTest):
def test_report(self):
policy_name = "ec2-running-instances"
valid_policies = {
"policies": [
{
"name": policy_name,
"resource": "ec2",
"query": [{"instance-state-name": "running"}],
}
]
}
yaml_file = self.write_policy_file(valid_policies)
output = self.get_output(
["custodian", "report", "-s", self.output_dir, yaml_file]
)
self.assertIn("InstanceId", output)
self.assertIn("i-014296505597bf519", output)
# grid (ASCII table) output format
output = self.get_output(
[
"custodian",
"report",
"--format",
"grid",
"-s",
self.output_dir,
yaml_file,
]
)
self.assertIn("InstanceId", output)
self.assertIn("i-014296505597bf519", output)
# json format
output = self.get_output(
["custodian", "report", "--format", "json", "-s", self.output_dir, yaml_file]
)
self.assertTrue("i-014296505597bf519", json.loads(output)[0]['InstanceId'])
# empty file
temp_dir = self.get_temp_dir()
empty_policies = {"policies": []}
yaml_file = self.write_policy_file(empty_policies)
self.run_and_expect_failure(
["custodian", "report", "-s", temp_dir, yaml_file], 1
)
# more than 1 policy
policies = {
"policies": [
{"name": "foo", "resource": "s3"}, {"name": "bar", "resource": "ec2"}
]
}
yaml_file = self.write_policy_file(policies)
self.run_and_expect_failure(
["custodian", "report", "-s", temp_dir, yaml_file], 1
)
def test_warning_on_empty_policy_filter(self):
# This test is to examine the warning output supplied when -p is used and
# the resulting policy set is empty. It is not specific to the `report`
# subcommand - it is also used by `run` and a few other subcommands.
policy_name = "test-policy"
valid_policies = {
"policies": [
{
"name": policy_name,
"resource": "s3",
"filters": [{"tag:custodian_tagging": "not-null"}],
}
]
}
yaml_file = self.write_policy_file(valid_policies)
temp_dir = self.get_temp_dir()
bad_policy_name = policy_name + "-nonexistent"
log_output = self.capture_logging("custodian.commands")
self.run_and_expect_failure(
["custodian", "report", "-s", temp_dir, "-p", bad_policy_name, yaml_file], 1
)
self.assertIn(policy_name, log_output.getvalue())
bad_resource_name = "foo"
self.run_and_expect_failure(
["custodian", "report", "-s", temp_dir, "-t", bad_resource_name, yaml_file],
1,
)
class LogsTest(CliTest):
def test_logs(self):
temp_dir = self.get_temp_dir()
# Test 1 - empty file
empty_policies = {"policies": []}
yaml_file = self.write_policy_file(empty_policies)
self.run_and_expect_failure(["custodian", "logs", "-s", temp_dir, yaml_file], 1)
# Test 2 - more than one policy
policies = {
"policies": [
{"name": "foo", "resource": "s3"}, {"name": "bar", "resource": "ec2"}
]
}
yaml_file = self.write_policy_file(policies)
self.run_and_expect_failure(["custodian", "logs", "-s", temp_dir, yaml_file], 1)
# Test 3 - successful test
p_data = {
"name": "test-policy",
"resource": "rds",
"filters": [
{"key": "GroupName", "type": "security-group", "value": "default"}
],
"actions": [{"days": 10, "type": "retention"}],
}
yaml_file = self.write_policy_file({"policies": [p_data]})
output_dir = os.path.join(os.path.dirname(__file__), "data", "logs")
self.run_and_expect_failure(["custodian", "logs", "-s", output_dir, yaml_file], 1)
class RunTest(CliTest):
def test_ec2(self):
session_factory = self.replay_flight_data(
"test_ec2_state_transition_age_filter"
)
from c7n.policy import PolicyCollection
self.patch(
PolicyCollection,
"session_factory",
staticmethod(lambda x=None: session_factory),
)
temp_dir = self.get_temp_dir()
yaml_file = self.write_policy_file(
{
"policies": [
{
"name": "ec2-state-transition-age",
"resource": "ec2",
"filters": [
{"State.Name": "running"}, {"type": "state-age", "days": 30}
],
}
]
}
)
# TODO - capture logging and ensure the following
# self.assertIn('Running policy ec2-state-transition-age', logs)
# self.assertIn('metric:ResourceCount Count:1 policy:ec2-state-transition-age', logs)
self.run_and_expect_success(
[
"custodian",
"run",
"--cache",
temp_dir + "/cache",
"-s",
temp_dir,
yaml_file,
]
)
def test_error(self):
from c7n.policy import Policy
self.patch(
Policy, "__call__", lambda x: (_ for _ in ()).throw(Exception("foobar"))
)
#
# Make sure that if the policy causes an exception we error out
#
temp_dir = self.get_temp_dir()
yaml_file = self.write_policy_file(
{
"policies": [
{
"name": "error",
"resource": "ec2",
"filters": [
{"State.Name": "running"}, {"type": "state-age", "days": 30}
],
}
]
}
)
self.run_and_expect_failure(
[
"custodian",
"run",
"--cache",
temp_dir + "/cache",
"-s",
temp_dir,
yaml_file,
],
2,
)
#
# Test --debug
#
class CustomError(Exception):
pass
import pdb
self.patch(pdb, "post_mortem", lambda x: (_ for _ in ()).throw(CustomError))
self.run_and_expect_exception(
["custodian", "run", "-s", temp_dir, "--debug", yaml_file], CustomError
)
class MetricsTest(CliTest):
def test_metrics(self):
session_factory = self.replay_flight_data("test_lambda_policy_metrics")
from c7n.policy import PolicyCollection
self.patch(
PolicyCollection,
"session_factory",
staticmethod(lambda x=None: session_factory),
)
yaml_file = self.write_policy_file(
{
"policies": [
{
"name": "ec2-tag-compliance-v6",
"resource": "ec2",
"mode": {"type": "ec2-instance-state", "events": ["running"]},
"filters": [
{"tag:custodian_status": "absent"},
{
"or": [
{"tag:App": "absent"},
{"tag:Env": "absent"},
{"tag:Owner": "absent"},
]
},
],
}
]
}
)
end = datetime.utcnow()
start = end - timedelta(14)
period = 24 * 60 * 60 * 14
self.run_and_expect_failure(
[
"custodian",
"metrics",
"--start",
str(start),
"--end",
str(end),
"--period",
str(period),
yaml_file,
],
1
)
def test_metrics_get_endpoints(self):
#
# Test for defaults when --start is not supplied
#
class FakeOptions:
start = end = None
days = 5
options = FakeOptions()
start, end = commands._metrics_get_endpoints(options)
self.assertEqual((end - start).days, options.days)
#
# Test that --start and --end have to be passed together
#
policy = {
"policies": [
{
"name": "metrics-test",
"resource": "ec2",
"query": [{"instance-state-name": "running"}],
}
]
}
yaml_file = self.write_policy_file(policy)
self.run_and_expect_failure(
["custodian", "metrics", "--start", "1", yaml_file], 1
)
class MiscTest(CliTest):
def test_no_args(self):
stdout, stderr = self.run_and_expect_failure(["custodian"], 2)
self.assertIn("metrics", stderr)
self.assertIn("logs", stderr)
def test_empty_policy_file(self):
# An empty policy file loads no policies and is rejected with exit code 1
temp_dir = self.get_temp_dir()
yaml_file = self.write_policy_file({})
self.run_and_expect_failure(
["custodian", "run", "-s", temp_dir, yaml_file], 1)
def test_nonexistent_policy_file(self):
temp_dir = self.get_temp_dir()
yaml_file = self.write_policy_file({})
nonexistent = yaml_file + ".bad"
self.run_and_expect_failure(
["custodian", "run", "-s", temp_dir, yaml_file, nonexistent], 1
)
def test_duplicate_policy(self):
policy = {
"policies": [
{
"name": "metrics-test",
"resource": "ec2",
"query": [{"instance-state-name": "running"}],
}
]
}
temp_dir = self.get_temp_dir()
yaml_file = self.write_policy_file(policy)
self.run_and_expect_failure(
["custodian", "run", "-s", temp_dir, yaml_file, yaml_file], 1
)
def test_failure_with_no_default_region(self):
policy = {"policies": [{"name": "will-never-run", "resource": "ec2"}]}
temp_dir = self.get_temp_dir()
yaml_file = self.write_policy_file(policy)
self.patch(aws, "get_profile_session", lambda x: None)
self.run_and_expect_failure(["custodian", "run", "-s", temp_dir, yaml_file], 1)
|
|
# XXX clean up these tests to use more uniform helpers
import py
from rpython.flowspace.model import Variable, Constant, checkgraph
from rpython.translator.backendopt import canraise
from rpython.translator.backendopt.inline import (simple_inline_function,
CannotInline, auto_inlining, Inliner, collect_called_graphs,
measure_median_execution_cost, instrument_inline_candidates,
auto_inline_graphs)
from rpython.translator.translator import TranslationContext, graphof
from rpython.rtyper.llinterp import LLInterpreter
from rpython.rtyper.test.tool import BaseRtypingTest
from rpython.rlib.rarithmetic import ovfcheck
from rpython.translator.test.snippet import is_perfect_number
from rpython.translator.backendopt.all import INLINE_THRESHOLD_FOR_TEST
from rpython.conftest import option
from rpython.translator.backendopt import removenoops
from rpython.flowspace.model import summary
def sanity_check(t):
# look for missing '.concretetype'
for graph in t.graphs:
checkgraph(graph)
for node in graph.iterblocks():
for v in node.inputargs:
assert hasattr(v, 'concretetype')
for op in node.operations:
for v in op.args:
assert hasattr(v, 'concretetype')
assert hasattr(op.result, 'concretetype')
for node in graph.iterlinks():
if node.exitcase is not None:
assert hasattr(node, 'llexitcase')
for v in node.args:
assert hasattr(v, 'concretetype')
if isinstance(node.last_exception, (Variable, Constant)):
assert hasattr(node.last_exception, 'concretetype')
if isinstance(node.last_exc_value, (Variable, Constant)):
assert hasattr(node.last_exc_value, 'concretetype')
class CustomError1(Exception):
def __init__(self):
self.data = 123
class CustomError2(Exception):
def __init__(self):
self.data2 = 456
class TestInline(BaseRtypingTest):
def translate(self, func, argtypes):
t = TranslationContext()
t.buildannotator().build_types(func, argtypes)
t.buildrtyper().specialize()
return t
def check_inline(self, func, in_func, sig, entry=None,
inline_guarded_calls=False,
graph=False):
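# Translate `entry`, force-inline calls to `func` inside the graph of
# `in_func`, and return a helper that evaluates the resulting entry graph
# with LLInterpreter (optionally also returning func's graph).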
if entry is None:
entry = in_func
t = self.translate(entry, sig)
# inline!
sanity_check(t) # also check before inlining (so we don't blame it)
if option.view:
t.view()
raise_analyzer = canraise.RaiseAnalyzer(t)
inliner = Inliner(t, graphof(t, in_func), func,
t.rtyper.lltype_to_classdef_mapping(),
inline_guarded_calls,
raise_analyzer=raise_analyzer)
inliner.inline_all()
if option.view:
t.view()
sanity_check(t)
interp = LLInterpreter(t.rtyper)
def eval_func(args):
return interp.eval_graph(graphof(t, entry), args)
if graph:
return eval_func, graphof(t, func)
return eval_func
def check_auto_inlining(self, func, sig, multiplier=None, call_count_check=False,
remove_same_as=False, heuristic=None, const_fold_first=False):
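# Translate `func`, optionally constant-fold first, then run the automatic
# inliner over all graphs using INLINE_THRESHOLD_FOR_TEST (scaled by
# `multiplier`) and return an LLInterpreter-backed eval helper plus the
# translation context.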
t = self.translate(func, sig)
if const_fold_first:
from rpython.translator.backendopt.constfold import constant_fold_graph
from rpython.translator.simplify import eliminate_empty_blocks
for graph in t.graphs:
constant_fold_graph(graph)
eliminate_empty_blocks(graph)
if option.view:
t.view()
# inline!
sanity_check(t) # also check before inlining (so we don't blame it)
threshold = INLINE_THRESHOLD_FOR_TEST
if multiplier is not None:
threshold *= multiplier
call_count_pred = None
if call_count_check:
call_count_pred = lambda lbl: True
instrument_inline_candidates(t.graphs, threshold)
if remove_same_as:
for graph in t.graphs:
removenoops.remove_same_as(graph)
if heuristic is not None:
kwargs = {"heuristic": heuristic}
else:
kwargs = {}
auto_inlining(t, threshold, call_count_pred=call_count_pred, **kwargs)
sanity_check(t)
if option.view:
t.view()
interp = LLInterpreter(t.rtyper)
def eval_func(args):
return interp.eval_graph(graphof(t, func), args)
return eval_func, t
def test_inline_simple(self):
def f(x, y):
return (g(x, y) + 1) * x
def g(x, y):
if x > 0:
return x * y
else:
return -x * y
eval_func = self.check_inline(g, f, [int, int])
result = eval_func([-1, 5])
assert result == f(-1, 5)
result = eval_func([2, 12])
assert result == f(2, 12)
def test_nothing_to_inline(self):
def f():
return 1
def g():
return 2
eval_func = self.check_inline(g, f, [])
assert eval_func([]) == 1
def test_inline_big(self):
def f(x):
result = []
for i in range(1, x+1):
if is_perfect_number(i):
result.append(i)
return result
eval_func = self.check_inline(is_perfect_number, f, [int])
result = eval_func([10])
result = self.ll_to_list(result)
assert len(result) == len(f(10))
def test_inline_raising(self):
def f(x):
if x == 1:
raise CustomError1
return x
def g(x):
a = f(x)
if x == 2:
raise CustomError2
def h(x):
try:
g(x)
except CustomError1:
return 1
except CustomError2:
return 2
return x
eval_func = self.check_inline(f,g, [int], entry=h)
result = eval_func([0])
assert result == 0
result = eval_func([1])
assert result == 1
result = eval_func([2])
assert result == 2
def test_inline_several_times(self):
def f(x):
return (x + 1) * 2
def g(x):
if x:
a = f(x) + f(x)
else:
a = f(x) + 1
return a + f(x)
eval_func = self.check_inline(f, g, [int])
result = eval_func([0])
assert result == g(0)
result = eval_func([42])
assert result == g(42)
def test_always_inline(self):
def f(x, y, z, k):
p = (((x, y), z), k)
return p[0][0][0] + p[-1]
f._always_inline_ = True
def g(x, y, z, k):
a = f(x, y, z, k)
return a
eval_func, t = self.check_auto_inlining(g, [int, int, int, int], multiplier=0.1)
graph = graphof(t, g)
s = summary(graph)
assert len(s) > 3
def test_inline_exceptions(self):
customError1 = CustomError1()
customError2 = CustomError2()
def f(x):
if x == 0:
raise customError1
if x == 1:
raise customError2
def g(x):
try:
f(x)
except CustomError1:
return 2
except CustomError2:
return x+2
return 1
eval_func = self.check_inline(f, g, [int])
result = eval_func([0])
assert result == 2
result = eval_func([1])
assert result == 3
result = eval_func([42])
assert result == 1
def test_inline_const_exceptions(self):
valueError = ValueError()
keyError = KeyError()
def f(x):
if x == 0:
raise valueError
if x == 1:
raise keyError
def g(x):
try:
f(x)
except ValueError:
return 2
except KeyError:
return x+2
return 1
eval_func = self.check_inline(f, g, [int])
result = eval_func([0])
assert result == 2
result = eval_func([1])
assert result == 3
result = eval_func([42])
assert result == 1
def test_inline_exception_guarded(self):
def h(x):
if x == 1:
raise CustomError1()
elif x == 2:
raise CustomError2()
return 1
def f(x):
try:
return h(x)
except:
return 87
def g(x):
try:
return f(x)
except CustomError1:
return 2
eval_func = self.check_inline(f, g, [int], inline_guarded_calls=True)
result = eval_func([0])
assert result == 1
result = eval_func([1])
assert result == 87
result = eval_func([2])
assert result == 87
def test_inline_with_raising_non_call_op(self):
class A:
pass
def f():
return A()
def g():
try:
a = f()
except MemoryError:
return 1
return 2
py.test.raises(CannotInline, self.check_inline, f, g, [])
def test_inline_var_exception(self):
def f(x):
e = None
if x == 0:
e = CustomError1()
elif x == 1:
e = KeyError()
if x == 0 or x == 1:
raise e
def g(x):
try:
f(x)
except CustomError1:
return 2
except KeyError:
return 3
return 1
eval_func, _ = self.check_auto_inlining(g, [int], multiplier=10)
result = eval_func([0])
assert result == 2
result = eval_func([1])
assert result == 3
result = eval_func([42])
assert result == 1
def test_inline_nonraising_into_catching(self):
def f(x):
return x+1
def g(x):
try:
return f(x)
except KeyError:
return 42
eval_func = self.check_inline(f, g, [int])
result = eval_func([7654])
assert result == 7655
def DONOTtest_call_call(self):
# for reference. Just remove this test if we decide not to support
# catching exceptions while inlining a graph that contains further
# direct_calls.
def e(x):
if x < 0:
raise KeyError
return x+1
def f(x):
return e(x)+2
def g(x):
try:
return f(x)+3
except KeyError:
return -1
eval_func = self.check_inline(f, g, [int])
result = eval_func([100])
assert result == 106
result = eval_func([-100])
assert result == -1
def test_for_loop(self):
def f(x):
result = 0
for i in range(0, x):
result += i
return result
t = self.translate(f, [int])
sanity_check(t) # also check before inlining (so we don't blame it)
for graph in t.graphs:
if graph.name.startswith('ll_rangenext'):
break
else:
assert 0, "cannot find ll_rangenext_*() function"
simple_inline_function(t, graph, graphof(t, f))
sanity_check(t)
interp = LLInterpreter(t.rtyper)
result = interp.eval_graph(graphof(t, f), [10])
assert result == 45
def test_inline_constructor(self):
class A:
def __init__(self, x, y):
self.bounds = (x, y)
def area(self, height=10):
return height * (self.bounds[1] - self.bounds[0])
def f(i):
a = A(117, i)
return a.area()
eval_func = self.check_inline(A.__init__.im_func, f, [int])
result = eval_func([120])
assert result == 30
def test_cannot_inline_recursive_function(self):
def factorial(n):
if n > 1:
return n * factorial(n-1)
else:
return 1
def f(n):
return factorial(n//2)
py.test.raises(CannotInline, self.check_inline, factorial, f, [int])
def test_auto_inlining_small_call_big(self):
def leaf(n):
total = 0
i = 0
while i < n:
total += i
if total > 100:
raise OverflowError
i += 1
return total
def g(n):
return leaf(n)
def f(n):
try:
return g(n)
except OverflowError:
return -1
eval_func, t = self.check_auto_inlining(f, [int], multiplier=10)
f_graph = graphof(t, f)
assert len(collect_called_graphs(f_graph, t)) == 0
result = eval_func([10])
assert result == 45
result = eval_func([15])
assert result == -1
def test_auto_inlining_small_call_big_call_count(self):
def leaf(n):
total = 0
i = 0
while i < n:
total += i
if total > 100:
raise OverflowError
i += 1
return total
def g(n):
return leaf(n)
def f(n):
try:
return g(n)
except OverflowError:
return -1
eval_func, t = self.check_auto_inlining(f, [int], multiplier=10,
call_count_check=True)
f_graph = graphof(t, f)
assert len(collect_called_graphs(f_graph, t)) == 0
result = eval_func([10])
assert result == 45
result = eval_func([15])
assert result == -1
def test_inline_exception_catching(self):
def f3():
raise CustomError1
def f2():
try:
f3()
except CustomError1:
return True
else:
return False
def f():
return f2()
eval_func = self.check_inline(f2, f, [])
result = eval_func([])
assert result is True
def test_inline_catching_different_exception(self):
d = {1: 2}
def f2(n):
try:
return ovfcheck(n+1)
except OverflowError:
raise
def f(n):
try:
return f2(n)
except ValueError:
return -1
eval_func = self.check_inline(f2, f, [int])
result = eval_func([54])
assert result == 55
def test_inline_raiseonly(self):
c = CustomError1()
def f2(x):
raise c
def f(x):
try:
return f2(x)
except CustomError1:
return 42
eval_func = self.check_inline(f2, f, [int])
result = eval_func([98371])
assert result == 42
def test_measure_median_execution_cost(self):
def f(x):
x += 1
x += 1
x += 1
while True:
x += 1
x += 1
x += 1
if x: break
x += 1
x += 1
x += 1
x += 1
x += 1
x += 1
return x
t = TranslationContext()
graph = t.buildflowgraph(f)
res = measure_median_execution_cost(graph)
assert round(res, 5) == round(32.333333333, 5)
def test_indirect_call_with_exception(self):
class Dummy:
pass
def x1():
return Dummy() # can raise MemoryError
def x2():
return None
def x3(x):
if x:
f = x1
else:
f = x2
return f()
def x4():
try:
x3(0)
x3(1)
except CustomError2:
return 0
return 1
assert x4() == 1
py.test.raises(CannotInline, self.check_inline, x3, x4, [])
def test_list_iteration(self):
def f():
tot = 0
for item in [1,2,3]:
tot += item
return tot
eval_func, t = self.check_auto_inlining(f, [])
f_graph = graphof(t, f)
called_graphs = collect_called_graphs(f_graph, t)
assert len(called_graphs) == 0
result = eval_func([])
assert result == 6
def test_bug_in_find_exception_type(self):
def h():
pass
def g(i):
if i > 0:
raise IndexError
else:
h()
def f(i):
try:
g(i)
except IndexError:
pass
eval_func, t = self.check_auto_inlining(f, [int], remove_same_as=True,
const_fold_first=True)
eval_func([-66])
eval_func([282])
def test_correct_keepalive_placement(self):
def h(x):
if not x:
raise ValueError
return 1
def f(x):
s = "a %s" % (x, )
try:
h(len(s))
except ValueError:
pass
return -42
eval_func, t = self.check_auto_inlining(f, [int])
res = eval_func([42])
assert res == -42
def test_keepalive_hard_case(self):
from rpython.rtyper.lltypesystem import lltype
Y = lltype.Struct('y', ('n', lltype.Signed))
X = lltype.GcStruct('x', ('y', Y))
def g(x):
if x:
return 3
else:
return 4
def f():
x = lltype.malloc(X)
x.y.n = 2
y = x.y
z1 = g(y.n)
z = y.n
return z+z1
eval_func = self.check_inline(g, f, [])
res = eval_func([])
assert res == 5
def test_auto_inline_graphs_from_anywhere(self):
def leaf(n):
return n
def f(n):
return leaf(n)
t = self.translate(f, [int])
f_graph = graphof(t, f)
assert len(collect_called_graphs(f_graph, t)) == 1
auto_inline_graphs(t, [f_graph], 32)
assert len(collect_called_graphs(f_graph, t)) == 1
auto_inline_graphs(t, [f_graph], 32, inline_graph_from_anywhere=True)
assert len(collect_called_graphs(f_graph, t)) == 0
|
|
import re
import os
import six
class Compiler(object):
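# Interpolation markers: an optional leading backslash, then '#' (escaped
# output) or '!' (raw output), then the expression inside braces, e.g.
# "#{user}" or "!{body}".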
RE_INTERPOLATE = re.compile(r'(\\)?([#!]){(.*?)}')
doctypes = {
'5': '<!DOCTYPE html>'
, 'xml': '<?xml version="1.0" encoding="utf-8" ?>'
, 'default': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'
, 'transitional': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'
, 'strict': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">'
, 'frameset': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Frameset//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd">'
, '1.1': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">'
, 'basic': '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML Basic 1.1//EN" "http://www.w3.org/TR/xhtml-basic/xhtml-basic11.dtd">'
, 'mobile': '<!DOCTYPE html PUBLIC "-//WAPFORUM//DTD XHTML Mobile 1.2//EN" "http://www.openmobilealliance.org/tech/DTD/xhtml-mobile12.dtd">'
}
inlineTags = [
'a'
, 'abbr'
, 'acronym'
, 'b'
, 'br'
, 'code'
, 'em'
, 'font'
, 'i'
, 'img'
, 'ins'
, 'kbd'
, 'map'
, 'samp'
, 'small'
, 'span'
, 'strong'
, 'sub'
, 'sup'
, 'textarea'
]
selfClosing = [
'meta'
, 'img'
, 'link'
, 'input'
, 'area'
, 'base'
, 'col'
, 'br'
, 'hr'
]
autocloseCode = 'if,for,block,filter,autoescape,with,trans,spaceless,comment,cache,macro,localize,compress,raw'.split(',')
filters = {}
def __init__(self, node, **options):
self.options = options
self.node = node
self.hasCompiledDoctype = False
self.hasCompiledTag = False
self.pp = options.get('pretty', True)
self.debug = options.get('compileDebug', False) is not False
self.filters.update(options.get('filters', {}))
self.doctypes.update(options.get('doctypes', {}))
# self.var_processor = options.get('var_processor', lambda x: x)
self.selfClosing.extend(options.get('selfClosing', []))
self.autocloseCode.extend(options.get('autocloseCode', []))
self.inlineTags.extend(options.get('inlineTags', []))
self.useRuntime = options.get('useRuntime', True)
self.extension = options.get('extension', None) or '.jade'
self.indents = 0
self.doctype = None
self.terse = False
self.xml = False
self.mixing = 0
self.variable_start_string = options.get("variable_start_string", "{{")
self.variable_end_string = options.get("variable_end_string", "}}")
if 'doctype' in self.options: self.setDoctype(options['doctype'])
self.instring = False
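# Values written as "_ some text" are rewritten to a gettext-style
# _("some text") call.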
def var_processor(self, var):
if isinstance(var,six.string_types) and var.startswith('_ '):
var = '_("%s")'%var[2:]
return var
def compile_top(self):
return ''
def compile(self):
self.buf = [self.compile_top()]
self.lastBufferedIdx = -1
self.visit(self.node)
compiled = u''.join(self.buf)
if isinstance(compiled, six.binary_type):
compiled = six.text_type(compiled, 'utf8')
return compiled
def setDoctype(self, name):
self.doctype = self.doctypes.get(name or 'default',
'<!DOCTYPE %s>' % name)
self.terse = name in ['5','html']
self.xml = self.doctype.startswith('<?xml')
def buffer(self, str):
if self.lastBufferedIdx == len(self.buf):
self.lastBuffered += str
self.buf[self.lastBufferedIdx - 1] = self.lastBuffered
else:
self.buf.append(str)
self.lastBuffered = str
self.lastBufferedIdx = len(self.buf)
def visit(self, node, *args, **kwargs):
# debug = self.debug
# if debug:
# self.buf.append('__jade.unshift({ lineno: %d, filename: %s });' % (node.line,('"%s"'%node.filename) if node.filename else '__jade[0].filename'));
# if node.debug==False and self.debug:
# self.buf.pop()
# self.buf.pop()
self.visitNode(node, *args, **kwargs)
# if debug: self.buf.append('__jade.shift();')
def visitNode (self, node, *args, **kwargs):
name = node.__class__.__name__
if self.instring and name != 'Tag':
self.buffer('\n')
self.instring = False
return getattr(self, 'visit%s' % name)(node, *args, **kwargs)
def visitLiteral(self, node):
self.buffer(node.str)
def visitBlock(self, block):
for node in block.nodes:
self.visit(node)
def visitCodeBlock(self, block):
self.buffer('{%% block %s %%}' % block.name)
if block.mode=='prepend':
self.buffer('%ssuper()%s' % (self.variable_start_string,
self.variable_end_string))
self.visitBlock(block)
if block.mode == 'append':
self.buffer('%ssuper()%s' % (self.variable_start_string,
self.variable_end_string))
self.buffer('{% endblock %}')
def visitDoctype(self,doctype=None):
if doctype and (doctype.val or not self.doctype):
self.setDoctype(doctype.val or 'default')
if self.doctype:
self.buffer(self.doctype)
self.hasCompiledDoctype = True
def visitMixin(self,mixin):
if mixin.block:
self.buffer('{%% macro %s(%s) %%}' % (mixin.name, mixin.args))
self.visitBlock(mixin.block)
self.buffer('{% endmacro %}')
else:
self.buffer('%s%s(%s)%s' % (self.variable_start_string, mixin.name,
mixin.args, self.variable_end_string))
def visitTag(self,tag):
self.indents += 1
name = tag.name
if not self.hasCompiledTag:
if not self.hasCompiledDoctype and 'html' == name:
self.visitDoctype()
self.hasCompiledTag = True
if self.pp and name not in self.inlineTags and not tag.inline:
self.buffer('\n' + ' ' * (self.indents - 1))
if name in self.inlineTags or tag.inline:
self.instring = False
closed = name in self.selfClosing and not self.xml
if tag.text:
t = tag.text.nodes[0]
if t.startswith(u'/'):
if len(t) > 1:
raise Exception('%s is self closing and should not have content.' % name)
closed = True
self.buffer('<%s' % name)
self.visitAttributes(tag.attrs)
self.buffer('/>' if not self.terse and closed else '>')
if not closed:
if tag.code: self.visitCode(tag.code)
if tag.text: self.buffer(self.interpolate(tag.text.nodes[0].lstrip()))
self.escape = 'pre' == tag.name
# empirically check if we only contain text
textOnly = tag.textOnly or not bool(len(tag.block.nodes))
self.instring = False
self.visit(tag.block)
if self.pp and name not in self.inlineTags and not textOnly:
self.buffer('\n' + ' ' * (self.indents-1))
self.buffer('</%s>' % name)
self.indents -= 1
def visitFilter(self,filter):
if filter.name not in self.filters:
if filter.isASTFilter:
raise Exception('unknown ast filter "%s"' % filter.name)
else:
raise Exception('unknown filter "%s"' % filter.name)
fn = self.filters.get(filter.name)
if filter.isASTFilter:
self.buf.append(fn(filter.block, self, filter.attrs))
else:
text = ''.join(filter.block.nodes)
text = self.interpolate(text)
filter.attrs = filter.attrs or {}
filter.attrs['filename'] = self.options.get('filename', None)
self.buffer(fn(text, filter.attrs))
def _interpolate(self, attr, repl):
return self.RE_INTERPOLATE.sub(lambda matchobj:repl(matchobj.group(3)),
attr)
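# Rewrite interpolation markers into template variable syntax; with the
# default {{ }} delimiters, "Hi #{user}" becomes "Hi {{user|escape}}" and
# "Hi !{user}" becomes "Hi {{user}}".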
def interpolate(self, text, escape=None):
def repl(matchobj):
if escape is None:
if matchobj.group(2) == '!':
filter_string = ''
else:
filter_string = '|escape'
elif escape is True:
filter_string = '|escape'
elif escape is False:
filter_string = ''
return self.variable_start_string + matchobj.group(3) + \
filter_string + self.variable_end_string
return self.RE_INTERPOLATE.sub(repl, text)
def visitText(self,text):
text = ''.join(text.nodes)
text = self.interpolate(text)
self.buffer(text)
if self.pp:
self.buffer('\n')
def visitString(self,text):
instring = not text.inline
text = ''.join(text.nodes)
text = self.interpolate(text)
self.buffer(text)
self.instring = instring
def visitComment(self,comment):
if not comment.buffer: return
if self.pp:
self.buffer('\n' + ' ' * (self.indents))
self.buffer('<!--%s-->' % comment.val)
def visitAssignment(self,assignment):
self.buffer('{%% set %s = %s %%}' % (assignment.name, assignment.val))
def format_path(self,path):
has_extension = os.path.basename(path).find('.') > -1
if not has_extension:
path += self.extension
return path
def visitExtends(self,node):
path = self.format_path(node.path)
self.buffer('{%% extends "%s" %%}' % (path))
def visitInclude(self,node):
path = self.format_path(node.path)
self.buffer('{%% include "%s" %%}' % (path))
def visitBlockComment(self, comment):
if not comment.buffer:
return
isConditional = comment.val.strip().startswith('if')
self.buffer('<!--[%s]>' % comment.val.strip() if isConditional else '<!--%s' % comment.val)
self.visit(comment.block)
self.buffer('<![endif]-->' if isConditional else '-->')
def visitConditional(self, conditional):
TYPE_CODE = {
'if': lambda x: 'if %s'%x,
'unless': lambda x: 'if not %s'%x,
'elif': lambda x: 'elif %s'%x,
'else': lambda x: 'else'
}
self.buf.append('{%% %s %%}' % TYPE_CODE[conditional.type](conditional.sentence))
if conditional.block:
self.visit(conditional.block)
for next in conditional.next:
self.visitConditional(next)
if conditional.type in ['if','unless']:
self.buf.append('{% endif %}')
def visitVar(self, var, escape=False):
var = self.var_processor(var)
return ('%s%s%s%s' % (self.variable_start_string, var,
'|escape' if escape else '', self.variable_end_string))
def visitCode(self,code):
if code.buffer:
val = code.val.lstrip()
self.buf.append(self.visitVar(val, code.escape))
else:
self.buf.append('{%% %s %%}' % code.val)
if code.block:
# if not code.buffer: self.buf.append('{')
self.visit(code.block)
# if not code.buffer: self.buf.append('}')
if not code.buffer:
codeTag = code.val.strip().split(' ', 1)[0]
if codeTag in self.autocloseCode:
self.buf.append('{%% end%s %%}' % codeTag)
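# 'each' loops compile to a {% for %} block that iterates via the
# __pyjade_iter runtime filter.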
def visitEach(self,each):
self.buf.append('{%% for %s in %s|__pyjade_iter:%d %%}' % (','.join(each.keys), each.obj, len(each.keys)))
self.visit(each.block)
self.buf.append('{% endfor %}')
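# Dynamic (non-static) attributes are rendered at runtime through a
# __pyjade_attrs(...) call wrapped in the variable delimiters.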
def attributes(self,attrs):
return "%s__pyjade_attrs(%s)%s" % (self.variable_start_string, attrs, self.variable_end_string)
def visitDynamicAttributes(self, attrs):
buf, classes, params = [], [], {}
terse='terse=True' if self.terse else ''
for attr in attrs:
if attr['name'] == 'class':
classes.append('(%s)' % attr['val'])
else:
pair = "('%s',(%s))" % (attr['name'], attr['val'])
buf.append(pair)
if classes:
classes = " , ".join(classes)
buf.append("('class', (%s))" % classes)
buf = ', '.join(buf)
if self.terse: params['terse'] = 'True'
if buf: params['attrs'] = '[%s]' % buf
param_string = ', '.join(['%s=%s' % (n, v) for n, v in six.iteritems(params)])
if buf or terse:
self.buf.append(self.attributes(param_string))
def visitAttributes(self, attrs):
temp_attrs = []
for attr in attrs:
if (not self.useRuntime and attr['name'] != 'class') or attr['static']:
if temp_attrs:
self.visitDynamicAttributes(temp_attrs)
temp_attrs = []
n, v = attr['name'], attr['val']
if isinstance(v, six.string_types):
if self.useRuntime or attr['static']:
self.buf.append(' %s=%s' % (n, v))
else:
self.buf.append(' %s="%s"' % (n, self.visitVar(v)))
elif v is True:
if self.terse:
self.buf.append(' %s' % (n,))
else:
self.buf.append(' %s="%s"' % (n, n))
else:
temp_attrs.append(attr)
if temp_attrs: self.visitDynamicAttributes(temp_attrs)
@classmethod
def register_filter(cls, name, f):
cls.filters[name] = f
@classmethod
def register_autoclosecode(cls, name):
cls.autocloseCode.append(name)
|
|
"""
Tests for models in the photo application.
"""
from datetime import date
from django.core.exceptions import ValidationError
from django.test import TestCase
from photo import models
# Model tests
class FilmFormatTestCase(TestCase):
"""
Tests for :model:`photo.FilmFormat`
"""
def test_str(self):
"""
Test the __str__ method on :model:`photo.FilmFormat`
"""
test_format = models.FilmFormat(name="test film_format",
roll_film=True)
self.assertEqual(str(test_format), "test film_format",
"FilmFormat.__str__ returned unexpected value.")
class ManufacturerTestCase(TestCase):
"""
Tests for :model:`photo.Manufacturer`
"""
def test_str(self):
"""
Test the __str__ method on :model:`photo.Manufacturer`
"""
test_manufacturer = models.Manufacturer(name="test manufacturer")
self.assertEqual(str(test_manufacturer), "test manufacturer",
"Manufacturer.__str__ returned unexpected value.")
class FilmTestCase(TestCase):
"""
Tests for :model:`photo.Film`
"""
@classmethod
def setUpTestData(cls):
cls.manufacturer = models.Manufacturer.objects.create(
name="test manufacturer", short_name="test")
def test_str(self):
"""
Test the __str__ method on :model:`photo.Film`
"""
test_film = models.Film(name="test film",
manufacturer=self.manufacturer)
self.assertEqual(str(test_film), "test test film",
"Film.__str__ returned unexpected value.")
class DeveloperTestCase(TestCase):
"""
Tests for :model:`photo.Developer`
"""
@classmethod
def setUpTestData(cls):
cls.manufacturer = models.Manufacturer.objects.create(
name="test manufacturer", short_name="test")
def test_str(self):
"""
Test the __str__ method on :model:`photo.Developer`
"""
test_developer = models.Developer(name="test developer",
manufacturer=self.manufacturer)
self.assertEqual(str(test_developer), "test test developer",
"Developer.__str__ returned unexpected value.")
class FilmRollTestCase(TestCase):
"""
Tests for :model:`photo.FilmRoll`
"""
@classmethod
def setUpTestData(cls):
manufacturer = models.Manufacturer.objects.create(
name="test manufacturer", short_name="test")
cls.film = models.Film.objects.create(name="test film",
manufacturer=manufacturer,
speed=200, process="B&W")
def test_clean_neither(self):
"""
Test the clean method on :model:`photo.FilmRoll`, in the case where
neither shot_speed or developed_speed are provided.
"""
test_film_roll = models.FilmRoll(name="test film_roll",
film=self.film)
test_film_roll.clean()
self.assertEqual(test_film_roll.shot_speed, self.film.speed,
"shot_speed set incorrectly")
self.assertEqual(test_film_roll.developed_speed, self.film.speed,
"developed_speed set incorrectly")
def test_clean_only_shot_speed(self):
"""
Test the clean method on :model:`photo.FilmRoll`, in the case where
only shot_speed is provided.
"""
test_film_roll = models.FilmRoll(name="test film_roll",
film=self.film,
shot_speed=400)
test_film_roll.clean()
self.assertEqual(test_film_roll.shot_speed, 400,
"shot_speed set incorrectly")
self.assertEqual(test_film_roll.developed_speed, 400,
"developed_speed set incorrectly")
def test_clean_only_developed(self):
"""
Test the clean method on :model:`photo.FilmRoll`, in the case where
only developed_speed is provided.
"""
test_film_roll = models.FilmRoll(name="test film_roll",
film=self.film,
developed_speed=400)
test_film_roll.clean()
self.assertEqual(test_film_roll.shot_speed, self.film.speed,
"shot_speed set incorrectly")
self.assertEqual(test_film_roll.developed_speed, 400,
"developed_speed set incorrectly")
def test_clean_both(self):
"""
Test the clean method on :model:`photo.FilmRoll`, in the case where
both shot_speed and developed_speed are provided.
"""
test_film_roll = models.FilmRoll(name="test film_roll",
film=self.film,
shot_speed=320,
developed_speed=400)
test_film_roll.clean()
self.assertEqual(test_film_roll.shot_speed, 320,
"shot_speed set incorrectly")
self.assertEqual(test_film_roll.developed_speed, 400,
"developed_speed set incorrectly")
def test_str(self):
"""
Test the __str__ method on :model:`photo.FilmRoll`
"""
test_film_roll = models.FilmRoll(name="test film_roll")
self.assertEqual(str(test_film_roll), "test film_roll",
"FilmRoll.__str__ returned unexpected value.")
class PhotoPaperFinishTestCase(TestCase):
"""
Tests for :model:`photo.PhotoPaperFinish`
"""
def test_str(self):
"""
Test the __str__ method on :model:`photo.PhotoPaperFinish`
"""
test_photo_paper_finish = models.PhotoPaperFinish(name="test photo_paper_finish")
self.assertEqual(str(test_photo_paper_finish),
"test photo_paper_finish",
"PhotoPaperFinish.__str__ returned unexpected value.")
class PhotoPaperTestCase(TestCase):
"""
Tests for :model:`photo.PhotoPaper`
"""
@classmethod
def setUpTestData(cls):
cls.manufacturer = models.Manufacturer.objects.create(
name="test manufacturer", short_name="test")
def test_clean_multigrade_valid(self):
"""
Test PhotoPaper.clean, for valid multigrade paper
"""
test_photo_paper = models.PhotoPaper(name="test photo_paper",
multigrade=True)
try:
test_photo_paper.clean()
except ValidationError:
self.fail("Valid PhotoPaper raised ValidationError")
def test_clean_multigrade_invalid(self):
"""
Test PhotoPaper.clean, for invalid multigrade paper
"""
test_photo_paper = models.PhotoPaper(name="test photo_paper",
multigrade=True,
grade=2)
self.assertRaises(ValidationError, test_photo_paper.clean)
def test_clean_graded_valid(self):
"""
Test PhotoPaper.clean, for valid graded paper
"""
test_photo_paper = models.PhotoPaper(name="test photo_paper",
multigrade=False,
grade=2)
try:
test_photo_paper.clean()
except ValidationError:
self.fail("Valid PhotoPaper raised ValidationError")
def test_clean_graded_invalid(self):
"""
Test PhotoPaper.clean, for invalid graded paper
"""
test_photo_paper = models.PhotoPaper(name="test photo_paper",
multigrade=False)
self.assertRaises(ValidationError, test_photo_paper.clean)
def test_str(self):
"""
Test the __str__ method on :model:`photo.PhotoPaper`
"""
test_photo_paper = models.PhotoPaper(name="test photo_paper",
manufacturer=self.manufacturer)
self.assertEqual(str(test_photo_paper), "test test photo_paper",
"PhotoPaper.__str__ returned unexpected value.")
class FrameTestCase(TestCase):
"""
Tests for :model:`photo.Frame`
"""
@classmethod
def setUpTestData(cls):
manufacturer = models.Manufacturer.objects.create(
name="test manufacturer", short_name="test")
film = models.Film.objects.create(name="test film",
manufacturer=manufacturer,
speed=200, process="B&W")
film_format = models.FilmFormat.objects.create(name="test format",
roll_film=True)
cls.film_roll = models.FilmRoll.objects.create(name="test film_roll",
film=film,
format=film_format,
shot_speed=200,
developed_speed=200)
def test_frame_number_positive(self):
"""
Test Frame.frame_number handles positive indexes
"""
test_frame = models.Frame(index=1)
self.assertEqual(test_frame.frame_number(), "1",
"Incorrect frame number returned")
def test_frame_number_double_zero(self):
"""
Test Frame.frame_number handles the double-zero index
"""
test_frame = models.Frame(index=-1)
self.assertEqual(test_frame.frame_number(), "00",
"Incorrect frame number returned")
def test_str(self):
"""
Test the __str__ method on :model:`photo.Frame`
"""
test_frame = models.Frame(index=0, film_roll=self.film_roll)
self.assertEqual(str(test_frame), "test film_roll-0",
"Frame.__str__ returned unexpected value.")
class PrintTestCase(TestCase):
"""
Tests for :model:`photo.Print`
"""
@classmethod
def setUpTestData(cls):
manufacturer = models.Manufacturer.objects.create(name="manufacturer",
short_name="test")
cls.finish_glossy = models.PhotoPaperFinish.objects.create(name="glossy")
cls.finish_matte = models.PhotoPaperFinish.objects.create(name="matte")
cls.photo_paper = models.PhotoPaper.objects.create(
name="photo_paper", manufacturer=manufacturer, paper_type="RC",
multigrade=True)
cls.photo_paper.finishes.add(cls.finish_glossy)
cls.photo_paper.save()
cls.film_format_35mm = models.FilmFormat.objects.create(name="35mm",
roll_film=True)
cls.film_format_120 = models.FilmFormat.objects.create(name="120",
roll_film=True)
film = models.Film.objects.create(name="film",
manufacturer=manufacturer,
speed=200)
film.formats.add(cls.film_format_35mm)
film.formats.add(cls.film_format_120)
film.save()
cls.film_roll_120 = models.FilmRoll.objects.create(
name="film_roll_120", film=film, format=cls.film_format_120,
shot_speed=200, developed_speed=200)
cls.frame_120 = models.Frame.objects.create(
index=1, film_roll=cls.film_roll_120)
cls.film_roll_35mm = models.FilmRoll.objects.create(
name="film_roll_35mm", film=film, format=cls.film_format_35mm,
shot_speed=200, developed_speed=200)
cls.frame_35mm = models.Frame.objects.create(
index=1, film_roll=cls.film_roll_35mm)
cls.enlarger = models.Enlarger.objects.create(name="enlarger", type=0,
color_head=False)
cls.enlarger.formats.add(cls.film_format_35mm)
cls.enlarger.save()
def test_clean_valid_finish(self):
"""
Test Print.clean, for valid combination of paper and finish.
"""
test_print = models.Print(paper=self.photo_paper,
finish=self.finish_glossy)
try:
test_print.clean()
except ValidationError:
self.fail("Valid Print raised ValidationError")
def test_clean_invalid_finish(self):
"""
Test Print.clean, for invalid combination of paper and finish.
"""
test_print = models.Print(paper=self.photo_paper,
finish=self.finish_matte)
self.assertRaises(ValidationError, test_print.clean)
def test_clean_valid_enlarger(self):
"""
Test Print.clean, for valid combination of format and enlarger.
"""
test_print = models.Print(paper=self.photo_paper,
finish=self.finish_glossy,
frame=self.frame_35mm,
enlarger=self.enlarger)
try:
test_print.clean()
except ValidationError:
self.fail("Valid Print raised ValidationError")
def test_clean_invalid_enlarger(self):
"""
Test Print.clean, for invalid combination of format and enlarger.
"""
test_print = models.Print(paper=self.photo_paper,
finish=self.finish_glossy,
frame=self.frame_120,
enlarger=self.enlarger)
self.assertRaises(ValidationError, test_print.clean)
def test_str(self):
"""
Test the __str__ method on :model:`photo.Print`
"""
test_print = models.Print(date=date(2012, 2, 2), sequence=1)
self.assertEqual(str(test_print), "20120202-1",
"Print.__str__ returned unexpected value.")
class EnlargerTestCase(TestCase):
"""
Tests for :model:`photo.Enlarger`
"""
def test_str(self):
"""
Test the __str__ method on :model:`photo.Enlarger`
"""
test_enlarger = models.Enlarger(name="test enlarger")
self.assertEqual(str(test_enlarger), "test enlarger",
"Enlarger.__str__ return unexpected value.")
|
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
#
# Maintainer: Jonathan Lange
# Author: Robert Collins
import StringIO, os, sys
from zope.interface import implements
from zope.interface.verify import verifyObject
from twisted.trial.itrial import IReporter, ITestCase
from twisted.trial import unittest, runner, reporter, util
from twisted.python import failure, log, reflect, filepath
from twisted.python.filepath import FilePath
from twisted.scripts import trial
from twisted.plugins import twisted_trial
from twisted import plugin
from twisted.internet import defer
pyunit = __import__('unittest')
class CapturingDebugger(object):
def __init__(self):
self._calls = []
def runcall(self, *args, **kwargs):
self._calls.append('runcall')
args[0](*args[1:], **kwargs)
class CapturingReporter(object):
"""
Reporter that keeps a log of all actions performed on it.
"""
implements(IReporter)
stream = None
tbformat = None
args = None
separator = None
testsRun = None
def __init__(self, stream=None, tbformat=None, rterrors=None,
publisher=None):
"""
Create a capturing reporter.
"""
self._calls = []
self.shouldStop = False
self._stream = stream
self._tbformat = tbformat
self._rterrors = rterrors
self._publisher = publisher
def startTest(self, method):
"""
Report the beginning of a run of a single test method
@param method: an object that is adaptable to ITestMethod
"""
self._calls.append('startTest')
def stopTest(self, method):
"""
Report the status of a single test method
@param method: an object that is adaptable to ITestMethod
"""
self._calls.append('stopTest')
def cleanupErrors(self, errs):
"""called when the reactor has been left in a 'dirty' state
@param errs: a list of L{twisted.python.failure.Failure}s
"""
self._calls.append('cleanupError')
def addSuccess(self, test):
self._calls.append('addSuccess')
def done(self):
"""
Do nothing. These tests don't care about done.
"""
class TrialRunnerTestsMixin:
"""
Mixin defining tests for L{runner.TrialRunner}.
"""
def tearDown(self):
self.runner._tearDownLogFile()
def test_empty(self):
"""
Empty test method, used by the other tests.
"""
def _getObservers(self):
return log.theLogPublisher.observers
def test_addObservers(self):
"""
Any log system observers L{TrialRunner.run} adds are removed by the
time it returns.
"""
originalCount = len(self._getObservers())
self.runner.run(self.test)
newCount = len(self._getObservers())
self.assertEqual(newCount, originalCount)
def test_logFileAlwaysActive(self):
"""
Test that a new file is opened on each run.
"""
oldSetUpLogFile = self.runner._setUpLogFile
l = []
def setUpLogFile():
oldSetUpLogFile()
l.append(self.runner._logFileObserver)
self.runner._setUpLogFile = setUpLogFile
self.runner.run(self.test)
self.runner.run(self.test)
self.assertEqual(len(l), 2)
self.failIf(l[0] is l[1], "Should have created a new file observer")
def test_logFileGetsClosed(self):
"""
Test that the log file created is closed during the run.
"""
oldSetUpLogFile = self.runner._setUpLogFile
l = []
def setUpLogFile():
oldSetUpLogFile()
l.append(self.runner._logFileObject)
self.runner._setUpLogFile = setUpLogFile
self.runner.run(self.test)
self.assertEqual(len(l), 1)
self.failUnless(l[0].closed)
class TestTrialRunner(TrialRunnerTestsMixin, unittest.TestCase):
"""
Tests for L{runner.TrialRunner} with the feature to turn unclean errors
into warnings disabled.
"""
def setUp(self):
self.stream = StringIO.StringIO()
self.runner = runner.TrialRunner(CapturingReporter, stream=self.stream)
self.test = TestTrialRunner('test_empty')
def test_publisher(self):
"""
The reporter constructed by L{runner.TrialRunner} is passed
L{twisted.python.log} as the value for the C{publisher} parameter.
"""
result = self.runner._makeResult()
self.assertIdentical(result._publisher, log)
class TrialRunnerWithUncleanWarningsReporter(TrialRunnerTestsMixin,
unittest.TestCase):
"""
Tests for the TrialRunner's interaction with an unclean-error suppressing
reporter.
"""
def setUp(self):
self.stream = StringIO.StringIO()
self.runner = runner.TrialRunner(CapturingReporter, stream=self.stream,
uncleanWarnings=True)
self.test = TestTrialRunner('test_empty')
class DryRunMixin(object):
suppress = [util.suppress(
category=DeprecationWarning,
message="Test visitors deprecated in Twisted 8.0")]
def setUp(self):
self.log = []
self.stream = StringIO.StringIO()
self.runner = runner.TrialRunner(CapturingReporter,
runner.TrialRunner.DRY_RUN,
stream=self.stream)
self.makeTestFixtures()
def makeTestFixtures(self):
"""
Set C{self.test} and C{self.suite}, where C{self.suite} is an empty
TestSuite.
"""
def test_empty(self):
"""
If there are no tests, the reporter should not receive any events to
report.
"""
result = self.runner.run(runner.TestSuite())
self.assertEqual(result._calls, [])
def test_singleCaseReporting(self):
"""
If we are running a single test, check the reporter starts, passes and
then stops the test during a dry run.
"""
result = self.runner.run(self.test)
self.assertEqual(result._calls, ['startTest', 'addSuccess', 'stopTest'])
def test_testsNotRun(self):
"""
When we are doing a dry run, the tests should not actually be run.
"""
self.runner.run(self.test)
self.assertEqual(self.log, [])
class DryRunTest(DryRunMixin, unittest.TestCase):
"""
Check that 'dry run' mode works well with Trial tests.
"""
def makeTestFixtures(self):
class MockTest(unittest.TestCase):
def test_foo(test):
self.log.append('test_foo')
self.test = MockTest('test_foo')
self.suite = runner.TestSuite()
class PyUnitDryRunTest(DryRunMixin, unittest.TestCase):
"""
Check that 'dry run' mode works well with stdlib unittest tests.
"""
def makeTestFixtures(self):
class PyunitCase(pyunit.TestCase):
def test_foo(self):
pass
self.test = PyunitCase('test_foo')
self.suite = pyunit.TestSuite()
class TestRunner(unittest.TestCase):
def setUp(self):
self.config = trial.Options()
# whitebox hack a reporter in, because plugins are CACHED and will
# only reload if the FILE gets changed.
parts = reflect.qual(CapturingReporter).split('.')
package = '.'.join(parts[:-1])
klass = parts[-1]
plugins = [twisted_trial._Reporter(
"Test Helper Reporter",
package,
description="Utility for unit testing.",
longOpt="capturing",
shortOpt=None,
klass=klass)]
# XXX There should really be a general way to hook the plugin system
# for tests.
def getPlugins(iface, *a, **kw):
self.assertEqual(iface, IReporter)
return plugins + list(self.original(iface, *a, **kw))
self.original = plugin.getPlugins
plugin.getPlugins = getPlugins
self.standardReport = ['startTest', 'addSuccess', 'stopTest',
'startTest', 'addSuccess', 'stopTest',
'startTest', 'addSuccess', 'stopTest',
'startTest', 'addSuccess', 'stopTest',
'startTest', 'addSuccess', 'stopTest',
'startTest', 'addSuccess', 'stopTest',
'startTest', 'addSuccess', 'stopTest',
'startTest', 'addSuccess', 'stopTest',
'startTest', 'addSuccess', 'stopTest',
'startTest', 'addSuccess', 'stopTest']
def tearDown(self):
plugin.getPlugins = self.original
def parseOptions(self, args):
self.config.parseOptions(args)
def getRunner(self):
r = trial._makeRunner(self.config)
r.stream = StringIO.StringIO()
# XXX The runner should always take care of cleaning this up itself.
# It's not clear why this is necessary. The runner always tears down
# its log file.
self.addCleanup(r._tearDownLogFile)
# XXX The runner should always take care of cleaning this up itself as
# well. It's necessary because TrialRunner._setUpTestdir might raise
# an exception preventing Reporter.done from being run, leaving the
# observer added by Reporter.__init__ still present in the system.
# Something better needs to happen inside
# TrialRunner._runWithoutDecoration to remove the need for this kludge.
r._log = log.LogPublisher()
return r
def test_runner_can_get_reporter(self):
self.parseOptions([])
result = self.config['reporter']
runner = self.getRunner()
self.assertEqual(result, runner._makeResult().__class__)
def test_runner_get_result(self):
self.parseOptions([])
runner = self.getRunner()
result = runner._makeResult()
self.assertEqual(result.__class__, self.config['reporter'])
def test_uncleanWarningsOffByDefault(self):
"""
By default Trial sets the 'uncleanWarnings' option on the runner to
False. This means that dirty reactor errors will be reported as
errors. See L{test_reporter.TestDirtyReactor}.
"""
self.parseOptions([])
runner = self.getRunner()
self.assertNotIsInstance(runner._makeResult(),
reporter.UncleanWarningsReporterWrapper)
def test_getsUncleanWarnings(self):
"""
Specifying '--unclean-warnings' on the trial command line will cause
reporters to be wrapped in a device which converts unclean errors to
warnings. See L{test_reporter.TestDirtyReactor} for implications.
"""
self.parseOptions(['--unclean-warnings'])
runner = self.getRunner()
self.assertIsInstance(runner._makeResult(),
reporter.UncleanWarningsReporterWrapper)
def test_runner_working_directory(self):
self.parseOptions(['--temp-directory', 'some_path'])
runner = self.getRunner()
self.assertEqual(runner.workingDirectory, 'some_path')
def test_concurrentImplicitWorkingDirectory(self):
"""
If no working directory is explicitly specified and the default
working directory is in use by another runner, L{TrialRunner.run}
selects a different default working directory to use.
"""
self.parseOptions([])
# Make sure we end up with the same working directory after this test
# as we had before it.
self.addCleanup(os.chdir, os.getcwd())
# Make a new directory and change into it. This isolates us from state
# that other tests might have dumped into this process's temp
# directory.
runDirectory = FilePath(self.mktemp())
runDirectory.makedirs()
os.chdir(runDirectory.path)
firstRunner = self.getRunner()
secondRunner = self.getRunner()
where = {}
class ConcurrentCase(unittest.TestCase):
def test_first(self):
"""
Start a second test run which will have a default working
directory which is the same as the working directory of the
test run already in progress.
"""
# Change the working directory to the value it had before this
# test suite was started.
where['concurrent'] = subsequentDirectory = os.getcwd()
os.chdir(runDirectory.path)
self.addCleanup(os.chdir, subsequentDirectory)
secondRunner.run(ConcurrentCase('test_second'))
def test_second(self):
"""
Record the working directory for later analysis.
"""
where['record'] = os.getcwd()
result = firstRunner.run(ConcurrentCase('test_first'))
bad = result.errors + result.failures
if bad:
self.fail(bad[0][1])
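        # The first runner claimed the default _trial_temp directory, so the
        # concurrently started second runner fell back to _trial_temp-1.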
self.assertEqual(
where, {
'concurrent': runDirectory.child('_trial_temp').path,
'record': runDirectory.child('_trial_temp-1').path})
def test_concurrentExplicitWorkingDirectory(self):
"""
If a working directory which is already in use is explicitly specified,
L{TrialRunner.run} raises L{_WorkingDirectoryBusy}.
"""
self.parseOptions(['--temp-directory', os.path.abspath(self.mktemp())])
initialDirectory = os.getcwd()
self.addCleanup(os.chdir, initialDirectory)
firstRunner = self.getRunner()
secondRunner = self.getRunner()
class ConcurrentCase(unittest.TestCase):
def test_concurrent(self):
"""
Try to start another runner in the same working directory and
assert that it raises L{_WorkingDirectoryBusy}.
"""
self.assertRaises(
util._WorkingDirectoryBusy,
secondRunner.run, ConcurrentCase('test_failure'))
def test_failure(self):
"""
Should not be called, always fails.
"""
self.fail("test_failure should never be called.")
result = firstRunner.run(ConcurrentCase('test_concurrent'))
bad = result.errors + result.failures
if bad:
self.fail(bad[0][1])
def test_runner_normal(self):
self.parseOptions(['--temp-directory', self.mktemp(),
'--reporter', 'capturing',
'twisted.trial.test.sample'])
my_runner = self.getRunner()
loader = runner.TestLoader()
suite = loader.loadByName('twisted.trial.test.sample', True)
result = my_runner.run(suite)
self.assertEqual(self.standardReport, result._calls)
def test_runner_debug(self):
self.parseOptions(['--reporter', 'capturing',
'--debug', 'twisted.trial.test.sample'])
my_runner = self.getRunner()
debugger = CapturingDebugger()
def get_debugger():
return debugger
my_runner._getDebugger = get_debugger
loader = runner.TestLoader()
suite = loader.loadByName('twisted.trial.test.sample', True)
result = my_runner.run(suite)
self.assertEqual(self.standardReport, result._calls)
self.assertEqual(['runcall'], debugger._calls)
class RemoveSafelyTests(unittest.TestCase):
"""
Tests for L{_removeSafely}.
"""
def test_removeSafelyNoTrialMarker(self):
"""
If a path doesn't contain a node named C{"_trial_marker"}, that path is
not removed by L{runner._removeSafely} and a L{runner._NoTrialMarker}
exception is raised instead.
"""
directory = self.mktemp()
os.mkdir(directory)
dirPath = filepath.FilePath(directory)
self.assertRaises(util._NoTrialMarker, util._removeSafely, dirPath)
def test_removeSafelyRemoveFailsMoveSucceeds(self):
"""
If an L{OSError} is raised while removing a path in
L{runner._removeSafely}, an attempt is made to move the path to a new
name.
"""
def dummyRemove():
"""
Raise an C{OSError} to emulate the branch of L{runner._removeSafely}
in which path removal fails.
"""
raise OSError()
# Patch stdout so we can check the print statements in _removeSafely
out = StringIO.StringIO()
self.patch(sys, 'stdout', out)
# Set up a trial directory with a _trial_marker
directory = self.mktemp()
os.mkdir(directory)
dirPath = filepath.FilePath(directory)
dirPath.child('_trial_marker').touch()
# Ensure that path.remove() raises an OSError
dirPath.remove = dummyRemove
util._removeSafely(dirPath)
self.assertIn("could not remove FilePath", out.getvalue())
def test_removeSafelyRemoveFailsMoveFails(self):
"""
If an L{OSError} is raised while removing a path in
L{runner._removeSafely}, an attempt is made to move the path to a new
name. If that attempt fails, the L{OSError} is re-raised.
"""
def dummyRemove():
"""
Raise an C{OSError} to emulate the branch of L{runner._removeSafely}
in which path removal fails.
"""
raise OSError("path removal failed")
def dummyMoveTo(path):
"""
Raise an C{OSError} to emulate the branch of L{runner._removeSafely}
in which path movement fails.
"""
raise OSError("path movement failed")
# Patch stdout so we can check the print statements in _removeSafely
out = StringIO.StringIO()
self.patch(sys, 'stdout', out)
# Set up a trial directory with a _trial_marker
directory = self.mktemp()
os.mkdir(directory)
dirPath = filepath.FilePath(directory)
dirPath.child('_trial_marker').touch()
# Ensure that path.remove() and path.moveTo() both raise OSErrors
dirPath.remove = dummyRemove
dirPath.moveTo = dummyMoveTo
error = self.assertRaises(OSError, util._removeSafely, dirPath)
self.assertEqual(str(error), "path movement failed")
self.assertIn("could not remove FilePath", out.getvalue())
class TestTrialSuite(unittest.TestCase):
def test_imports(self):
        # FIXME: how do you test that the reactor can be cleaned up?
from twisted.trial.runner import TrialSuite
class TestUntilFailure(unittest.TestCase):
class FailAfter(unittest.TestCase):
"""
A test case that fails when run 3 times in a row.
"""
count = []
def test_foo(self):
self.count.append(None)
if len(self.count) == 3:
self.fail('Count reached 3')
def setUp(self):
TestUntilFailure.FailAfter.count = []
self.test = TestUntilFailure.FailAfter('test_foo')
self.stream = StringIO.StringIO()
self.runner = runner.TrialRunner(reporter.Reporter, stream=self.stream)
def test_runUntilFailure(self):
"""
        Test that the runUntilFailure method of the runner actually fails after
        a few runs.
"""
result = self.runner.runUntilFailure(self.test)
self.assertEqual(result.testsRun, 1)
self.failIf(result.wasSuccessful())
self.assertEqual(self._getFailures(result), 1)
def _getFailures(self, result):
"""
Get the number of failures that were reported to a result.
"""
return len(result.failures)
def test_runUntilFailureDecorate(self):
"""
        C{runUntilFailure} doesn't decorate the tests needlessly: it decorates
        them once when the run starts, not on every iteration.
"""
decorated = []
def decorate(test, interface):
decorated.append((test, interface))
return test
self.patch(unittest, "decorate", decorate)
result = self.runner.runUntilFailure(self.test)
self.assertEqual(result.testsRun, 1)
self.assertEqual(len(decorated), 1)
self.assertEqual(decorated, [(self.test, ITestCase)])
def test_runUntilFailureForceGCDecorate(self):
"""
C{runUntilFailure} applies the force-gc decoration after the standard
L{ITestCase} decoration, but only one time.
"""
decorated = []
def decorate(test, interface):
decorated.append((test, interface))
return test
self.patch(unittest, "decorate", decorate)
self.runner._forceGarbageCollection = True
result = self.runner.runUntilFailure(self.test)
self.assertEqual(result.testsRun, 1)
self.assertEqual(len(decorated), 2)
self.assertEqual(decorated,
[(self.test, ITestCase),
(self.test, unittest._ForceGarbageCollectionDecorator)])
class UncleanUntilFailureTests(TestUntilFailure):
"""
Test that the run-until-failure feature works correctly with the unclean
error suppressor.
"""
def setUp(self):
TestUntilFailure.setUp(self)
self.runner = runner.TrialRunner(reporter.Reporter, stream=self.stream,
uncleanWarnings=True)
def _getFailures(self, result):
"""
Get the number of failures that were reported to a result that
is wrapped in an UncleanFailureWrapper.
"""
return len(result._originalReporter.failures)
class BreakingSuite(runner.TestSuite):
"""
A L{TestSuite} that logs an error when it is run.
"""
def run(self, result):
try:
raise RuntimeError("error that occurs outside of a test")
except RuntimeError:
log.err(failure.Failure())
class TestLoggedErrors(unittest.TestCase):
"""
It is possible for an error generated by a test to be logged I{outside} of
any test. The log observers constructed by L{TestCase} won't catch these
errors. Here we try to generate such errors and ensure they are reported to
a L{TestResult} object.
"""
def tearDown(self):
self.flushLoggedErrors(RuntimeError)
def test_construct(self):
"""
Check that we can construct a L{runner.LoggedSuite} and that it
starts empty.
"""
suite = runner.LoggedSuite()
self.assertEqual(suite.countTestCases(), 0)
def test_capturesError(self):
"""
        Check that a L{LoggedSuite} reports any logged errors to its result.
"""
result = reporter.TestResult()
suite = runner.LoggedSuite([BreakingSuite()])
suite.run(result)
self.assertEqual(len(result.errors), 1)
self.assertEqual(result.errors[0][0].id(), runner.NOT_IN_TEST)
self.failUnless(result.errors[0][1].check(RuntimeError))
class TestTestHolder(unittest.TestCase):
def setUp(self):
self.description = "description"
self.holder = runner.TestHolder(self.description)
def test_holder(self):
"""
Check that L{runner.TestHolder} takes a description as a parameter
and that this description is returned by the C{id} and
C{shortDescription} methods.
"""
self.assertEqual(self.holder.id(), self.description)
self.assertEqual(self.holder.shortDescription(), self.description)
def test_holderImplementsITestCase(self):
"""
L{runner.TestHolder} implements L{ITestCase}.
"""
self.assertIdentical(self.holder, ITestCase(self.holder))
self.assertTrue(
verifyObject(ITestCase, self.holder),
"%r claims to provide %r but does not do so correctly."
% (self.holder, ITestCase))
def test_runsWithStandardResult(self):
"""
A L{runner.TestHolder} can run against the standard Python
C{TestResult}.
"""
result = pyunit.TestResult()
self.holder.run(result)
self.assertTrue(result.wasSuccessful())
self.assertEqual(1, result.testsRun)
class ErrorHolderTestsMixin(object):
"""
This mixin defines test methods which can be applied to a
L{runner.ErrorHolder} constructed with either a L{Failure} or a
C{exc_info}-style tuple.
Subclass this and implement C{setUp} to create C{self.holder} referring to a
L{runner.ErrorHolder} instance and C{self.error} referring to a L{Failure}
which the holder holds.
"""
exceptionForTests = ZeroDivisionError('integer division or modulo by zero')
class TestResultStub(object):
"""
Stub for L{TestResult}.
"""
def __init__(self):
self.errors = []
def startTest(self, test):
pass
def stopTest(self, test):
pass
def addError(self, test, error):
self.errors.append((test, error))
def test_runsWithStandardResult(self):
"""
A L{runner.ErrorHolder} can run against the standard Python
C{TestResult}.
"""
result = pyunit.TestResult()
self.holder.run(result)
self.assertFalse(result.wasSuccessful())
self.assertEqual(1, result.testsRun)
def test_run(self):
"""
L{runner.ErrorHolder} adds an error to the result when run.
"""
self.holder.run(self.result)
self.assertEqual(
self.result.errors,
[(self.holder, (self.error.type, self.error.value, self.error.tb))])
def test_call(self):
"""
L{runner.ErrorHolder} adds an error to the result when called.
"""
self.holder(self.result)
self.assertEqual(
self.result.errors,
[(self.holder, (self.error.type, self.error.value, self.error.tb))])
def test_countTestCases(self):
"""
L{runner.ErrorHolder.countTestCases} always returns 0.
"""
self.assertEqual(self.holder.countTestCases(), 0)
def test_repr(self):
"""
L{runner.ErrorHolder.__repr__} returns a string describing the error it
holds.
"""
self.assertEqual(repr(self.holder),
"<ErrorHolder description='description' "
"error=ZeroDivisionError('integer division or modulo by zero',)>")
class FailureHoldingErrorHolderTests(ErrorHolderTestsMixin, TestTestHolder):
"""
Tests for L{runner.ErrorHolder} behaving similarly to L{runner.TestHolder}
when constructed with a L{Failure} representing its error.
"""
def setUp(self):
self.description = "description"
# make a real Failure so we can construct ErrorHolder()
try:
raise self.exceptionForTests
except ZeroDivisionError:
self.error = failure.Failure()
self.holder = runner.ErrorHolder(self.description, self.error)
self.result = self.TestResultStub()
class ExcInfoHoldingErrorHolderTests(ErrorHolderTestsMixin, TestTestHolder):
"""
Tests for L{runner.ErrorHolder} behaving similarly to L{runner.TestHolder}
when constructed with a C{exc_info}-style tuple representing its error.
"""
def setUp(self):
self.description = "description"
# make a real Failure so we can construct ErrorHolder()
try:
raise self.exceptionForTests
except ZeroDivisionError:
exceptionInfo = sys.exc_info()
self.error = failure.Failure()
self.holder = runner.ErrorHolder(self.description, exceptionInfo)
self.result = self.TestResultStub()
class TestMalformedMethod(unittest.TestCase):
"""
    Test that trial copes with test methods that don't have correct signatures.
"""
class ContainMalformed(unittest.TestCase):
"""
This TestCase holds malformed test methods that trial should handle.
"""
def test_foo(self, blah):
pass
def test_bar():
pass
test_spam = defer.deferredGenerator(test_bar)
def _test(self, method):
"""
        Wrapper for one of the test methods of L{ContainMalformed}.
"""
stream = StringIO.StringIO()
trialRunner = runner.TrialRunner(reporter.Reporter, stream=stream)
test = TestMalformedMethod.ContainMalformed(method)
result = trialRunner.run(test)
self.assertEqual(result.testsRun, 1)
self.failIf(result.wasSuccessful())
self.assertEqual(len(result.errors), 1)
def test_extraArg(self):
"""
Test when the method has extra (useless) arguments.
"""
self._test('test_foo')
def test_noArg(self):
"""
        Test when the method doesn't even have self as an argument.
"""
self._test('test_bar')
def test_decorated(self):
"""
        Test that a decorated malformed method also fails.
"""
self._test('test_spam')
class DestructiveTestSuiteTestCase(unittest.TestCase):
"""
Test for L{runner.DestructiveTestSuite}.
"""
def test_basic(self):
"""
        The destructive test suite should run the tests normally.
"""
called = []
class MockTest(unittest.TestCase):
def test_foo(test):
called.append(True)
test = MockTest('test_foo')
result = reporter.TestResult()
suite = runner.DestructiveTestSuite([test])
self.assertEqual(called, [])
suite.run(result)
self.assertEqual(called, [True])
self.assertEqual(suite.countTestCases(), 0)
def test_shouldStop(self):
"""
Test the C{shouldStop} management: raising a C{KeyboardInterrupt} must
interrupt the suite.
"""
called = []
class MockTest(unittest.TestCase):
def test_foo1(test):
called.append(1)
def test_foo2(test):
raise KeyboardInterrupt()
def test_foo3(test):
called.append(2)
result = reporter.TestResult()
loader = runner.TestLoader()
loader.suiteFactory = runner.DestructiveTestSuite
suite = loader.loadClass(MockTest)
self.assertEqual(called, [])
suite.run(result)
self.assertEqual(called, [1])
# The last test shouldn't have been run
self.assertEqual(suite.countTestCases(), 1)
def test_cleanup(self):
"""
        Checks that the test suite cleans up its tests during the run, so that
        it ends empty.
"""
class MockTest(unittest.TestCase):
def test_foo(test):
pass
test = MockTest('test_foo')
result = reporter.TestResult()
suite = runner.DestructiveTestSuite([test])
self.assertEqual(suite.countTestCases(), 1)
suite.run(result)
self.assertEqual(suite.countTestCases(), 0)
class TestRunnerDeprecation(unittest.TestCase):
class FakeReporter(reporter.Reporter):
"""
Fake reporter that does *not* implement done() but *does* implement
printErrors, separator, printSummary, stream, write and writeln
without deprecations.
"""
done = None
separator = None
stream = None
def printErrors(self, *args):
pass
def printSummary(self, *args):
pass
def write(self, *args):
pass
def writeln(self, *args):
pass
def test_reporterDeprecations(self):
"""
The runner emits a warning if it is using a result that doesn't
implement 'done'.
"""
trialRunner = runner.TrialRunner(None)
result = self.FakeReporter()
trialRunner._makeResult = lambda: result
def f():
# We have to use a pyunit test, otherwise we'll get deprecation
# warnings about using iterate() in a test.
trialRunner.run(pyunit.TestCase('id'))
self.assertWarns(
DeprecationWarning,
"%s should implement done() but doesn't. Falling back to "
"printErrors() and friends." % reflect.qual(result.__class__),
__file__, f)
# -*- coding: utf-8 -*-
"""
IDL Ref:
https://thrift.apache.org/docs/idl
"""
from __future__ import absolute_import
import collections
import os
import sys
import types
from ply import lex, yacc
from .lexer import * # noqa
from .exc import ThriftParserError, ThriftGrammerError
from thriftpy2._compat import urlopen, urlparse, PY3
from ..thrift import gen_init, TType, TPayload, TException
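# For orientation, a minimal sketch of the kind of IDL the p_* grammar rules
# below handle (illustrative only; the names are made up for this sketch):
#
#     include "shared.thrift"
#     const i32 MAX_RETRIES = 3
#     enum Status { OK = 0, FAILED = 1 }
#     exception NotFound { 1: string message }
#     struct Note {
#         1: required string title,
#         2: optional string body,
#     }
#     service NoteService {
#         Note get(1: i32 id) throws (1: NotFound err),
#     }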
def p_error(p):
if p is None:
        raise ThriftGrammerError('Grammar error at EOF')
    raise ThriftGrammerError('Grammar error %r at line %d' %
                             (p.value, p.lineno))
def p_start(p):
'''start : header definition'''
def p_header(p):
'''header : header_unit_ header
|'''
def p_header_unit_(p):
'''header_unit_ : header_unit ';'
| header_unit'''
def p_header_unit(p):
'''header_unit : include
| cpp_include
| namespace'''
def p_include(p):
'''include : INCLUDE LITERAL'''
thrift = thrift_stack[-1]
if thrift.__thrift_file__ is None:
        raise ThriftParserError('Unexpected include statement while loading '
                                'from a file-like object.')
replace_include_dirs = [os.path.dirname(thrift.__thrift_file__)] \
+ include_dirs_
for include_dir in replace_include_dirs:
path = os.path.join(include_dir, p[2])
if os.path.exists(path):
child = parse(path)
setattr(thrift, child.__name__, child)
_add_thrift_meta('includes', child)
return
raise ThriftParserError(('Couldn\'t include thrift %s in any '
'directories provided') % p[2])
def p_cpp_include(p):
'''cpp_include : CPP_INCLUDE LITERAL'''
def p_namespace(p):
'''namespace : NAMESPACE namespace_scope IDENTIFIER'''
# namespace is useless in thriftpy2
# if p[2] == 'py' or p[2] == '*':
# setattr(thrift_stack[-1], '__name__', p[3])
def p_namespace_scope(p):
'''namespace_scope : '*'
| IDENTIFIER'''
p[0] = p[1]
def p_sep(p):
'''sep : ','
| ';'
'''
def p_definition(p):
'''definition : definition definition_unit_
|'''
def p_definition_unit_(p):
'''definition_unit_ : definition_unit ';'
| definition_unit'''
def p_definition_unit(p):
'''definition_unit : const
| ttype
'''
def p_const(p):
'''const : CONST field_type IDENTIFIER '=' const_value
| CONST field_type IDENTIFIER '=' const_value sep'''
try:
val = _cast(p[2], p.lineno(3))(p[5])
except AssertionError:
raise ThriftParserError('Type error for constant %s at line %d' %
(p[3], p.lineno(3)))
setattr(thrift_stack[-1], p[3], val)
_add_thrift_meta('consts', val)
def p_const_value(p):
'''const_value : INTCONSTANT
| DUBCONSTANT
| LITERAL
| BOOLCONSTANT
| const_list
| const_map
| const_ref'''
p[0] = p[1]
def p_const_list(p):
'''const_list : '[' const_list_seq ']' '''
p[0] = p[2]
def p_const_list_seq(p):
'''const_list_seq : const_value sep const_list_seq
| const_value const_list_seq
|'''
_parse_seq(p)
def p_const_map(p):
'''const_map : '{' const_map_seq '}' '''
p[0] = dict(p[2])
def p_const_map_seq(p):
'''const_map_seq : const_map_item sep const_map_seq
| const_map_item const_map_seq
|'''
_parse_seq(p)
def p_const_map_item(p):
'''const_map_item : const_value ':' const_value '''
p[0] = [p[1], p[3]]
def p_const_ref(p):
'''const_ref : IDENTIFIER'''
child = thrift_stack[-1]
for name in p[1].split('.'):
father = child
child = getattr(child, name, None)
if child is None:
            raise ThriftParserError('Can\'t find name %r at line %d'
% (p[1], p.lineno(1)))
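    # A plain constant carries no _ttype, while an enum member is an int whose
    # parent class carries _ttype == TType.I32; either case is accepted as a
    # constant reference here.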
if _get_ttype(child) is None or _get_ttype(father) == TType.I32:
# child is a constant or enum value
p[0] = child
else:
raise ThriftParserError('No enum value or constant found '
'named %r' % p[1])
def p_ttype(p):
'''ttype : typedef
| enum
| struct
| union
| exception
| service'''
def p_typedef(p):
'''typedef : TYPEDEF field_type IDENTIFIER type_annotations'''
setattr(thrift_stack[-1], p[3], p[2])
def p_enum(p): # noqa
'''enum : ENUM IDENTIFIER '{' enum_seq '}' type_annotations'''
val = _make_enum(p[2], p[4])
setattr(thrift_stack[-1], p[2], val)
_add_thrift_meta('enums', val)
def p_enum_seq(p):
'''enum_seq : enum_item sep enum_seq
| enum_item enum_seq
|'''
_parse_seq(p)
def p_enum_item(p):
'''enum_item : IDENTIFIER '=' INTCONSTANT type_annotations
| IDENTIFIER type_annotations
|'''
if len(p) == 5:
p[0] = [p[1], p[3]]
elif len(p) == 3:
p[0] = [p[1], None]
def p_struct(p):
'''struct : seen_struct '{' field_seq '}' type_annotations'''
val = _fill_in_struct(p[1], p[3])
_add_thrift_meta('structs', val)
def p_seen_struct(p):
'''seen_struct : STRUCT IDENTIFIER '''
val = _make_empty_struct(p[2])
setattr(thrift_stack[-1], p[2], val)
p[0] = val
def p_union(p):
'''union : seen_union '{' field_seq '}' '''
val = _fill_in_struct(p[1], p[3])
_add_thrift_meta('unions', val)
def p_seen_union(p):
'''seen_union : UNION IDENTIFIER '''
val = _make_empty_struct(p[2])
setattr(thrift_stack[-1], p[2], val)
p[0] = val
def p_exception(p):
'''exception : EXCEPTION IDENTIFIER '{' field_seq '}' type_annotations '''
val = _make_struct(p[2], p[4], base_cls=TException)
setattr(thrift_stack[-1], p[2], val)
_add_thrift_meta('exceptions', val)
def p_simple_service(p):
'''simple_service : SERVICE IDENTIFIER '{' function_seq '}'
| SERVICE IDENTIFIER EXTENDS IDENTIFIER '{' function_seq '}'
'''
thrift = thrift_stack[-1]
if len(p) == 8:
extends = thrift
for name in p[4].split('.'):
extends = getattr(extends, name, None)
if extends is None:
raise ThriftParserError('Can\'t find service %r for '
'service %r to extend' %
(p[4], p[2]))
if not hasattr(extends, 'thrift_services'):
            raise ThriftParserError('Can\'t extend %r, not a service'
% p[4])
else:
extends = None
val = _make_service(p[2], p[len(p) - 2], extends)
setattr(thrift, p[2], val)
_add_thrift_meta('services', val)
def p_service(p):
'''service : simple_service type_annotations'''
p[0] = p[1]
def p_simple_function(p):
'''simple_function : ONEWAY function_type IDENTIFIER '(' field_seq ')'
| ONEWAY function_type IDENTIFIER '(' field_seq ')' throws
| function_type IDENTIFIER '(' field_seq ')' throws
| function_type IDENTIFIER '(' field_seq ')' '''
if p[1] == 'oneway':
oneway = True
base = 1
else:
oneway = False
base = 0
if p[len(p) - 1] == ')':
throws = []
else:
throws = p[len(p) - 1]
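    # Tuple layout consumed later by _make_service:
    # [oneway, return_type, name, arg_fields, throws].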
p[0] = [oneway, p[base + 1], p[base + 2], p[base + 4], throws]
def p_function(p):
'''function : simple_function type_annotations'''
p[0] = p[1]
def p_function_seq(p):
'''function_seq : function sep function_seq
| function function_seq
|'''
_parse_seq(p)
def p_throws(p):
'''throws : THROWS '(' field_seq ')' '''
p[0] = p[3]
def p_function_type(p):
'''function_type : field_type
| VOID'''
if p[1] == 'void':
p[0] = TType.VOID
else:
p[0] = p[1]
def p_field_seq(p):
'''field_seq : field sep field_seq
| field field_seq
|'''
_parse_seq(p)
def p_simple_field(p):
'''simple_field : field_id field_req field_type IDENTIFIER
| field_id field_req field_type IDENTIFIER '=' const_value
'''
if len(p) == 7:
try:
val = _cast(p[3])(p[6])
except AssertionError:
raise ThriftParserError(
'Type error for field %s '
'at line %d' % (p[4], p.lineno(4)))
else:
val = None
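    # Tuple layout consumed by _fill_in_struct:
    # [field_id, required, field_type, name, default_value].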
p[0] = [p[1], p[2], p[3], p[4], val]
def p_field(p):
'''field : simple_field type_annotations'''
p[0] = p[1]
def p_field_id(p):
'''field_id : INTCONSTANT ':' '''
p[0] = p[1]
def p_field_req(p):
'''field_req : REQUIRED
| OPTIONAL
|'''
if len(p) == 2:
p[0] = p[1] == 'required'
elif len(p) == 1:
p[0] = False # default: required=False
def p_field_type(p):
'''field_type : ref_type
| definition_type'''
p[0] = p[1]
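# Forward references: a field may name a type that has not been defined yet.
# CurrentIncompleteType hands out negative placeholder indexes for such
# references (see p_ref_type below), and _cast() defers casting for negative
# "types" via _lazy_cast_const. The actual resolution happens elsewhere in
# thriftpy2, so treat this note as descriptive rather than authoritative.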
class CurrentIncompleteType(dict):
index = -1
def set_info(self, info):
self[self.index] = info
self.index -= 1
return self.index + 1
incomplete_type = CurrentIncompleteType()
def p_ref_type(p):
'''ref_type : IDENTIFIER'''
ref_type = thrift_stack[-1]
for attr in dir(ref_type):
if attr in {'__doc__', '__loader__', '__name__', '__package__',
'__spec__', '__thrift_file__', '__thrift_meta__'}:
continue
if p[1].startswith(attr + '.'):
name = p[1][len(attr)+1:]
included_ref_type = getattr(ref_type, attr)
resolved_ref_type = getattr(included_ref_type, name, None)
if resolved_ref_type is not None:
ref_type = resolved_ref_type
break
else:
for index, name in enumerate(p[1].split('.')):
ref_type = getattr(ref_type, name, None)
if ref_type is None:
if index != len(p[1].split('.')) - 1:
raise ThriftParserError('No type found: %r, at line %d' %
(p[1], p.lineno(1)))
p[0] = incomplete_type.set_info((p[1], p.lineno(1)))
return
if hasattr(ref_type, '_ttype'):
p[0] = getattr(ref_type, '_ttype'), ref_type
else:
p[0] = ref_type
def p_simple_base_type(p): # noqa
'''simple_base_type : BOOL
| BYTE
| I8
| I16
| I32
| I64
| DOUBLE
| STRING
| BINARY'''
if p[1] == 'bool':
p[0] = TType.BOOL
if p[1] == 'byte' or p[1] == 'i8':
p[0] = TType.BYTE
if p[1] == 'i16':
p[0] = TType.I16
if p[1] == 'i32':
p[0] = TType.I32
if p[1] == 'i64':
p[0] = TType.I64
if p[1] == 'double':
p[0] = TType.DOUBLE
if p[1] == 'string':
p[0] = TType.STRING
if p[1] == 'binary':
p[0] = TType.BINARY
def p_base_type(p):
'''base_type : simple_base_type type_annotations'''
p[0] = p[1]
def p_simple_container_type(p):
'''simple_container_type : map_type
| list_type
| set_type'''
p[0] = p[1]
def p_container_type(p):
'''container_type : simple_container_type type_annotations'''
p[0] = p[1]
def p_map_type(p):
'''map_type : MAP '<' field_type ',' field_type '>' '''
p[0] = TType.MAP, (p[3], p[5])
def p_list_type(p):
'''list_type : LIST '<' field_type '>' '''
p[0] = TType.LIST, p[3]
def p_set_type(p):
'''set_type : SET '<' field_type '>' '''
p[0] = TType.SET, p[3]
def p_definition_type(p):
'''definition_type : base_type
| container_type'''
p[0] = p[1]
def p_type_annotations(p):
'''type_annotations : '(' type_annotation_seq ')'
|'''
if len(p) == 4:
p[0] = p[2]
else:
p[0] = None
def p_type_annotation_seq(p):
'''type_annotation_seq : type_annotation sep type_annotation_seq
| type_annotation type_annotation_seq
|'''
_parse_seq(p)
def p_type_annotation(p):
'''type_annotation : IDENTIFIER '=' LITERAL
| IDENTIFIER '''
if len(p) == 4:
p[0] = p[1], p[3]
else:
p[0] = p[1], None # Without Value
thrift_stack = []
include_dirs_ = ['.']
thrift_cache = {}
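# Module-level parser state: thrift_stack tracks the modules currently being
# parsed so that nested includes attach to the right parent and circular
# includes can be detected; include_dirs_ is the search path used by the
# `include` rule; thrift_cache memoises fully parsed modules keyed by module
# name or normalised path.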
def parse(path, module_name=None, include_dirs=None, include_dir=None,
lexer=None, parser=None, enable_cache=True, encoding='utf-8'):
"""Parse a single thrift file to module object, e.g.::
>>> from thriftpy2.parser.parser import parse
>>> note_thrift = parse("path/to/note.thrift")
<module 'note_thrift' (built-in)>
:param path: file path to parse, should be a string ending with '.thrift'.
    :param module_name: the name for the parsed module; the default is the
                        basename of `path` without its extension.
:param include_dirs: directories to find thrift files while processing
the `include` directive, by default: ['.'].
:param include_dir: directory to find child thrift files. Note this keyword
parameter will be deprecated in the future, it exists
                        for compatibility reasons. If it's provided (not `None`),
it will be appended to `include_dirs`.
    :param lexer: ply lexer to use; if not provided, `parse` will create a
                  new one.
    :param parser: ply parser to use; if not provided, `parse` will create a
                   new one.
    :param enable_cache: if `True` (the default), the parsed module is cached.
                         If `module_name` is provided it is used as the cache
                         key; otherwise the `path` is used.
"""
if os.name == 'nt' and sys.version_info[0] < 3:
os.path.samefile = lambda f1, f2: os.stat(f1) == os.stat(f2)
    # Check for circular ("dead") includes on the current parse stack.
for thrift in thrift_stack:
if thrift.__thrift_file__ is not None and \
os.path.samefile(path, thrift.__thrift_file__):
raise ThriftParserError('Dead including on %s' % path)
global thrift_cache
cache_key = module_name or os.path.normpath(path)
if enable_cache and cache_key in thrift_cache:
return thrift_cache[cache_key]
if lexer is None:
lexer = lex.lex()
if parser is None:
parser = yacc.yacc(debug=False, write_tables=0)
global include_dirs_
if include_dirs is not None:
include_dirs_ = include_dirs
if include_dir is not None:
include_dirs_.append(include_dir)
if not path.endswith('.thrift'):
raise ThriftParserError('Path should end with .thrift')
url_scheme = urlparse(path).scheme
if url_scheme == 'file':
with open(urlparse(path).netloc + urlparse(path).path) as fh:
data = fh.read()
elif len(url_scheme) <= 1:
with open(path) as fh:
data = fh.read()
elif url_scheme in ('http', 'https'):
data = urlopen(path).read()
else:
raise ThriftParserError('thriftpy2 does not support generating module '
'with path in protocol \'{}\''.format(
url_scheme))
if PY3 and isinstance(data, bytes):
data = data.decode(encoding)
if module_name is not None and not module_name.endswith('_thrift'):
raise ThriftParserError('thriftpy2 can only generate module with '
'\'_thrift\' suffix')
if module_name is None:
basename = os.path.basename(path)
module_name = os.path.splitext(basename)[0]
thrift = types.ModuleType(module_name)
setattr(thrift, '__thrift_file__', path)
thrift_stack.append(thrift)
lexer.lineno = 1
parser.parse(data)
thrift_stack.pop()
if enable_cache:
thrift_cache[cache_key] = thrift
return thrift
def parse_fp(source, module_name, lexer=None, parser=None, enable_cache=True):
"""Parse a file-like object to thrift module object, e.g.::
>>> from thriftpy2.parser.parser import parse_fp
>>> with open("path/to/note.thrift") as fp:
parse_fp(fp, "note_thrift")
<module 'note_thrift' (built-in)>
:param source: file-like object, expected to have a method named `read`.
    :param module_name: the name for the parsed module; should end with
                        '_thrift'.
    :param lexer: ply lexer to use; if not provided, `parse_fp` will create
                  a new one.
    :param parser: ply parser to use; if not provided, `parse_fp` will
                   create a new one.
    :param enable_cache: if `True` (the default), the parsed module is cached
                         under `module_name`.
"""
if not module_name.endswith('_thrift'):
raise ThriftParserError('thriftpy2 can only generate module with '
'\'_thrift\' suffix')
if enable_cache and module_name in thrift_cache:
return thrift_cache[module_name]
if not hasattr(source, 'read'):
raise ThriftParserError('Expected `source` to be a file-like object '
'with a method named \'read\'')
if lexer is None:
lexer = lex.lex()
if parser is None:
parser = yacc.yacc(debug=False, write_tables=0)
data = source.read()
thrift = types.ModuleType(module_name)
setattr(thrift, '__thrift_file__', None)
thrift_stack.append(thrift)
lexer.lineno = 1
parser.parse(data)
thrift_stack.pop()
if enable_cache:
thrift_cache[module_name] = thrift
return thrift
def _add_thrift_meta(key, val):
thrift = thrift_stack[-1]
if not hasattr(thrift, '__thrift_meta__'):
meta = collections.defaultdict(list)
setattr(thrift, '__thrift_meta__', meta)
else:
meta = getattr(thrift, '__thrift_meta__')
meta[key].append(val)
def _parse_seq(p):
if len(p) == 4:
p[0] = [p[1]] + p[3]
elif len(p) == 3:
p[0] = [p[1]] + p[2]
elif len(p) == 1:
p[0] = []
def _cast(t, linno=0): # noqa
if isinstance(t, int) and t < 0:
return _lazy_cast_const(t, linno)
if t == TType.BOOL:
return _cast_bool
if t == TType.BYTE:
return _cast_byte
if t == TType.I16:
return _cast_i16
if t == TType.I32:
return _cast_i32
if t == TType.I64:
return _cast_i64
if t == TType.DOUBLE:
return _cast_double
if t == TType.STRING:
return _cast_string
if t == TType.BINARY:
return _cast_binary
if t[0] == TType.LIST:
return _cast_list(t)
if t[0] == TType.SET:
return _cast_set(t)
if t[0] == TType.MAP:
return _cast_map(t)
if t[0] == TType.I32:
return _cast_enum(t)
if t[0] == TType.STRUCT:
return _cast_struct(t)
def _lazy_cast_const(t, linno):
def _inner_cast(v):
return ('UNKNOWN_CONST', t, v, linno)
return _inner_cast
def _cast_bool(v):
assert isinstance(v, (bool, int))
return bool(v)
def _cast_byte(v):
assert isinstance(v, int)
return v
def _cast_i16(v):
assert isinstance(v, int)
return v
def _cast_i32(v):
assert isinstance(v, int)
return v
def _cast_i64(v):
assert isinstance(v, int)
return v
def _cast_double(v):
assert isinstance(v, (float, int))
return float(v)
def _cast_string(v):
assert isinstance(v, str)
return v
def _cast_binary(v):
assert isinstance(v, str)
return v
def _cast_list(t):
assert t[0] == TType.LIST
def __cast_list(v):
assert isinstance(v, list)
        for item in v:  # eager cast; map() would be lazy on Python 3
            _cast(t[1])(item)
return v
return __cast_list
def _cast_set(t):
assert t[0] == TType.SET
def __cast_set(v):
if len(v) == 0 and isinstance(v, dict):
v = set()
assert isinstance(v, (list, set))
        for item in v:  # eager cast; map() would be lazy on Python 3
            _cast(t[1])(item)
if not isinstance(v, set):
return set(v)
return v
return __cast_set
def _cast_map(t):
assert t[0] == TType.MAP
def __cast_map(v):
assert isinstance(v, dict)
        for key in list(v):  # copy keys: casting may rebind entries
v[_cast(t[1][0])(key)] = \
_cast(t[1][1])(v[key])
return v
return __cast_map
def _cast_enum(t):
assert t[0] == TType.I32
def __cast_enum(v):
assert isinstance(v, int)
if v in t[1]._VALUES_TO_NAMES:
return v
raise ThriftParserError('Couldn\'t find a named value in enum '
'%s for value %d' % (t[1].__name__, v))
return __cast_enum
def _cast_struct(t): # struct/exception/union
assert t[0] == TType.STRUCT
def __cast_struct(v):
if isinstance(v, t[1]):
return v # already cast
assert isinstance(v, dict)
tspec = getattr(t[1], '_tspec')
for key in tspec: # requirement check
if tspec[key][0] and key not in v:
raise ThriftParserError('Field %r was required to create '
'constant for type %r' %
(key, t[1].__name__))
for key in v: # cast values
if key not in tspec:
raise ThriftParserError('No field named %r was '
'found in struct of type %r' %
(key, t[1].__name__))
v[key] = _cast(tspec[key][1])(v[key])
return t[1](**v)
return __cast_struct
def _make_enum(name, kvs):
attrs = {'__module__': thrift_stack[-1].__name__, '_ttype': TType.I32}
cls = type(name, (object, ), attrs)
_values_to_names = {}
_names_to_values = {}
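    # Members without an explicit value auto-increment from the previous
    # member, starting at 0 when the first member carries no value, mirroring
    # the Thrift IDL rules.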
if kvs:
val = kvs[0][1]
if val is None:
val = -1
for item in kvs:
if item[1] is None:
item[1] = val + 1
val = item[1]
for key, val in kvs:
setattr(cls, key, val)
_values_to_names[val] = key
_names_to_values[key] = val
setattr(cls, '_VALUES_TO_NAMES', _values_to_names)
setattr(cls, '_NAMES_TO_VALUES', _names_to_values)
return cls
def _make_empty_struct(name, ttype=TType.STRUCT, base_cls=TPayload):
attrs = {'__module__': thrift_stack[-1].__name__, '_ttype': ttype}
return type(name, (base_cls, ), attrs)
def _fill_in_struct(cls, fields, _gen_init=True):
thrift_spec = {}
default_spec = []
_tspec = {}
for field in fields:
if field[0] in thrift_spec or field[3] in _tspec:
raise ThriftGrammerError(('\'%d:%s\' field identifier/name has '
'already been used') % (field[0],
field[3]))
ttype = field[2]
thrift_spec[field[0]] = _ttype_spec(ttype, field[3], field[1])
default_spec.append((field[3], field[4]))
_tspec[field[3]] = field[1], ttype
setattr(cls, 'thrift_spec', thrift_spec)
setattr(cls, 'default_spec', default_spec)
setattr(cls, '_tspec', _tspec)
if _gen_init:
gen_init(cls, thrift_spec, default_spec)
return cls
def _make_struct(name, fields, ttype=TType.STRUCT, base_cls=TPayload,
_gen_init=True):
cls = _make_empty_struct(name, ttype=ttype, base_cls=base_cls)
return _fill_in_struct(cls, fields, _gen_init=_gen_init)
def _make_service(name, funcs, extends):
if extends is None:
extends = object
attrs = {'__module__': thrift_stack[-1].__name__}
cls = type(name, (extends, ), attrs)
thrift_services = []
for func in funcs:
func_name = func[2]
# args payload cls
args_name = '%s_args' % func_name
args_fields = func[3]
args_cls = _make_struct(args_name, args_fields)
setattr(cls, args_name, args_cls)
# result payload cls
result_name = '%s_result' % func_name
result_type = func[1]
result_throws = func[4]
result_oneway = func[0]
result_cls = _make_struct(result_name, result_throws,
_gen_init=False)
setattr(result_cls, 'oneway', result_oneway)
if result_type != TType.VOID:
result_cls.thrift_spec[0] = _ttype_spec(result_type, 'success')
result_cls.default_spec.insert(0, ('success', None))
gen_init(result_cls, result_cls.thrift_spec, result_cls.default_spec)
setattr(cls, result_name, result_cls)
thrift_services.append(func_name)
if extends is not None and hasattr(extends, 'thrift_services'):
thrift_services.extend(extends.thrift_services)
setattr(cls, 'thrift_services', thrift_services)
return cls
def _ttype_spec(ttype, name, required=False):
if isinstance(ttype, int):
return ttype, name, required
else:
return ttype[0], name, ttype[1], required
def _get_ttype(inst, default_ttype=None):
if hasattr(inst, '__dict__') and '_ttype' in inst.__dict__:
return inst.__dict__['_ttype']
return default_ttype
#!/usr/bin/env python3
# Copyright (c) 2014-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the fundrawtransaction RPC."""
from decimal import Decimal
from test_framework.descriptors import descsum_create
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_approx,
assert_equal,
assert_fee_amount,
assert_greater_than,
assert_greater_than_or_equal,
assert_raises_rpc_error,
count_bytes,
find_vout_for_address,
)
def get_unspent(listunspent, amount):
for utx in listunspent:
if utx['amount'] == amount:
return utx
raise AssertionError('Could not find unspent with amount={}'.format(amount))
class RawTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
# This test isn't testing tx relay. Set whitelist on the peers for
# instant tx relay.
self.extra_args = [['-whitelist=noban@127.0.0.1']] * self.num_nodes
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.setup_nodes()
self.connect_nodes(0, 1)
self.connect_nodes(1, 2)
self.connect_nodes(0, 2)
self.connect_nodes(0, 3)
def run_test(self):
self.log.info("Connect nodes, set fees, generate blocks, and sync")
self.min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(self.min_relay_tx_fee)
        # If the fee's positive delta is higher than this value the tests will
        # fail; a negative delta always fails the tests.
        # The signature of every input may be at most 2 bytes larger than a
        # minimum-sized signature, hence:
        #     fee_tolerance = 2 bytes * minRelayTxFeePerByte
self.fee_tolerance = 2 * self.min_relay_tx_fee / 1000
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
self.test_change_position()
self.test_simple()
self.test_simple_two_coins()
self.test_simple_two_outputs()
self.test_change()
self.test_no_change()
self.test_invalid_option()
self.test_invalid_change_address()
self.test_valid_change_address()
self.test_change_type()
self.test_coin_selection()
self.test_two_vin()
self.test_two_vin_two_vout()
self.test_invalid_input()
self.test_fee_p2pkh()
self.test_fee_p2pkh_multi_out()
self.test_fee_p2sh()
self.test_fee_4of5()
self.test_spend_2of2()
self.test_locked_wallet()
self.test_many_inputs_fee()
self.test_many_inputs_send()
self.test_op_return()
self.test_watchonly()
self.test_all_watched_funds()
self.test_feerate_with_conf_target_and_estimate_mode()
self.test_option_feerate()
self.test_address_reuse()
self.test_option_subtract_fee_from_outputs()
self.test_subtract_fee_with_presets()
def test_change_position(self):
"""Ensure setting changePosition in fundraw with an exact match is handled properly."""
self.log.info("Test fundrawtxn changePosition option")
rawmatch = self.nodes[2].createrawtransaction([], {self.nodes[2].getnewaddress():50})
rawmatch = self.nodes[2].fundrawtransaction(rawmatch, {"changePosition":1, "subtractFeeFromOutputs":[0]})
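        # With subtractFeeFromOutputs the fee is taken from the 50 BTC output,
        # so node2's single coin (presumably its 50 BTC coinbase) is an exact
        # match and no change output is created: changepos stays -1.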
assert_equal(rawmatch["changepos"], -1)
self.nodes[3].createwallet(wallet_name="wwatch", disable_private_keys=True)
wwatch = self.nodes[3].get_wallet_rpc('wwatch')
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].getaddressinfo(watchonly_address)["pubkey"]
self.watchonly_amount = Decimal(200)
wwatch.importpubkey(watchonly_pubkey, "", True)
self.watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, self.watchonly_amount)
# Lock UTXO so nodes[0] doesn't accidentally spend it
self.watchonly_vout = find_vout_for_address(self.nodes[0], self.watchonly_txid, watchonly_address)
self.nodes[0].lockunspent(False, [{"txid": self.watchonly_txid, "vout": self.watchonly_vout}])
self.nodes[0].sendtoaddress(self.nodes[3].get_wallet_rpc(self.default_wallet_name).getnewaddress(), self.watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0)
self.nodes[0].generate(1)
self.sync_all()
wwatch.unloadwallet()
def test_simple(self):
self.log.info("Test fundrawtxn")
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert len(dec_tx['vin']) > 0 #test that we have enough inputs
def test_simple_two_coins(self):
self.log.info("Test fundrawtxn with 2 coins")
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.2 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert len(dec_tx['vin']) > 0 #test if we have enough inputs
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
def test_simple_two_outputs(self):
self.log.info("Test fundrawtxn with 2 outputs")
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6, self.nodes[1].getnewaddress() : 2.5 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert len(dec_tx['vin']) > 0
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
def test_change(self):
self.log.info("Test fundrawtxn with a vin > required amount")
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
self.test_no_change_fee = fee # Use the same fee for the next tx
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
def test_no_change(self):
self.log.info("Test fundrawtxn not having a change output")
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = {self.nodes[0].getnewaddress(): Decimal(5.0) - self.test_no_change_fee - self.fee_tolerance}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
def test_invalid_option(self):
self.log.info("Test fundrawtxn with an invalid option")
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_rpc_error(-3, "Unexpected key foo", self.nodes[2].fundrawtransaction, rawtx, {'foo':'bar'})
# reserveChangeKey was deprecated and is now removed
assert_raises_rpc_error(-3, "Unexpected key reserveChangeKey", lambda: self.nodes[2].fundrawtransaction(hexstring=rawtx, options={'reserveChangeKey': True}))
def test_invalid_change_address(self):
self.log.info("Test fundrawtxn with an invalid change address")
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_rpc_error(-5, "Change address must be a valid bitcoin address", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':'foobar'})
def test_valid_change_address(self):
self.log.info("Test fundrawtxn with a provided change address")
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
change = self.nodes[2].getnewaddress()
assert_raises_rpc_error(-8, "changePosition out of bounds", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':change, 'changePosition':2})
rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 0})
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
out = dec_tx['vout'][0]
assert_equal(change, out['scriptPubKey']['addresses'][0])
def test_change_type(self):
self.log.info("Test fundrawtxn with a provided change type")
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[2].fundrawtransaction, rawtx, {'change_type': None})
assert_raises_rpc_error(-5, "Unknown change type ''", self.nodes[2].fundrawtransaction, rawtx, {'change_type': ''})
rawtx = self.nodes[2].fundrawtransaction(rawtx, {'change_type': 'bech32'})
dec_tx = self.nodes[2].decoderawtransaction(rawtx['hex'])
assert_equal('witness_v0_keyhash', dec_tx['vout'][rawtx['changepos']]['scriptPubKey']['type'])
def test_coin_selection(self):
self.log.info("Test fundrawtxn with a vin < required amount")
utx = get_unspent(self.nodes[2].listunspent(), 1)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
# 4-byte version + 1-byte vin count + 36-byte prevout then script_len
rawtx = rawtx[:82] + "0100" + rawtx[84:]
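        # The splice turns the empty scriptSig (length 0x00) into a one-byte
        # scriptSig 0x00, which is why the decoded scriptSig hex is expected
        # to be "00" below.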
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
# Should fail without add_inputs:
assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx, {"add_inputs": False})
# add_inputs is enabled by default
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for i, out in enumerate(dec_tx['vout']):
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
else:
assert_equal(i, rawtxfund['changepos'])
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
def test_two_vin(self):
self.log.info("Test fundrawtxn with 2 vins")
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
# Should fail without add_inputs:
assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx, {"add_inputs": False})
rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {"add_inputs": True})
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
matchingIns = 0
for vinOut in dec_tx['vin']:
for vinIn in inputs:
if vinIn['txid'] == vinOut['txid']:
matchingIns+=1
        assert_equal(matchingIns, 2)  # We must now see two vins identical to the vins given as params.
def test_two_vin_two_vout(self):
self.log.info("Test fundrawtxn with 2 vins and 2 vouts")
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0, self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
# Should fail without add_inputs:
assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx, {"add_inputs": False})
rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {"add_inputs": True})
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 2)
assert_equal(len(dec_tx['vout']), 3)
def test_invalid_input(self):
self.log.info("Test fundrawtxn with an invalid vin")
inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
outputs = { self.nodes[0].getnewaddress() : 1.0}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx)
def test_fee_p2pkh(self):
"""Compare fee of a standard pubkeyhash transaction."""
self.log.info("Test fundrawtxn p2pkh fee")
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
# Create same transaction over sendtoaddress.
txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
# Compare fee.
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert feeDelta >= 0 and feeDelta <= self.fee_tolerance
def test_fee_p2pkh_multi_out(self):
"""Compare fee of a standard pubkeyhash transaction with multiple outputs."""
self.log.info("Test fundrawtxn p2pkh fee with multiple outputs")
inputs = []
outputs = {
self.nodes[1].getnewaddress():1.1,
self.nodes[1].getnewaddress():1.2,
self.nodes[1].getnewaddress():0.1,
self.nodes[1].getnewaddress():1.3,
self.nodes[1].getnewaddress():0.2,
self.nodes[1].getnewaddress():0.3,
}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
# Create same transaction over sendtoaddress.
txId = self.nodes[0].sendmany("", outputs)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
# Compare fee.
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert feeDelta >= 0 and feeDelta <= self.fee_tolerance
def test_fee_p2sh(self):
"""Compare fee of a 2-of-2 multisig p2sh transaction."""
# Create 2-of-2 addr.
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[1].getaddressinfo(addr2)
mSigObj = self.nodes[3].createmultisig(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
inputs = []
outputs = {mSigObj:1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
# Create same transaction over sendtoaddress.
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
# Compare fee.
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert feeDelta >= 0 and feeDelta <= self.fee_tolerance
def test_fee_4of5(self):
"""Compare fee of a standard pubkeyhash transaction."""
self.log.info("Test fundrawtxn fee with 4-of-5 addresses")
# Create 4-of-5 addr.
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr3 = self.nodes[1].getnewaddress()
addr4 = self.nodes[1].getnewaddress()
addr5 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[1].getaddressinfo(addr2)
addr3Obj = self.nodes[1].getaddressinfo(addr3)
addr4Obj = self.nodes[1].getaddressinfo(addr4)
addr5Obj = self.nodes[1].getaddressinfo(addr5)
mSigObj = self.nodes[1].createmultisig(
4,
[
addr1Obj['pubkey'],
addr2Obj['pubkey'],
addr3Obj['pubkey'],
addr4Obj['pubkey'],
addr5Obj['pubkey'],
]
)['address']
inputs = []
outputs = {mSigObj:1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
# Create same transaction over sendtoaddress.
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
# Compare fee.
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert feeDelta >= 0 and feeDelta <= self.fee_tolerance
def test_spend_2of2(self):
"""Spend a 2-of-2 multisig transaction over fundraw."""
self.log.info("Test fundpsbt spending 2-of-2 multisig")
# Create 2-of-2 addr.
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
self.nodes[2].createwallet(wallet_name='wmulti', disable_private_keys=True)
wmulti = self.nodes[2].get_wallet_rpc('wmulti')
w2 = self.nodes[2].get_wallet_rpc(self.default_wallet_name)
mSigObj = wmulti.addmultisigaddress(
2,
[
addr1Obj['pubkey'],
addr2Obj['pubkey'],
]
)['address']
if not self.options.descriptors:
wmulti.importaddress(mSigObj)
# Send 1.2 BTC to msig addr.
self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.nodes[0].generate(1)
self.sync_all()
oldBalance = self.nodes[1].getbalance()
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
funded_psbt = wmulti.walletcreatefundedpsbt(inputs=inputs, outputs=outputs, options={'changeAddress': w2.getrawchangeaddress()})['psbt']
signed_psbt = w2.walletprocesspsbt(funded_psbt)
final_psbt = w2.finalizepsbt(signed_psbt['psbt'])
self.nodes[2].sendrawtransaction(final_psbt['hex'])
self.nodes[2].generate(1)
self.sync_all()
# Make sure funds are received at node1.
assert_equal(oldBalance+Decimal('1.10000000'), self.nodes[1].getbalance())
wmulti.unloadwallet()
def test_locked_wallet(self):
self.log.info("Test fundrawtxn with locked wallet and hardened derivation")
self.nodes[1].encryptwallet("test")
if self.options.descriptors:
self.nodes[1].walletpassphrase('test', 10)
self.nodes[1].importdescriptors([{
'desc': descsum_create('wpkh(tprv8ZgxMBicQKsPdYeeZbPSKd2KYLmeVKtcFA7kqCxDvDR13MQ6us8HopUR2wLcS2ZKPhLyKsqpDL2FtL73LMHcgoCL7DXsciA8eX8nbjCR2eG/0h/*h)'),
'timestamp': 'now',
'active': True
},
{
'desc': descsum_create('wpkh(tprv8ZgxMBicQKsPdYeeZbPSKd2KYLmeVKtcFA7kqCxDvDR13MQ6us8HopUR2wLcS2ZKPhLyKsqpDL2FtL73LMHcgoCL7DXsciA8eX8nbjCR2eG/1h/*h)'),
'timestamp': 'now',
'active': True,
'internal': True
}])
self.nodes[1].walletlock()
# Drain the keypool.
self.nodes[1].getnewaddress()
self.nodes[1].getrawchangeaddress()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.09999500}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
# fund a transaction that does not require a new key for the change output
self.nodes[1].fundrawtransaction(rawtx)
# fund a transaction that requires a new key for the change output
# creating the key must be impossible because the wallet is locked
outputs = {self.nodes[0].getnewaddress():1.1}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
assert_raises_rpc_error(-4, "Transaction needs a change address, but we can't generate it. Please call keypoolrefill first.", self.nodes[1].fundrawtransaction, rawtx)
# Refill the keypool.
self.nodes[1].walletpassphrase("test", 100)
self.nodes[1].keypoolrefill(8) #need to refill the keypool to get an internal change address
self.nodes[1].walletlock()
assert_raises_rpc_error(-13, "walletpassphrase", self.nodes[1].sendtoaddress, self.nodes[0].getnewaddress(), 1.2)
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
# Now we need to unlock.
self.nodes[1].walletpassphrase("test", 600)
signedTx = self.nodes[1].signrawtransactionwithwallet(fundedTx['hex'])
self.nodes[1].sendrawtransaction(signedTx['hex'])
self.nodes[1].generate(1)
self.sync_all()
# Make sure funds are received at node0.
assert_equal(oldBalance+Decimal('51.10000000'), self.nodes[0].getbalance())
def test_many_inputs_fee(self):
"""Multiple (~19) inputs tx test | Compare fee."""
self.log.info("Test fundrawtxn fee with many inputs")
# Empty node1, send some small coins from node0 to node1.
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.nodes[1].generate(1)
self.sync_all()
for _ in range(20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
# Fund a tx with ~20 small inputs.
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
# Create same transaction over sendtoaddress.
txId = self.nodes[1].sendmany("", outputs)
signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
# Compare fee.
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert feeDelta >= 0 and feeDelta <= self.fee_tolerance * 19 #~19 inputs
def test_many_inputs_send(self):
"""Multiple (~19) inputs tx test | sign/send."""
self.log.info("Test fundrawtxn sign+send with many inputs")
# Again, empty node1, send some small coins from node0 to node1.
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.nodes[1].generate(1)
self.sync_all()
for _ in range(20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
# Fund a tx with ~20 small inputs.
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
fundedAndSignedTx = self.nodes[1].signrawtransactionwithwallet(fundedTx['hex'])
self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
self.nodes[1].generate(1)
self.sync_all()
assert_equal(oldBalance+Decimal('50.19000000'), self.nodes[0].getbalance()) #0.19+block reward
def test_op_return(self):
self.log.info("Test fundrawtxn with OP_RETURN and no vin")
rawtx = "0100000000010000000000000000066a047465737400000000"
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(len(dec_tx['vin']), 0)
assert_equal(len(dec_tx['vout']), 1)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
assert_equal(len(dec_tx['vout']), 2) # one change output added
def test_watchonly(self):
self.log.info("Test fundrawtxn using only watchonly")
inputs = []
outputs = {self.nodes[2].getnewaddress(): self.watchonly_amount / 2}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
self.nodes[3].loadwallet('wwatch')
wwatch = self.nodes[3].get_wallet_rpc('wwatch')
# Setup change addresses for the watchonly wallet
desc_import = [{
"desc": descsum_create("wpkh(tpubD6NzVbkrYhZ4YNXVQbNhMK1WqguFsUXceaVJKbmno2aZ3B6QfbMeraaYvnBSGpV3vxLyTTK9DYT1yoEck4XUScMzXoQ2U2oSmE2JyMedq3H/1/*)"),
"timestamp": "now",
"internal": True,
"active": True,
"keypool": True,
"range": [0, 100],
"watchonly": True,
}]
if self.options.descriptors:
wwatch.importdescriptors(desc_import)
else:
wwatch.importmulti(desc_import)
# Backward compatibility test (2nd param is includeWatching)
result = wwatch.fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 1)
assert_equal(res_dec["vin"][0]["txid"], self.watchonly_txid)
assert "fee" in result.keys()
assert_greater_than(result["changepos"], -1)
wwatch.unloadwallet()
def test_all_watched_funds(self):
self.log.info("Test fundrawtxn using entirety of watched funds")
inputs = []
outputs = {self.nodes[2].getnewaddress(): self.watchonly_amount}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
self.nodes[3].loadwallet('wwatch')
wwatch = self.nodes[3].get_wallet_rpc('wwatch')
w3 = self.nodes[3].get_wallet_rpc(self.default_wallet_name)
result = wwatch.fundrawtransaction(rawtx, {'includeWatching': True, 'changeAddress': w3.getrawchangeaddress(), 'subtractFeeFromOutputs': [0]})
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 1)
assert res_dec["vin"][0]["txid"] == self.watchonly_txid
assert_greater_than(result["fee"], 0)
assert_equal(result["changepos"], -1)
assert_equal(result["fee"] + res_dec["vout"][0]["value"], self.watchonly_amount)
signedtx = wwatch.signrawtransactionwithwallet(result["hex"])
assert not signedtx["complete"]
signedtx = self.nodes[0].signrawtransactionwithwallet(signedtx["hex"])
assert signedtx["complete"]
self.nodes[0].sendrawtransaction(signedtx["hex"])
self.nodes[0].generate(1)
self.sync_all()
wwatch.unloadwallet()
def test_option_feerate(self):
self.log.info("Test fundrawtxn feeRate option")
# Make sure there is exactly one input so coin selection can't skew the result.
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[3].getnewaddress() : 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx) # uses self.min_relay_tx_fee (set by settxfee)
result2 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2 * self.min_relay_tx_fee})
result3 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 10 * self.min_relay_tx_fee})
assert_raises_rpc_error(-4, "Fee exceeds maximum configured by user (e.g. -maxtxfee, maxfeerate)", self.nodes[3].fundrawtransaction, rawtx, {"feeRate": 1})
result_fee_rate = result['fee'] * 1000 / count_bytes(result['hex'])
assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate)
assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate)
def test_feerate_with_conf_target_and_estimate_mode(self):
self.log.info("Test fundrawtxn passing an explicit fee rate using conf_target and estimate_mode")
node = self.nodes[3]
# Make sure there is exactly one input so coin selection can't skew the result.
assert_equal(len(node.listunspent(1)), 1)
inputs = []
outputs = {node.getnewaddress() : 1}
rawtx = node.createrawtransaction(inputs, outputs)
for unit, fee_rate in {"btc/kb": 0.1, "sat/b": 10000}.items():
self.log.info("Test fundrawtxn with conf_target {} estimate_mode {} produces expected fee".format(fee_rate, unit))
# With no explicit fee rate passed, expect a total fee of about 141 satoshis.
assert_approx(node.fundrawtransaction(rawtx)["fee"], vexp=0.00000141, vspan=0.00000001)
# Expect fee to be 10,000x higher when explicit fee 10,000x greater is specified.
result = node.fundrawtransaction(rawtx, {"conf_target": fee_rate, "estimate_mode": unit})
assert_approx(result["fee"], vexp=0.0141, vspan=0.0001)
for field, fee_rate in {"conf_target": 0.1, "estimate_mode": "sat/b"}.items():
self.log.info("Test fundrawtxn raises RPC error if both feeRate and {} are passed".format(field))
assert_raises_rpc_error(
-8, "Cannot specify both {} and feeRate".format(field),
lambda: node.fundrawtransaction(rawtx, {"feeRate": 0.1, field: fee_rate}))
self.log.info("Test fundrawtxn with invalid estimate_mode settings")
for k, v in {"number": 42, "object": {"foo": "bar"}}.items():
assert_raises_rpc_error(-3, "Expected type string for estimate_mode, got {}".format(k),
lambda: self.nodes[1].fundrawtransaction(rawtx, {"estimate_mode": v, "conf_target": 0.1}))
for mode in ["foo", Decimal("3.141592")]:
assert_raises_rpc_error(-8, "Invalid estimate_mode parameter",
lambda: self.nodes[1].fundrawtransaction(rawtx, {"estimate_mode": mode, "conf_target": 0.1}))
self.log.info("Test fundrawtxn with invalid conf_target settings")
for mode in ["unset", "economical", "conservative", "btc/kb", "sat/b"]:
self.log.debug("{}".format(mode))
for k, v in {"string": "", "object": {"foo": "bar"}}.items():
assert_raises_rpc_error(-3, "Expected type number for conf_target, got {}".format(k),
lambda: self.nodes[1].fundrawtransaction(rawtx, {"estimate_mode": mode, "conf_target": v}))
if mode in ["btc/kb", "sat/b"]:
assert_raises_rpc_error(-3, "Amount out of range",
lambda: self.nodes[1].fundrawtransaction(rawtx, {"estimate_mode": mode, "conf_target": -1}))
assert_raises_rpc_error(-4, "Fee rate (0.00000000 BTC/kB) is lower than the minimum fee rate setting (0.00001000 BTC/kB)",
lambda: self.nodes[1].fundrawtransaction(rawtx, {"estimate_mode": mode, "conf_target": 0}))
else:
for n in [-1, 0, 1009]:
assert_raises_rpc_error(-8, "Invalid conf_target, must be between 1 and 1008",
lambda: self.nodes[1].fundrawtransaction(rawtx, {"estimate_mode": mode, "conf_target": n}))
for unit, fee_rate in {"sat/B": 0.99999999, "BTC/kB": 0.00000999}.items():
self.log.info("- raises RPC error 'fee rate too low' if conf_target {} and estimate_mode {} are passed".format(fee_rate, unit))
assert_raises_rpc_error(-4, "Fee rate (0.00000999 BTC/kB) is lower than the minimum fee rate setting (0.00001000 BTC/kB)",
lambda: self.nodes[1].fundrawtransaction(rawtx, {"estimate_mode": unit, "conf_target": fee_rate, "add_inputs": True}))
def test_address_reuse(self):
"""Test no address reuse occurs."""
self.log.info("Test fundrawtxn does not reuse addresses")
rawtx = self.nodes[3].createrawtransaction(inputs=[], outputs={self.nodes[3].getnewaddress(): 1})
result3 = self.nodes[3].fundrawtransaction(rawtx)
res_dec = self.nodes[0].decoderawtransaction(result3["hex"])
changeaddress = ""
for out in res_dec['vout']:
if out['value'] > 1.0:
changeaddress += out['scriptPubKey']['addresses'][0]
assert changeaddress != ""
nextaddr = self.nodes[3].getnewaddress()
# Now the change address key should be removed from the keypool.
assert changeaddress != nextaddr
def test_option_subtract_fee_from_outputs(self):
self.log.info("Test fundrawtxn subtractFeeFromOutputs option")
# Make sure there is exactly one input so coin selection can't skew the result.
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[2].getnewaddress(): 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx), # uses self.min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": []}), # empty subtraction list
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0]}), # uses self.min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2 * self.min_relay_tx_fee}),
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2 * self.min_relay_tx_fee, "subtractFeeFromOutputs": [0]}),]
dec_tx = [self.nodes[3].decoderawtransaction(tx_['hex']) for tx_ in result]
output = [d['vout'][1 - r['changepos']]['value'] for d, r in zip(dec_tx, result)]
change = [d['vout'][r['changepos']]['value'] for d, r in zip(dec_tx, result)]
assert_equal(result[0]['fee'], result[1]['fee'], result[2]['fee'])
assert_equal(result[3]['fee'], result[4]['fee'])
assert_equal(change[0], change[1])
assert_equal(output[0], output[1])
assert_equal(output[0], output[2] + result[2]['fee'])
assert_equal(change[0] + result[0]['fee'], change[2])
assert_equal(output[3], output[4] + result[4]['fee'])
assert_equal(change[3] + result[3]['fee'], change[4])
inputs = []
outputs = {self.nodes[2].getnewaddress(): value for value in (1.0, 1.1, 1.2, 1.3)}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx),
# Split the fee between outputs 0, 2, and 3, but not output 1.
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0, 2, 3]})]
dec_tx = [self.nodes[3].decoderawtransaction(result[0]['hex']),
self.nodes[3].decoderawtransaction(result[1]['hex'])]
# Nested list of non-change output amounts for each transaction.
output = [[out['value'] for i, out in enumerate(d['vout']) if i != r['changepos']]
for d, r in zip(dec_tx, result)]
# List of differences in output amounts between normal and subtractFee transactions.
share = [o0 - o1 for o0, o1 in zip(output[0], output[1])]
# Output 1 is the same in both transactions.
assert_equal(share[1], 0)
# The other 3 outputs are smaller as a result of subtractFeeFromOutputs.
assert_greater_than(share[0], 0)
assert_greater_than(share[2], 0)
assert_greater_than(share[3], 0)
# Outputs 2 and 3 take the same share of the fee.
assert_equal(share[2], share[3])
# Output 0 takes at least as much share of the fee, and no more than 2
# satoshis more, than outputs 2 and 3.
assert_greater_than_or_equal(share[0], share[2])
assert_greater_than_or_equal(share[2] + Decimal(2e-8), share[0])
# The fee is the same in both transactions.
assert_equal(result[0]['fee'], result[1]['fee'])
# The total subtracted from the outputs is equal to the fee.
assert_equal(share[0] + share[2] + share[3], result[0]['fee'])
def test_subtract_fee_with_presets(self):
self.log.info("Test fundrawtxn subtract fee from outputs with preset inputs that are sufficient")
addr = self.nodes[0].getnewaddress()
txid = self.nodes[0].sendtoaddress(addr, 10)
vout = find_vout_for_address(self.nodes[0], txid, addr)
rawtx = self.nodes[0].createrawtransaction([{'txid': txid, 'vout': vout}], [{self.nodes[0].getnewaddress(): 5}])
fundedtx = self.nodes[0].fundrawtransaction(rawtx, {'subtractFeeFromOutputs': [0]})
signedtx = self.nodes[0].signrawtransactionwithwallet(fundedtx['hex'])
self.nodes[0].sendrawtransaction(signedtx['hex'])
if __name__ == '__main__':
RawTransactionsTest().main()
|
|
from math import sqrt
from node import *
class Point:
def __init__(self, x, y):
"""
Constructor : sets x and y co-ordinates
@param x: x co-ordinate
@param y: y co-ordinate
"""
self.x = x
self.y = y
def __eq__(self, other):
"""
Override method that defines equality
@rtype : bool
@param other: other object to compare this object with
@return: Equal/Not Equal
"""
return self.x == other.x and self.y == other.y
def __hash__(self):
return hash((self.x, self.y))
class Grid:
def __init__(self, size, node_range):
"""
Grid Constructor
@param size: size x size will be the dimension of the grid
@param node_range: range up to which a node can sense
"""
self.size = size
self.nodes = []
self.point_to_node = dict()
self.node_range = node_range
def add_node(self, node: Node):
"""
Add a node to the grid
@param node: node to add
"""
self.nodes.append(node)
self.point_to_node[Point(node.actual_x, node.actual_y)] = node
def run_neighbour_discovery(self):
"""
Finds the neighbouring nodes of every node
"""
for a in self.nodes:
for b in self.nodes:
if a == b:
continue
d = sqrt(pow(a.actual_x - b.actual_x, 2) + pow(a.actual_y - b.actual_y, 2))
if d < self.node_range:
a.add_neighbour(b)
b.add_neighbour(a)
def assign_L1_nodes(self, error: int):
"""
If a node has more than 3 neighbours that already know their location,
its own location can be estimated, with some error
@param error: error in location
"""
for i in range(0, len(self.nodes)):
if self.nodes[i].type == NodeType.OTHER and Grid.__is_valid_candidate_to_estimate(self.nodes[i]):
self.nodes[i].set_node_type(NodeType.L1, error)
@staticmethod
def __is_valid_candidate_to_estimate(node: Node):
"""
Checks if a node is valid to run estimation on
Validation Criteria: A node is valid if it is surrounded
by more than 3 (i.e. at least 4) neighbours that know their location
@param node:
@return:
"""
i = 0
for n in node.neighbours:
if n.type != NodeType.OTHER:
i += 1
return i > 3
def estimate_other_nodes(self):
"""
Estimates the location of nodes that don't know their location
"""
for i in range(0, len(self.nodes)):
if self.nodes[i].type == NodeType.OTHER and Grid.__is_valid_candidate_to_estimate(self.nodes[i]):
pre_A = []
pre_C = []
for node in self.nodes[i].neighbours:
if node.type != NodeType.OTHER:
pre_A.append((-2 * node.x, -2 * node.y))
# may need to introduce error here
# calculates d^2 = (x - x1)^2 + (y - y1)^2,
# where x1 and y1 are known and x, y are to be found
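# Subtracting these circle equations pairwise cancels the quadratic x^2 + y^2
# terms, so the while-loop below reduces the problem to a linear system
# A * [x, y]^T = C that estimate_coordinates() can solve (presumably by least
# squares; the Node implementation is not part of this module).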
dist_btwn_a_b_squared = pow(self.nodes[i].actual_x - node.x, 2) + pow(
self.nodes[i].actual_y - node.y, 2)
x1_squared = node.x ** 2
y1_squared = node.y ** 2
pre_C.append(dist_btwn_a_b_squared - x1_squared - y1_squared)
A = []
C = []
j = 0
while j < len(pre_A) and j + 1 < len(pre_A):
A.append([pre_A[j][0] - pre_A[j + 1][0], pre_A[j][1] - pre_A[j + 1][1]])
C.append(pre_C[j] - pre_C[j + 1])
j += 2
# estimates co-ordinates for the current node
self.nodes[i].estimate_coordinates(A, C)
def analyse(self) -> str:
"""
Analyses the results of the simulation by calculating,
i) average distance between the actual point & estimated point
ii) standard deviation of it
@return: string that can be printed to a log file or console
"""
L0_nodes = L1_nodes = other_nodes = 0
distances = []
unresolved = 0
for node in self.nodes:
if node.type == NodeType.L0:
L0_nodes += 1
elif node.type == NodeType.L1:
L1_nodes += 1
elif node.type == NodeType.OTHER:
other_nodes += 1
if node.x is None or node.y is None:
unresolved += 1
continue
# calculate distance between actual point and estimated point
d = sqrt(pow(node.actual_x - node.x, 2) + pow(node.actual_y - node.y, 2))
distances.append(d)
avg = calc_avg(distances)
std_dev = calc_std_dev(avg, distances)
return 'Total Number of nodes : ' + str(len(self.nodes)) + '\n' + \
'L0 Nodes : ' + str(L0_nodes) + '\n' + \
'L1 Nodes : ' + str(L1_nodes) + '\n' + \
'Other Nodes : ' + str(other_nodes) + '\n' + \
'Unresolved Nodes : ' + str(unresolved) + '\n\n' + \
'Avg Distance from actual point : ' + str(avg) + '\n' + \
'Standard Dev : ' + str(std_dev) + '\n'
@staticmethod
def distance(a: Node, b: Node):
"""
Uses estimated location to calculate distance
@param a: node1
@param b: node2
@return: distance between node1 and node2
"""
return sqrt(pow(a.x - b.x, 2) + pow(a.y - b.y, 2))
@staticmethod
def distance_square(a: Node, b: Node):
"""
Uses estimated location to calculate distance squared
@param a: node1
@param b: node2
@return: distance between node1 and node2
"""
return pow(a.x - b.x, 2) + pow(a.y - b.y, 2)
def calc_avg(values):
"""
Calculates the average of 'values' array
@param values: array for which avg is to be found
@return: avg of values array
"""
sm = 0.0
for x in values:
sm += x
sm /= len(values)
return sm
def calc_std_dev(avg, values):
"""
Calculates standard deviation of the values array,
using the given mean (avg)
@param avg: mean of the values
@param values: array of values
@return: standard deviation
"""
sm = 0.0
for x in values:
sm += (avg - x) ** 2
sm /= len(values)
sm = sqrt(sm)
return sm
|
|
# Copyright (c) 2011-2021, Manfred Moitzi
# License: MIT License
"""
Tags
----
A list of :class:`~ezdxf.lldxf.types.DXFTag` that inherits from the Python
standard list. Despite the statement in the DXF Reference, "Do not write
programs that rely on the order given here", tag order is sometimes essential
and some group codes may appear multiple times in one entity. In the worst
case (:class:`~ezdxf.entities.material.Material`: the normal map shares group
codes with the diffuse map) the same group codes are used with different
meanings.
"""
from typing import Iterable, Tuple, Iterator, Any
from .const import DXFStructureError, DXFValueError, STRUCTURE_MARKER
from .types import DXFTag, EMBEDDED_OBJ_MARKER, EMBEDDED_OBJ_STR, dxftag
from .tagger import internal_tag_compiler
COMMENT_CODE = 999
class Tags(list):
"""Collection of :class:`~ezdxf.lldxf.types.DXFTag` as flat list.
Low level tag container, only required for advanced stuff.
"""
@classmethod
def from_text(cls, text: str) -> "Tags":
"""Constructor from DXF string."""
return cls(internal_tag_compiler(text))
def __copy__(self) -> "Tags":
return self.__class__(tag.clone() for tag in self)
clone = __copy__
def get_handle(self) -> str:
"""Get DXF handle. Raises :class:`DXFValueError` if handle not exist.
Returns:
handle as plain hex string like ``'FF00'``
Raises:
DXFValueError: no handle found
"""
try:
code, handle = self[1] # fast path for most common cases
except IndexError:
raise DXFValueError("No handle found.")
if code == 5 or code == 105:
return handle
for code, handle in self:
if code in (5, 105):
return handle
raise DXFValueError("No handle found.")
def replace_handle(self, new_handle: str) -> None:
"""Replace existing handle.
Args:
new_handle: new handle as plain hex string e.g. ``'FF00'``
"""
for index, tag in enumerate(self):
if tag.code in (5, 105):
self[index] = DXFTag(tag.code, new_handle)
return
def dxftype(self) -> str:
"""Returns DXF type of entity, e.g. ``'LINE'``."""
return self[0].value
def has_tag(self, code: int) -> bool:
"""Returns ``True`` if a :class:`~ezdxf.lldxf.types.DXFTag` with given
group `code` is present.
Args:
code: group code as int
"""
return any(tag.code == code for tag in self)
def get_first_value(self, code: int, default=DXFValueError) -> Any:
"""Returns value of first :class:`~ezdxf.lldxf.types.DXFTag` with given
group code or default if `default` != :class:`DXFValueError`, else
raises :class:`DXFValueError`.
Args:
code: group code as int
default: return value for default case or raises :class:`DXFValueError`
"""
for tag in self:
if tag.code == code:
return tag.value
if default is DXFValueError:
raise DXFValueError(code)
else:
return default
def get_first_tag(self, code: int, default=DXFValueError) -> DXFTag:
"""Returns first :class:`~ezdxf.lldxf.types.DXFTag` with given group
code or `default`, if `default` != :class:`DXFValueError`, else raises
:class:`DXFValueError`.
Args:
code: group code as int
default: return value for default case or raises :class:`DXFValueError`
"""
for tag in self:
if tag.code == code:
return tag
if default is DXFValueError:
raise DXFValueError(code)
else:
return default # type: ignore
def find_all(self, code: int) -> "Tags":
"""Returns a list of :class:`~ezdxf.lldxf.types.DXFTag` with given
group code.
Args:
code: group code as int
"""
return self.__class__(tag for tag in self if tag.code == code)
def tag_index(self, code: int, start: int = 0, end: int = None) -> int:
"""Return index of first :class:`~ezdxf.lldxf.types.DXFTag` with given
group code.
Args:
code: group code as int
start: start index as int
end: end index as int, ``None`` for end index = ``len(self)``
"""
if end is None:
end = len(self)
index = start
while index < end:
if self[index].code == code:
return index
index += 1
raise DXFValueError(code)
def update(self, tag: DXFTag) -> None:
"""Update first existing tag with same group code as `tag`, raises
:class:`DXFValueError` if no tag with the same group code exists.
"""
index = self.tag_index(tag.code)
self[index] = tag
def set_first(self, tag: DXFTag) -> None:
"""Update first existing tag with group code ``tag.code`` or append tag."""
try:
self.update(tag)
except DXFValueError:
self.append(tag)
def remove_tags(self, codes: Iterable[int]) -> None:
"""Remove all tags inplace with group codes specified in `codes`.
Args:
codes: iterable of group codes as int
"""
self[:] = [tag for tag in self if tag.code not in set(codes)]
def pop_tags(self, codes: Iterable[int]) -> Iterator[DXFTag]:
"""Pop tags with group codes specified in `codes`.
Args:
codes: iterable of group codes
"""
remaining = []
codes = set(codes)
for tag in self:
if tag.code in codes:
yield tag
else:
remaining.append(tag)
self[:] = remaining
def remove_tags_except(self, codes: Iterable[int]) -> None:
"""Remove all tags inplace except those with group codes specified in
`codes`.
Args:
codes: iterable of group codes
"""
self[:] = [tag for tag in self if tag.code in set(codes)]
def filter(self, codes: Iterable[int]) -> Iterator[DXFTag]:
"""Iterate and filter tags by group `codes`.
Args:
codes: group codes to filter
"""
return (tag for tag in self if tag.code not in set(codes))
def collect_consecutive_tags(
self, codes: Iterable[int], start: int = 0, end: int = None
) -> "Tags":
"""Collect all consecutive tags with group code in `codes`, `start` and
`end` delimits the search range. A tag code not in codes ends the
process.
Args:
codes: iterable of group codes
start: start index as int
end: end index as int, ``None`` for end index = ``len(self)``
Returns:
collected tags as :class:`Tags`
"""
codes = frozenset(codes)
index = int(start)
if end is None:
end = len(self)
bag = self.__class__()
while index < end:
tag = self[index]
if tag.code in codes:
bag.append(tag)
index += 1
else:
break
return bag
def has_embedded_objects(self) -> bool:
for tag in self:
if (
tag.code == EMBEDDED_OBJ_MARKER
and tag.value == EMBEDDED_OBJ_STR
):
return True
return False
@classmethod
def strip(cls, tags: "Tags", codes: Iterable[int]) -> "Tags":
"""Constructor from `tags`, strips all tags with group codes in `codes`
from tags.
Args:
tags: iterable of :class:`~ezdxf.lldxf.types.DXFTag`
codes: iterable of group codes as int
"""
return cls((tag for tag in tags if tag.code not in frozenset(codes)))
def text2tags(text: str) -> Tags:
return Tags.from_text(text)
def group_tags(
tags: Iterable[DXFTag], splitcode: int = STRUCTURE_MARKER
) -> Iterable[Tags]:
"""Group of tags starts with a SplitTag and ends before the next SplitTag.
A SplitTag is a tag with code == splitcode, like (0, 'SECTION') for
splitcode == 0.
Args:
tags: iterable of :class:`DXFTag`
splitcode: group code of split tag
"""
# first do nothing, skip tags in front of the first split tag
def append(tag):
pass
group = None
for tag in tags:
if tag.code == splitcode:
if group is not None:
yield group
group = Tags([tag])
append = group.append # redefine append: add tags to this group
else:
append(tag)
if group is not None:
yield group
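# A small illustrative sketch of group_tags() with the default split code
# (STRUCTURE_MARKER, group code 0); real input usually comes from a tag loader:
#
#     tags = [DXFTag(0, "SECTION"), DXFTag(2, "HEADER"),
#             DXFTag(0, "ENDSEC"), DXFTag(0, "EOF")]
#     groups = list(group_tags(tags))
#     # -> three groups: [SECTION, HEADER], [ENDSEC], [EOF]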
def text_to_multi_tags(
text: str, code: int = 303, size: int = 255, line_ending: str = "^J"
) -> Tags:
text = "".join(text).replace("\n", line_ending)
def chop():
start = 0
end = size
while start < len(text):
yield text[start:end]
start = end
end += size
return Tags(DXFTag(code, part) for part in chop())
def multi_tags_to_text(tags: Tags, line_ending: str = "^J") -> str:
return "".join(tag.value for tag in tags).replace(line_ending, "\n")
OPEN_LIST = (1002, "{")
CLOSE_LIST = (1002, "}")
def xdata_list(name: str, xdata_tags: Iterable) -> Tags:
tags = Tags()
if name:
tags.append((1000, name))
tags.append(OPEN_LIST)
tags.extend(xdata_tags)
tags.append(CLOSE_LIST)
return tags
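# Illustrative sketch: xdata_list() wraps tags in the (1002, "{") / (1002, "}")
# list markers used by XDATA, and get_named_list_from_xdata() finds them again
# (the list name "MYLIST" and the handle tag are purely illustrative):
#
#     tags = xdata_list("MYLIST", [DXFTag(1005, "FF00")])
#     # -> [(1000, "MYLIST"), (1002, "{"), (1005, "FF00"), (1002, "}")]
#     sub = get_named_list_from_xdata("MYLIST", tags)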
def remove_named_list_from_xdata(name: str, tags: Tags) -> Tags:
start, end = get_start_and_end_of_named_list_in_xdata(name, tags)
del tags[start:end]
return tags
def get_named_list_from_xdata(name: str, tags: Tags) -> Tags:
start, end = get_start_and_end_of_named_list_in_xdata(name, tags)
return Tags(tags[start:end])
class NotFoundException(Exception):
pass
def get_start_and_end_of_named_list_in_xdata(
name: str, tags: Tags
) -> Tuple[int, int]:
start = None
end = None
level = 0
for index in range(len(tags)):
tag = tags[index]
if start is None and tag == (1000, name):
next_tag = tags[index + 1]
if next_tag == OPEN_LIST:
start = index
continue
if start is not None:
if tag == OPEN_LIST:
level += 1
elif tag == CLOSE_LIST:
level -= 1
if level == 0:
end = index
break
if start is None:
raise NotFoundException
if end is None:
raise DXFStructureError(
'Invalid XDATA structure: missing (1002, "}").'
)
return start, end + 1
def find_begin_and_end_of_encoded_xdata_tags(
name: str, tags: Tags
) -> Tuple[int, int]:
"""Find encoded XDATA tags, surrounded by group code 1000 tags
name_BEGIN and name_END (e.g. MTEXT column specification).
Raises:
NotFoundError: tag group not found
DXFStructureError: missing begin- or end tag
"""
begin_name = name + "_BEGIN"
end_name = name + "_END"
start = None
end = None
for index, (code, value) in enumerate(tags):
if code == 1000:
if value == begin_name:
start = index
elif value == end_name:
end = index + 1
break
if start is None:
if end is not None: # end tag without begin tag!
raise DXFStructureError(
f"Invalid XDATA structure: missing begin tag (1000, {begin_name})."
)
raise NotFoundException
if end is None:
raise DXFStructureError(
f"Invalid XDATA structure: missing end tag (1000, {end_name})."
)
return start, end
def binary_data_to_dxf_tags(
data: bytes,
length_group_code: int = 160,
value_group_code: int = 310,
value_size=127,
) -> Tags:
"""Convert binary data to DXF tags."""
tags = Tags()
length = len(data)
tags.append(dxftag(length_group_code, length))
index = 0
while index < length:
chunk = data[index : index + value_size]
tags.append(dxftag(value_group_code, chunk))
index += value_size
return tags
|
|
# coding=utf-8
"""
#### Grants
* Normal usage
```
GRANT REPLICATION CLIENT on *.* TO 'user'@'hostname' IDENTIFIED BY
'password';
```
* For innodb engine status
```
GRANT SUPER ON *.* TO 'user'@'hostname' IDENTIFIED BY
'password';
```
* For innodb engine status on MySQL versions 5.1.24+
```
GRANT PROCESS ON *.* TO 'user'@'hostname' IDENTIFIED BY
'password';
```
#### Dependencies
* MySQLdb
Netuitive Change History
2016/07/13 DVG - Send COUNTERS as COUNTERS, rather than converting them to rates and sending them as GAUGES.
2016/08/12 DVG - Removed Threads_created from the list of GAUGES, since it is, in fact, a COUNTER.
"""
import diamond.collector
from diamond.collector import str_to_bool
import re
import time
try:
import MySQLdb
from MySQLdb import MySQLError
except ImportError:
MySQLdb = None
MySQLError = ValueError
class MySQLCollector(diamond.collector.Collector):
_GAUGE_KEYS = [
'Innodb_buffer_pool_pages_data', 'Innodb_buffer_pool_pages_dirty',
'Innodb_buffer_pool_pages_free',
'Innodb_buffer_pool_pages_misc', 'Innodb_buffer_pool_pages_total',
'Innodb_data_pending_fsyncs', 'Innodb_data_pending_reads',
'Innodb_data_pending_writes',
'Innodb_os_log_pending_fsyncs', 'Innodb_os_log_pending_writes',
'Innodb_page_size',
'Innodb_row_lock_current_waits', 'Innodb_row_lock_time',
'Innodb_row_lock_time_avg',
'Innodb_row_lock_time_max',
'Key_blocks_unused', 'Last_query_cost', 'Max_used_connections',
'Open_files', 'Open_streams', 'Open_table_definitions', 'Open_tables',
'Qcache_free_blocks', 'Qcache_free_memory',
'Qcache_queries_in_cache', 'Qcache_total_blocks',
'Seconds_Behind_Master',
'Slave_open_temp_tables',
'Threads_cached', 'Threads_connected',
'Threads_running',
# innodb status non counter keys
'Innodb_bp_created_per_sec',
'Innodb_bp_pages_evicted_no_access_per_sec',
'Innodb_bp_pages_not_young_per_sec',
'Innodb_bp_pages_read_ahead_per_sec', 'Innodb_bp_pages_young_per_sec',
'Innodb_bp_reads_per_sec', 'Innodb_bp_written_per_sec',
'Innodb_bp_add_alloc', 'Innodb_bp_db_pages',
'Innodb_bp_dictionary_alloc', 'Innodb_bp_free_buffers',
'Innodb_bp_hit_rate', 'Innodb_bp_io_cur_pages',
'Innodb_bp_io_sum_pages', 'Innodb_bp_io_unzip_cur_pages',
'Innodb_bp_io_unzip_sum_pages', 'Innodb_bp_lru_len',
'Innodb_bp_modified_pages', 'Innodb_bp_not_young_hit_rate',
'Innodb_bp_old_db_pages', 'Innodb_bp_pending_pages',
'Innodb_bp_pending_writes_flush_list', 'Innodb_bp_pending_writes_lru',
'Innodb_bp_pending_writes_single_page', 'Innodb_bp_size',
'Innodb_bp_total_alloc', 'Innodb_bp_unzip_lru_len',
'Innodb_bp_young_hit_rate',
'Innodb_hash_searches_per_sec',
'Innodb_io_syncs_per_sec',
'Innodb_log_io_per_sec',
'Innodb_non_hash_searches_per_sec',
'Innodb_per_sec_avg',
'Innodb_reads_per_sec',
'Innodb_rows_deleted_per_sec', 'Innodb_rows_inserted_per_sec',
'Innodb_rows_read_per_sec', 'Innodb_rows_updated_per_sec',
'Innodb_sem_spins_per_wait_mutex', 'Innodb_sem_spins_per_wait_rw_excl',
'Innodb_sem_spins_per_wait_rw_shared',
'Innodb_writes_per_sec',
'Innodb_bytes_per_read',
'Innodb_hash_node_heap', 'Innodb_hash_table_size',
'Innodb_hash_used_cells',
'Innodb_ibuf_free_list_len', 'Innodb_ibuf_seg_size', 'Innodb_ibuf_size',
'Innodb_io_ibuf_logs', 'Innodb_io_ibuf_reads', 'Innodb_io_ibuf_syncs',
'Innodb_io_pending_flush_bp', 'Innodb_io_pending_flush_log',
'Innodb_io_pending_reads', 'Innodb_io_pending_writes',
'Innodb_log_pending_checkpoint_writes', 'Innodb_log_pending_log_writes',
'Innodb_row_queries_inside', 'Innodb_row_queries_queue',
'Innodb_trx_history_list_length', 'Innodb_trx_total_lock_structs',
'Innodb_status_process_time', ]
_IGNORE_KEYS = [
'Master_Port', 'Master_Server_Id',
'Last_Errno', 'Last_IO_Errno', 'Last_SQL_Errno', ]
innodb_status_keys = {
'Innodb_bp_total_alloc,' +
'Innodb_bp_add_alloc':
'Total memory allocated (\d+)\; in additional pool allocated (\d+)',
'Innodb_bp_reads_per_sec,' +
'Innodb_bp_created_per_sec,' +
'Innodb_bp_written_per_sec':
'(^\d+.\d+) reads/s, (\d+.\d+) creates/s, (\d+.\d+) writes/s',
'Innodb_io_ibuf_reads,Innodb_io_ibuf_logs,Innodb_io_ibuf_syncs':
' ibuf aio reads: (\d+), log i/o\'s: (\d+), sync i/o\'s: (\d+)',
'Innodb_log_pending_log_writes,Innodb_log_pending_checkpoint_writes':
'(\d+) pending log writes, (\d+) pending chkp writes',
'Innodb_hash_searches_per_sec,Innodb_non_hash_searches_per_sec':
'(\d+.\d+) hash searches/s, (\d+.\d+) non-hash searches/s',
'Innodb_row_queries_inside,Innodb_row_queries_queue':
'(\d+) queries inside InnoDB, (\d+) queries in queue',
'Innodb_trx_total_lock_structs':
'(\d+) lock struct\(s\), ' +
'heap size (\d+), ' +
'(\d+) row lock\(s\), ' +
'undo log entries (\d+)',
'Innodb_log_io_total,Innodb_log_io_per_sec':
'(\d+) log i\/o\'s done, (\d+.\d+) log i\/o\'s\/second',
'Innodb_io_os_file_reads,Innodb_io_os_file_writes,' +
'Innodb_io_os_file_fsyncs':
'(\d+) OS file reads, (\d+) OS file writes, (\d+) OS fsyncs',
'Innodb_rows_inserted_per_sec,Innodb_rows_updated_per_sec,' +
'Innodb_rows_deleted_per_sec,Innodb_rows_read_per_sec':
'(\d+.\d+) inserts\/s, ' +
'(\d+.\d+) updates\/s, ' +
'(\d+.\d+) deletes\/s, ' +
'(\d+.\d+) reads\/s',
'Innodb_reads_per_sec,Innodb_bytes_per_read,Innodb_io_syncs_per_sec,' +
'Innodb_writes_per_sec':
'(\d+.\d+) reads\/s, (\d+) avg bytes\/read, (\d+.\d+) writes\/s, ' +
'(\d+.\d+) fsyncs\/s',
'Innodb_bp_pages_young_per_sec,Innodb_bp_pages_not_young_per_sec':
'(\d+.\d+) youngs\/s, (\d+.\d+) non-youngs\/s',
'Innodb_bp_hit_rate,Innodb_bp_young_hit_rate,' +
'Innodb_bp_not_young_hit_rate':
'Buffer pool hit rate (\d+) \/ \d+, ' +
'young-making rate (\d+) \/ \d+ not (\d+) \/ \d+',
'Innodb_bp_size':
'Buffer pool size (\d+)',
'Innodb_bp_db_pages':
'Database pages (\d+)',
'Innodb_bp_dictionary_alloc':
'Dictionary memory allocated (\d+)',
'Innodb_bp_free_buffers':
'Free buffers (\d+)',
'Innodb_hash_table_size,Innodb_hash_node_heap':
'Hash table size (\d+), node heap has (\d+) buffer\(s\)',
'Innodb_trx_history_list_length':
'History list length (\d+)',
'Innodb_bp_io_sum_pages,Innodb_bp_io_cur_pages,' +
'Innodb_bp_io_unzip_sum_pages,Innodb_bp_io_unzip_cur_pages':
'I\/O sum\[(\d+)\]:cur\[(\d+)\], unzip sum\[(\d+)\]:cur\[(\d+)\]',
'Innodb_ibuf_size,Innodb_ibuf_free_list_len,Innodb_ibuf_seg_size,' +
'Innodb_ibuf_merges':
'Ibuf: size (\d+), free list len (\d+), seg size (\d+), (\d+) ' +
'merges',
'Innodb_bp_lru_len,Innodb_bp_unzip_lru_len':
'LRU len: (\d+), unzip_LRU len: (\d+)',
'Innodb_bp_modified_pages':
'Modified db pages (\d+)',
'Innodb_sem_mutex_spin_waits,Innodb_sem_mutex_rounds,' +
'Innodb_sem_mutex_os_waits':
'Mutex spin waits (\d+), rounds (\d+), OS waits (\d+)',
'Innodb_rows_inserted,Innodb_rows_updated,Innodb_rows_deleted,' +
'Innodb_rows_read':
'Number of rows inserted (\d+), updated (\d+), deleted (\d+), ' +
'read (\d+)',
'Innodb_bp_old_db_pages':
'Old database pages (\d+)',
'Innodb_sem_os_reservation_count,' +
'Innodb_sem_os_signal_count':
'OS WAIT ARRAY INFO: reservation count (\d+), signal count (\d+)',
'Innodb_bp_pages_young,Innodb_bp_pages_not_young':
'Pages made young (\d+), not young (\d+)',
'Innodb_bp_pages_read,Innodb_bp_pages_created,Innodb_bp_pages_written':
'Pages read (\d+), created (\d+), written (\d+)',
'Innodb_bp_pages_read_ahead_per_sec,' +
'Innodb_bp_pages_evicted_no_access_per_sec,' +
'Innodb_status_bp_pages_random_read_ahead':
'Pages read ahead (\d+.\d+)/s, ' +
'evicted without access (\d+.\d+)\/s, ' +
'Random read ahead (\d+.\d+)/s',
'Innodb_io_pending_flush_log,Innodb_io_pending_flush_bp':
'Pending flushes \(fsync\) log: (\d+); buffer pool: (\d+)',
'Innodb_io_pending_reads,Innodb_io_pending_writes':
'Pending normal aio reads: (\d+) \[\d+, \d+, \d+, \d+\], aio ' +
'writes: (\d+) \[\d+, \d+, \d+, \d+\]',
'Innodb_bp_pending_writes_lru,Innodb_bp_pending_writes_flush_list,' +
'Innodb_bp_pending_writes_single_page':
'Pending writes: LRU (\d+), flush list (\d+), single page (\d+)',
'Innodb_per_sec_avg':
'Per second averages calculated from the last (\d+) seconds',
'Innodb_sem_rw_excl_spins,Innodb_sem_rw_excl_rounds,' +
'Innodb_sem_rw_excl_os_waits':
'RW-excl spins (\d+), rounds (\d+), OS waits (\d+)',
'Innodb_sem_shared_spins,Innodb_sem_shared_rounds,' +
'Innodb_sem_shared_os_waits':
'RW-shared spins (\d+), rounds (\d+), OS waits (\d+)',
'Innodb_sem_spins_per_wait_mutex,Innodb_sem_spins_per_wait_rw_shared,' +
'Innodb_sem_spins_per_wait_rw_excl':
'Spin rounds per wait: (\d+.\d+) mutex, (\d+.\d+) RW-shared, ' +
'(\d+.\d+) RW-excl',
'Innodb_main_thd_log_flush_writes':
'srv_master_thread log flush and writes: (\d+)',
'Innodb_main_thd_loops_one_sec,Innodb_main_thd_loops_sleeps,' +
'Innodb_main_thd_loops_ten_sec,Innodb_main_thd_loops_background,' +
'Innodb_main_thd_loops_flush':
'srv_master_thread loops: (\d+) 1_second, (\d+) sleeps, (\d+) ' +
'10_second, (\d+) background, (\d+) flush',
'Innodb_ibuf_inserts,Innodb_ibuf_merged_recs,Innodb_ibuf_merges':
'(\d+) inserts, (\d+) merged recs, (\d+) merges',
}
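# Each key above is a comma-separated list of metric names; the capture groups
# of the associated regex (in order) supply their values when a line of
# 'SHOW ENGINE INNODB STATUS' output matches (see get_stats() below).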
innodb_status_match = {}
def __init__(self, *args, **kwargs):
super(MySQLCollector, self).__init__(*args, **kwargs)
for key in self.innodb_status_keys:
self.innodb_status_keys[key] = re.compile(
self.innodb_status_keys[key])
def process_config(self):
super(MySQLCollector, self).process_config()
if self.config['hosts'].__class__.__name__ != 'list':
self.config['hosts'] = [self.config['hosts']]
# Move legacy config format to new format
if 'host' in self.config:
hoststr = "%s:%s@%s:%s/%s" % (
self.config['user'],
self.config['passwd'],
self.config['host'],
self.config['port'],
self.config['db'],
)
self.config['hosts'].append(hoststr)
# Normalize some config vars
self.config['master'] = str_to_bool(self.config['master'])
self.config['slave'] = str_to_bool(self.config['slave'])
self.config['innodb'] = str_to_bool(self.config['innodb'])
self.db = None
def get_default_config_help(self):
config_help = super(MySQLCollector, self).get_default_config_help()
config_help.update({
'publish':
"Which rows of '[SHOW GLOBAL STATUS](http://dev.mysql." +
"com/doc/refman/5.1/en/show-status.html)' you would " +
"like to publish. Leave unset to publish all",
'slave': 'Collect SHOW SLAVE STATUS',
'master': 'Collect SHOW MASTER STATUS',
'innodb': 'Collect SHOW ENGINE INNODB STATUS',
'hosts': 'List of hosts to collect from. Format is ' +
'yourusername:yourpassword@host:port/db[/nickname]' +
'use db "None" to avoid connecting to a particular db'
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(MySQLCollector, self).get_default_config()
config.update({
'path': 'mysql',
# Connection settings
'hosts': [],
# Which rows of 'SHOW GLOBAL STATUS' you would like to publish.
# http://dev.mysql.com/doc/refman/5.1/en/show-status.html
# Leave unset to publish all
# 'publish': '',
'slave': False,
'master': False,
'innodb': False,
})
return config
def get_db_stats(self, query):
cursor = self.db.cursor(cursorclass=MySQLdb.cursors.DictCursor)
try:
cursor.execute(query)
return cursor.fetchall()
except MySQLError, e:
self.log.error('MySQLCollector could not get db stats: %s', e)
return ()
def connect(self, params):
try:
self.db = MySQLdb.connect(**params)
self.log.debug('MySQLCollector: Connected to database.')
except MySQLError, e:
self.log.error("MySQLCollector couldn't connect to database %s", e)
return False
return True
def disconnect(self):
self.db.close()
def get_db_global_status(self):
return self.get_db_stats('SHOW GLOBAL STATUS')
def get_db_master_status(self):
return self.get_db_stats('SHOW MASTER STATUS')
def get_db_slave_status(self):
return self.get_db_stats('SHOW SLAVE STATUS')
def get_db_innodb_status(self):
return self.get_db_stats('SHOW ENGINE INNODB STATUS')
def get_stats(self, params):
metrics = {'status': {}}
if not self.connect(params):
return metrics
rows = self.get_db_global_status()
for row in rows:
try:
metrics['status'][row['Variable_name']] = float(row['Value'])
except:
pass
if self.config['master']:
metrics['master'] = {}
try:
rows = self.get_db_master_status()
for row_master in rows:
for key, value in row_master.items():
if key in self._IGNORE_KEYS:
continue
try:
metrics['master'][key] = float(row_master[key])
except:
pass
except:
self.log.error("MySQLCollector: Couldn't get master status")
pass
if self.config['slave']:
metrics['slave'] = {}
try:
rows = self.get_db_slave_status()
for row_slave in rows:
for key, value in row_slave.items():
if key in self._IGNORE_KEYS:
continue
try:
metrics['slave'][key] = float(row_slave[key])
except:
pass
except:
self.log.error("MySQLCollector: Couldn't get slave status")
pass
if self.config['innodb']:
metrics['innodb'] = {}
innodb_status_timer = time.time()
try:
rows = self.get_db_innodb_status()
innodb_status_output = rows[0]
todo = self.innodb_status_keys.keys()
for line in innodb_status_output['Status'].split('\n'):
for key in list(todo):  # iterate over a copy: keys are removed from todo below
match = self.innodb_status_keys[key].match(line)
if match is not None:
todo.remove(key)
match_index = 1
for key_index in key.split(','):
try:
value = float(match.group(match_index))
# store value
if key_index in metrics['innodb']:
self.log.debug("MySQLCollector: %s " +
"already defined, " +
"ignoring new value",
key_index)
else:
metrics['innodb'][key_index] = value
match_index += 1
except IndexError:
self.log.debug(
"MySQLCollector: Cannot find value " +
"in innodb status for %s", key_index)
for key in todo:
self.log.debug("MySQLCollector: %s regexp not matched " +
"in innodb status", key)
except Exception, innodb_status_error:
self.log.error("MySQLCollector: Couldn't get engine innodb " +
'status, check user permissions: %s',
innodb_status_error)
Innodb_status_process_time = time.time() - innodb_status_timer
self.log.debug("MySQLCollector: innodb status process time: %f",
Innodb_status_process_time)
subkey = "Innodb_status_process_time"
metrics['innodb'][subkey] = Innodb_status_process_time
self.disconnect()
return metrics
def _publish_stats(self, nickname, metrics):
for key in metrics:
for metric_name in metrics[key]:
metric_value = metrics[key][metric_name]
if type(metric_value) is not float:
continue
###
#
# 20160713 DVG - This next block of code has been modified to not compute the
# differential for COUNTER metrics and not to turn them into RATES. Instead,
# we will publish them as COUNTERS.
#
###
# Default the metric type to GAUGE
metric_type = "GAUGE"
if metric_name not in self._GAUGE_KEYS:
# If it's not a GAUGE, assume it's a COUNTER
metric_type = "COUNTER"
# Do not compute differential; do not convert to a rate.
# metric_value = self.derivative(nickname + metric_name, metric_value)
if key == 'status':
if (('publish' not in self.config or
metric_name in self.config['publish'])):
# Publish the metric with the appropriate metric type (COUNTER or GAUGE)
self.publish(nickname + metric_name, metric_value, metric_type=metric_type)
else:
# Publish the metric with the appropriate metric type (COUNTER or GAUGE)
self.publish(nickname + metric_name, metric_value, metric_type=metric_type)
def collect(self):
if MySQLdb is None:
self.log.error('Unable to import MySQLdb')
return False
for host in self.config['hosts']:
matches = re.search(
'^([^:]*):([^@]*)@([^:]*):?([^/]*)/([^/]*)/?(.*)', host)
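# e.g. 'user:secret@127.0.0.1:3306/mydb/primary' maps, in order, to
# user/passwd/host/port/db and an optional trailing nickname ('primary' here);
# the values are purely illustrative.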
if not matches:
self.log.error(
'Connection string not in required format, skipping: %s',
host)
continue
params = {}
params['host'] = matches.group(3)
try:
params['port'] = int(matches.group(4))
except ValueError:
params['port'] = 3306
params['db'] = matches.group(5)
params['user'] = matches.group(1)
params['passwd'] = matches.group(2)
nickname = matches.group(6)
if len(nickname):
nickname += '.'
if params['db'] == 'None':
del params['db']
try:
metrics = self.get_stats(params=params)
except Exception, e:
try:
self.disconnect()
except MySQLdb.ProgrammingError:
pass
self.log.error('Collection failed for %s %s', nickname, e)
continue
# Warn if publish contains an unknown variable
if 'publish' in self.config and metrics['status']:
for k in self.config['publish'].split():
if k not in metrics['status']:
self.log.error("No such key '%s' available, issue " +
"'show global status' for a full " +
"list", k)
self._publish_stats(nickname, metrics)
|
|
from flask import jsonify, request, g
from flask_cors import cross_origin
from alerta.app.auth.utils import permission
from alerta.app.exceptions import RejectException, RateLimit, BlackoutPeriod
from alerta.app.models.alert import Alert
from alerta.app.models.metrics import Timer, timer
from alerta.app.utils.api import jsonp, process_alert, process_status, add_remote_ip
from alerta.app.exceptions import ApiError
from . import api
receive_timer = Timer('alerts', 'received', 'Received alerts', 'Total time and number of received alerts')
gets_timer = Timer('alerts', 'queries', 'Alert queries', 'Total time and number of alert queries')
status_timer = Timer('alerts', 'status', 'Alert status change', 'Total time and number of alerts with status changed')
tag_timer = Timer('alerts', 'tagged', 'Tagging alerts', 'Total time and number of tagged alerts')
untag_timer = Timer('alerts', 'untagged', 'Removing tags from alerts', 'Total time and number of un-tagged alerts')
attrs_timer = Timer('alerts', 'attributes', 'Alert attributes change', 'Total time and number of alerts with attributes changed')
delete_timer = Timer('alerts', 'deleted', 'Deleted alerts', 'Total time and number of deleted alerts')
count_timer = Timer('alerts', 'counts', 'Count alerts', 'Total time and number of count queries')
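# A minimal sketch of a request handled by receive() below (field names follow
# the common Alerta alert schema; treat them as illustrative, the authoritative
# set is whatever Alert.parse() accepts):
#
#   POST /alert
#   {"resource": "web01", "event": "HttpServerError", "environment": "Production",
#    "severity": "major", "service": ["example-api"], "text": "HTTP 5xx rate high"}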
@api.route('/alert', methods=['OPTIONS', 'POST'])
@cross_origin()
@permission('write:alerts')
@timer(receive_timer)
@jsonp
def receive():
try:
incomingAlert = Alert.parse(request.json)
except ValueError as e:
raise ApiError(str(e), 400)
if g.get('customer', None):
incomingAlert.customer = g.get('customer')
add_remote_ip(request, incomingAlert)
try:
alert = process_alert(incomingAlert)
except RejectException as e:
raise ApiError(str(e), 403)
except RateLimit as e:
return jsonify(status="error", message=str(e), id=incomingAlert.id), 429
except BlackoutPeriod as e:
return jsonify(status="ok", message=str(e), id=incomingAlert.id), 202
except Exception as e:
raise ApiError(str(e), 500)
if alert:
return jsonify(status="ok", id=alert.id, alert=alert.serialize), 201
else:
raise ApiError("insert or update of received alert failed", 500)
@api.route('/alert/<alert_id>', methods=['OPTIONS', 'GET'])
@cross_origin()
@permission('read:alerts')
@timer(gets_timer)
@jsonp
def get_alert(alert_id):
customer = g.get('customer', None)
alert = Alert.get(alert_id, customer)
if alert:
return jsonify(status="ok", total=1, alert=alert.serialize)
else:
raise ApiError("not found", 404)
# set status
@api.route('/alert/<alert_id>/status', methods=['OPTIONS', 'PUT'])
@cross_origin()
@permission('write:alerts')
@timer(status_timer)
@jsonp
def set_status(alert_id):
status = request.json.get('status', None)
text = request.json.get('text', '')
if not status:
raise ApiError("must supply 'status' as json data")
customer = g.get('customer', None)
alert = Alert.get(alert_id, customer)
if not alert:
raise ApiError("not found", 404)
try:
alert, status, text = process_status(alert, status, text)
except RejectException as e:
raise ApiError(str(e), 403)
except Exception as e:
raise ApiError(str(e), 500)
if alert.set_status(status, text):
return jsonify(status="ok")
else:
raise ApiError("failed to set alert status", 500)
# tag
@api.route('/alert/<alert_id>/tag', methods=['OPTIONS', 'PUT'])
@cross_origin()
@permission('write:alerts')
@timer(tag_timer)
@jsonp
def tag_alert(alert_id):
if not request.json.get('tags', None):
raise ApiError("must supply 'tags' as json list")
customer = g.get('customer', None)
alert = Alert.get(alert_id, customer)
if not alert:
raise ApiError("not found", 404)
if alert.tag(tags=request.json['tags']):
return jsonify(status="ok")
else:
raise ApiError("failed to tag alert", 500)
# untag
@api.route('/alert/<alert_id>/untag', methods=['OPTIONS', 'PUT'])
@cross_origin()
@permission('write:alerts')
@timer(untag_timer)
@jsonp
def untag_alert(alert_id):
if not request.json.get('tags', None):
raise ApiError("must supply 'tags' as json list")
customer = g.get('customer', None)
alert = Alert.get(alert_id, customer)
if not alert:
raise ApiError("not found", 404)
if alert.untag(tags=request.json['tags']):
return jsonify(status="ok")
else:
raise ApiError("failed to untag alert", 500)
# update attributes
@api.route('/alert/<alert_id>/attributes', methods=['OPTIONS', 'PUT'])
@cross_origin()
@permission('write:alerts')
@timer(attrs_timer)
@jsonp
def update_attributes(alert_id):
if not request.json.get('attributes', None):
raise ApiError("must supply 'attributes' as json data", 400)
customer = g.get('customer', None)
alert = Alert.get(alert_id, customer)
if not alert:
raise ApiError("not found", 404)
if alert.update_attributes(request.json['attributes']):
return jsonify(status="ok")
else:
raise ApiError("failed to update attributes", 500)
# delete
@api.route('/alert/<alert_id>', methods=['OPTIONS', 'DELETE'])
@cross_origin()
@permission('write:alerts')
@timer(delete_timer)
@jsonp
def delete_alert(alert_id):
customer = g.get('customer', None)
alert = Alert.get(alert_id, customer)
if not alert:
raise ApiError("not found", 404)
if alert.delete():
return jsonify(status="ok")
else:
raise ApiError("failed to delete alert", 500)
@api.route('/alerts', methods=['OPTIONS', 'GET'])
@cross_origin()
@permission('read:alerts')
@timer(gets_timer)
@jsonp
def search_alerts():
query, sort, group, page, page_size, query_time = Alert.build_query(request.args)
severity_count = Alert.get_counts_by_severity(query)
status_count = Alert.get_counts_by_status(query)
total = sum(severity_count.values())
pages = ((total - 1) // page_size) + 1
if total and page > pages or page < 0:
raise ApiError("page out of range: 1-%s" % pages, 416)
alerts = Alert.find_all(query, sort, page, page_size)
if alerts:
return jsonify(
status="ok",
total=total,
page=page,
pageSize=page_size,
pages=pages,
more=page < pages,
alerts=[alert.serialize for alert in alerts],
statusCounts=status_count,
severityCounts=severity_count,
lastTime=max([alert.last_receive_time for alert in alerts])
)
else:
return jsonify(
status="ok",
message="not found",
total=0,
page=page,
pageSize=page_size,
pages=pages,
more=False,
alerts=[],
severityCounts=severity_count,
statusCounts=status_count,
lastTime=query_time
)
@api.route('/alerts/history', methods=['OPTIONS', 'GET'])
@cross_origin()
@permission('read:alerts')
@timer(gets_timer)
@jsonp
def history():
query, _, _, _, _, _ = Alert.build_query(request.args)
history = Alert.get_history(query)
if history:
return jsonify(
status="ok",
history=history
)
else:
raise ApiError('No alert histories found', 404)
# severity counts
# status counts
@api.route('/alerts/count', methods=['OPTIONS', 'GET'])
@cross_origin()
@permission('read:alerts')
@timer(count_timer)
@jsonp
def get_counts():
query, _, _, _, _, _ = Alert.build_query(request.args)
severity_count = Alert.get_counts_by_severity(query)
status_count = Alert.get_counts_by_status(query)
return jsonify(
status="ok",
total=sum(severity_count.values()),
severityCounts=severity_count,
statusCounts=status_count
)
# top 10 counts
@api.route('/alerts/top10/count', methods=['OPTIONS', 'GET'])
@cross_origin()
@permission('read:alerts')
@timer(count_timer)
@jsonp
def get_top10_count():
query, _, _, _, _, _ = Alert.build_query(request.args)
top10 = Alert.get_top10_count(query)
if top10:
return jsonify(
status="ok",
total=len(top10),
top10=top10
)
else:
raise ApiError('No alerts found', 404)
# top 10 flapping
@api.route('/alerts/top10/flapping', methods=['OPTIONS', 'GET'])
@cross_origin()
@permission('read:alerts')
@timer(count_timer)
@jsonp
def get_top10_flapping():
query, _, _, _, _, _ = Alert.build_query(request.args)
top10 = Alert.get_top10_flapping(query)
if top10:
return jsonify(
status="ok",
total=len(top10),
top10=top10
)
else:
raise ApiError('No flapping alerts found', 404)
# get alert environments
@api.route('/environments', methods=['OPTIONS', 'GET'])
@cross_origin()
@permission('read:alerts')
@timer(gets_timer)
@jsonp
def get_environments():
query, _, _, _, _, _ = Alert.build_query(request.args)
environments = Alert.get_environments(query)
if environments:
return jsonify(
status="ok",
total=len(environments),
environments=environments
)
else:
raise ApiError('No environments found', 404)
# get alert services
@api.route('/services', methods=['OPTIONS', 'GET'])
@cross_origin()
@permission('read:alerts')
@timer(gets_timer)
@jsonp
def get_services():
query, _, _, _, _, _ = Alert.build_query(request.args)
services = Alert.get_services(query)
if services:
return jsonify(
status="ok",
total=len(services),
services=services
)
else:
raise ApiError('No services found', 404)
|
|
#!/usr/bin/env python3
# Copyright (c) 2015-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test BIP65 (CHECKLOCKTIMEVERIFY).
Test that the CHECKLOCKTIMEVERIFY soft-fork activates at (regtest) block height
1351.
"""
from test_framework.blocktools import create_block, create_coinbase, create_transaction, make_conform_to_ctor
from test_framework.messages import (
CTransaction,
FromHex,
msg_block,
msg_tx,
ToHex,
)
from test_framework.mininode import (
P2PInterface,
)
from test_framework.script import (
CScript,
CScriptNum,
OP_1NEGATE,
OP_CHECKLOCKTIMEVERIFY,
OP_DROP,
OP_TRUE,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.txtools import pad_tx
from test_framework.util import assert_equal
CLTV_HEIGHT = 1351
# Reject codes that we might receive in this test
REJECT_INVALID = 16
REJECT_OBSOLETE = 17
REJECT_NONSTANDARD = 64
def cltv_lock_to_height(node, tx, to_address, amount, height=-1):
'''Modify the scriptPubKey to add an OP_CHECKLOCKTIMEVERIFY, and make
a transaction that spends it.
This transforms the output script into an anyone-can-spend output (OP_TRUE)
that becomes spendable once the lock time condition is satisfied.
The default height of -1 causes the CLTV check to always fail.
TODO: test more ways that transactions using CLTV could be invalid (eg
locktime requirements fail, sequence time requirements fail, etc).
'''
height_op = OP_1NEGATE
if height > 0:
tx.vin[0].nSequence = 0
tx.nLockTime = height
height_op = CScriptNum(height)
tx.vout[0].scriptPubKey = CScript(
[height_op, OP_CHECKLOCKTIMEVERIFY, OP_DROP, OP_TRUE])
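# The script requires the spending transaction's nLockTime to be at least
# `height` (and the spending input's nSequence to be non-final), then drops the
# value and leaves OP_TRUE, so anyone can spend once the check passes. With the
# default height_op of OP_1NEGATE the CLTV check always fails.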
pad_tx(tx)
fundtx_raw = node.signrawtransactionwithwallet(ToHex(tx))['hex']
fundtx = FromHex(CTransaction(), fundtx_raw)
fundtx.rehash()
# make spending tx
inputs = [{
"txid": fundtx.hash,
"vout": 0
}]
output = {to_address: amount}
spendtx_raw = node.createrawtransaction(inputs, output)
spendtx = FromHex(CTransaction(), spendtx_raw)
pad_tx(spendtx)
return fundtx, spendtx
class BIP65Test(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [[
'-whitelist=127.0.0.1',
'-par=1', # Use only one script thread to get the exact reject reason for testing
'-acceptnonstdtxn=1', # cltv_invalidate is nonstandard
]]
self.setup_clean_chain = True
self.rpc_timeout = 120
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.nodes[0].add_p2p_connection(P2PInterface())
self.log.info("Mining {} blocks".format(CLTV_HEIGHT - 2))
self.coinbase_txids = [self.nodes[0].getblock(
b)['tx'][0] for b in self.nodes[0].generate(CLTV_HEIGHT - 2)]
self.nodeaddress = self.nodes[0].getnewaddress()
self.log.info(
"Test that an invalid-according-to-CLTV transaction can still appear in a block")
fundtx = create_transaction(self.nodes[0], self.coinbase_txids[0],
self.nodeaddress, 49.99)
fundtx, spendtx = cltv_lock_to_height(
self.nodes[0], fundtx, self.nodeaddress, 49.98)
tip = self.nodes[0].getbestblockhash()
block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
block = create_block(int(tip, 16), create_coinbase(
CLTV_HEIGHT - 1), block_time)
block.nVersion = 3
block.vtx.append(fundtx)
# include the -1 CLTV in block
block.vtx.append(spendtx)
make_conform_to_ctor(block)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
self.nodes[0].p2p.send_and_ping(msg_block(block))
# This block is valid
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
self.log.info("Test that blocks must now be at least version 4")
tip = block.sha256
block_time += 1
block = create_block(tip, create_coinbase(CLTV_HEIGHT), block_time)
block.nVersion = 3
block.solve()
with self.nodes[0].assert_debug_log(expected_msgs=['{}, bad-version(0x00000003)'.format(block.hash)]):
self.nodes[0].p2p.send_and_ping(msg_block(block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
self.nodes[0].p2p.sync_with_ping()
self.log.info(
"Test that invalid-according-to-cltv transactions cannot appear in a block")
block.nVersion = 4
fundtx = create_transaction(self.nodes[0], self.coinbase_txids[1],
self.nodeaddress, 49.99)
fundtx, spendtx = cltv_lock_to_height(
self.nodes[0], fundtx, self.nodeaddress, 49.98)
        # The funding tx only has an unexecuted bad CLTV in its scriptPubKey;
        # this is valid.
self.nodes[0].p2p.send_and_ping(msg_tx(fundtx))
assert fundtx.hash in self.nodes[0].getrawmempool()
# Mine a block containing the funding transaction
block.vtx.append(fundtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
self.nodes[0].p2p.send_and_ping(msg_block(block))
# This block is valid
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
# We show that this tx is invalid due to CLTV by getting it
# rejected from the mempool for exactly that reason.
assert_equal(
[{'txid': spendtx.hash, 'allowed': False,
'reject-reason': '64: non-mandatory-script-verify-flag (Negative locktime)'}],
self.nodes[0].testmempoolaccept(
rawtxs=[spendtx.serialize().hex()], maxfeerate=0)
)
rejectedtx_signed = self.nodes[0].signrawtransactionwithwallet(
ToHex(spendtx))
# Couldn't complete signature due to CLTV
assert rejectedtx_signed['errors'][0]['error'] == 'Negative locktime'
tip = block.hash
block_time += 1
block = create_block(
block.sha256, create_coinbase(CLTV_HEIGHT + 1), block_time)
block.nVersion = 4
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
with self.nodes[0].assert_debug_log(expected_msgs=['ConnectBlock {} failed, blk-bad-inputs'.format(block.hash)]):
self.nodes[0].p2p.send_and_ping(msg_block(block))
assert_equal(self.nodes[0].getbestblockhash(), tip)
self.nodes[0].p2p.sync_with_ping()
self.log.info(
"Test that a version 4 block with a valid-according-to-CLTV transaction is accepted")
fundtx = create_transaction(self.nodes[0], self.coinbase_txids[2],
self.nodeaddress, 49.99)
fundtx, spendtx = cltv_lock_to_height(
self.nodes[0], fundtx, self.nodeaddress, 49.98, CLTV_HEIGHT)
# make sure sequence is nonfinal and locktime is good
spendtx.vin[0].nSequence = 0xfffffffe
spendtx.nLockTime = CLTV_HEIGHT
# both transactions are fully valid
self.nodes[0].sendrawtransaction(ToHex(fundtx))
self.nodes[0].sendrawtransaction(ToHex(spendtx))
# Modify the transactions in the block to be valid against CLTV
block.vtx.pop(1)
block.vtx.append(fundtx)
block.vtx.append(spendtx)
make_conform_to_ctor(block)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
self.nodes[0].p2p.send_and_ping(msg_block(block))
# This block is now valid
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
if __name__ == '__main__':
BIP65Test().main()
|
|
"""
Python module code representation.
:copyright: 2008 by Takanori Ishikawa
:license: MIT, see LICENSE for more details.
"""
from modipyd import utils, LOGGER, BYTECODE_PROCESSORS
from modipyd.resolve import ModuleNameResolver
from modipyd.utils import filepath_to_identifier
from modipyd.utils.decorators import require
from modipyd import bytecode as bc
# ----------------------------------------------------------------
# Loading, Compiling source code
# ----------------------------------------------------------------
def load_compiled(filepath):
"""
Load and initialize a byte-compiled code file and return
    its ``code`` object. Raise ``ImportError`` if the magic number is invalid.
"""
import marshal
import imp
fp = open(filepath, 'rb')
try:
if fp.read(4) != imp.get_magic():
            raise ImportError("Bad magic number in %s" % filepath)
fp.read(4)
return marshal.load(fp)
finally:
fp.close()
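# Illustrative note: a Python 2 byte-compiled file (.pyc/.pyo) starts with a
# 4-byte magic number followed by a 4-byte source timestamp; load_compiled()
# above verifies the magic, skips the timestamp, and unmarshals the code object
# that follows.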
def compile_source(filepath):
"""
Compile the source file at *filepath* into a code object.
Code objects can be executed by an exec statement or
evaluated by a call to ``eval()``.
"""
# line endings must be represented by a single newline character ('\n'),
# and the input must be terminated by at least one newline character.
fp = open(filepath, 'U')
try:
return compile(fp.read() + '\n', filepath, 'exec')
finally:
fp.close()
# ----------------------------------------------------------------
# Python Module Finder
# ----------------------------------------------------------------
# Bit masks for python relative file type
PYTHON_SOURCE_MASK = 1
PYTHON_COMPILED_MASK = 2
PYTHON_OPTIMIZED_MASK = 4
# file extension -> typebits mask
PYTHON_FILE_TYPES = {
'.py': PYTHON_SOURCE_MASK,
'.pyc': PYTHON_COMPILED_MASK,
'.pyo': PYTHON_OPTIMIZED_MASK,
}
@require(filename=basestring)
def module_file_typebits(filename):
from os.path import splitext
path, ext = splitext(filename)
typebits = PYTHON_FILE_TYPES.get(ext, 0)
return (path, ext, typebits)
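# Illustrative example (hypothetical path):
#   module_file_typebits("pkg/mod.py") -> ("pkg/mod", ".py", PYTHON_SOURCE_MASK)
# A filename with no recognised Python extension yields a typebits value of 0.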
def collect_python_module_file(filepath_or_list):
"""Generates (filepath without extention, bitmask)"""
modules = {}
for filepath in utils.collect_files(filepath_or_list, ['.?*', 'CVS']):
# For performance gain, use bitmask value
# instead of filepath string.
path, _, typebits = module_file_typebits(filepath)
modules.setdefault(path, 0)
modules[path] |= typebits
return (item for item in modules.iteritems() if item[1] > 0)
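# Illustrative example: if both "mod.py" and "mod.pyc" exist under a scanned
# directory, a single ("<dir>/mod", PYTHON_SOURCE_MASK | PYTHON_COMPILED_MASK)
# pair is generated, i.e. a combined bitmask of 3.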
def collect_module_code(filepath_or_list, search_path=None):
resolver = ModuleNameResolver(search_path)
for filename, typebits in collect_python_module_file(filepath_or_list):
try:
yield read_module_code(filename,
search_path=search_path, typebits=typebits,
resolver=resolver, allow_compilation_failure=True)
except ImportError:
LOGGER.debug("Couldn't import file", exc_info=True)
@require(filename=basestring,
typebits=(int, None),
resolver=(ModuleNameResolver, None))
def read_module_code(filename, typebits=None, search_path=None,
resolver=None,
allow_compilation_failure=False,
allow_standalone=False):
"""
Read python module file, and return ``ModuleCode`` instance.
    If the *typebits* argument is not ``None``, *filename* must be a
    filepath without a file extension.
If *typebits* argument is ``None``, it is detected by filename.
"""
if typebits is None:
filename, _, typebits = module_file_typebits(filename)
if resolver is None:
resolver = ModuleNameResolver(search_path)
code = None
try:
# Since editing .py files will not affect .pyc and .pyo files soon,
# give priority to .py files.
if typebits & PYTHON_SOURCE_MASK:
# .py
sourcepath = filename + '.py'
code = compile_source(sourcepath)
elif typebits & (PYTHON_OPTIMIZED_MASK | PYTHON_COMPILED_MASK):
# .pyc, .pyo
if typebits & PYTHON_OPTIMIZED_MASK:
sourcepath = filename + '.pyo'
else:
sourcepath = filename + '.pyc'
code = load_compiled(sourcepath)
else:
assert False, "illegal typebits: %d" % typebits
except (SyntaxError, ImportError):
LOGGER.warn(
"Exception occurred while loading compiled bytecode",
exc_info=True)
if not allow_compilation_failure:
raise
try:
module_name, package_name = resolver.resolve(sourcepath)
except ImportError:
if not allow_standalone:
raise
module_name = filepath_to_identifier(sourcepath)
package_name = None
return ModuleCode(module_name, package_name, sourcepath, code)
# ----------------------------------------------------------------
# Module class
# ----------------------------------------------------------------
# cached processor classes
BYTECODE_PROCESSORS_CACHE = []
def load_bytecode_processors():
"""
    Load BytecodeProcessor classes from the modipyd.BYTECODE_PROCESSORS
    setting. Return a ChainedBytecodeProcessor instance holding
    all loaded processors.
"""
if (BYTECODE_PROCESSORS and
(len(BYTECODE_PROCESSORS_CACHE) != len(BYTECODE_PROCESSORS))):
del BYTECODE_PROCESSORS_CACHE[:]
for i, name in enumerate(BYTECODE_PROCESSORS[:]):
LOGGER.info("Loading BytecodeProcesser '%s'" % name)
try:
klass = utils.import_component(name)
except (ImportError, AttributeError):
LOGGER.warn(
"Loading BytecodeProcesser '%s' failed. "
"This setting is removed" % name,
exc_info=True)
del BYTECODE_PROCESSORS[i]
else:
BYTECODE_PROCESSORS_CACHE.append(klass)
processors = []
for klass in BYTECODE_PROCESSORS_CACHE:
processors.append(klass())
return bc.ChainedBytecodeProcessor(processors)
class ModuleCode(object):
"""Python module representation"""
def __init__(self, modulename, packagename, filename, code):
"""
        Instantiates and initializes a ``ModuleCode`` object
>>> code = compile(
... "import os;"
... "from os.path import join as join_path",
... '<string>', 'exec')
>>> modcode = ModuleCode('__main__', '', code.co_filename, code)
>>> modcode.name
'__main__'
>>> modcode.filename
'<string>'
>>> imports = modcode.context['imports']
>>> len(imports)
2
>>> imports[0]
('os', 'os', -1)
>>> imports[1]
('join_path', 'os.path.join', -1)
"""
super(ModuleCode, self).__init__()
self.name = modulename
self.package_name = packagename
self.filename = filename
self.context = {}
if code is None:
# Maybe source file contains SyntaxError?
pass
else:
self.update_code(code)
def update_code(self, co):
self.context.clear()
processor = load_bytecode_processors()
bc.scan_code(co, processor, self.context)
def reload(self, co=None):
if co is None:
f = self.filename
if utils.python_source_file(f):
co = compile_source(f)
elif utils.python_compiled_file(f):
co = load_compiled(f)
else:
raise ImportError("No module named %s at %s" % (self.name, f))
self.update_code(co)
return co
def __str__(self):
return "<ModuleCode '%s' (%s)>" % (self.name, self.filename)
def __eq__(self, other):
return (self is other or
(isinstance(other, type(self)) and
self.name == other.name))
def __hash__(self):
return hash(self.name)
if __name__ == "__main__":
import doctest
doctest.testmod()
|
|
# coding=utf-8
u"""This module provides class for managing resource configuration."""
#
# Copyright 2017 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import logging
import f5_cccl.exceptions as cccl_exc
from f5.sdk_exception import F5SDKError
from icontrol.exceptions import iControlUnexpectedHTTPError
from requests.utils import quote as urlquote
LOGGER = logging.getLogger(__name__)
class Resource(object):
u"""Resource super class to wrap BIG-IP configuration objects.
A Resource represents a piece of CCCL configuration as represented
    by the cccl-api-schema. Its purpose is to wrap configuration into
an object that can later be used to perform Create, Read, Update,
and Delete operations on the BIG-IP
    Data should only be initialized on creation of the Resource object
    and not modified. If a new representation is required, a Resource
    object with the corrected model should be created and the original
    discarded.
The subclasses of Resource should only concern themselves with
    manipulation of the schema data to pack it into a payload that
can be used to perform create, modify, and delete operations.
Therefore, it is expected that the specialization of subclasses will
    be concentrated in the __init__, update, and __eq__ methods.
All subclasses are expected to implement the _uri_path method so
that the appropriate resource URI is used when performing CRUD.
"""
@classmethod
def classname(cls):
"""Return the class name of the resource."""
return cls.__name__
def __init__(self, name, partition):
u"""Initialize a BIG-IP resource object from a CCCL schema object.
Args:
name (string): the name of the resource
partition (string): the resource partition
"""
if not name:
LOGGER.error("Resource instantiation error: undefined name")
raise ValueError(
"must have at least name({})".format(name))
self._data = dict()
self._data['name'] = name
self._data['partition'] = partition
def __eq__(self, resource):
u"""Compare two resources for equality.
Args:
            resource (Resource): The resource to compare
Return:
True if equal
False otherwise
"""
return self._data == resource.data
def __ne__(self, resource):
return not self.__eq__(resource)
def __hash__(self):
return hash((self.name, self.partition))
def __lt__(self, resource):
return self.full_path() < resource.full_path()
def __str__(self):
return str(self._data)
def create(self, bigip):
u"""Create resource on a BIG-IP system.
The internal data model is applied to the BIG-IP
Args:
bigip (f5.bigip.ManagementRoot): F5 SDK session object
Returns: created resource object.
Raises:
            F5CcclResourceCreateError: resource cannot be created for an
                unspecified reason.
            F5CcclResourceConflictError: resource cannot be created because
                it already exists on the BIG-IP
"""
LOGGER.info("Creating %s: /%s/%s",
self.classname(), self.partition, self.name)
try:
obj = self._uri_path(bigip).create(**self._data)
return obj
except iControlUnexpectedHTTPError as err:
self._handle_http_error(err)
except F5SDKError as err:
LOGGER.error("Create FAILED: /%s/%s", self.partition, self.name)
raise cccl_exc.F5CcclResourceCreateError(str(err))
def read(self, bigip):
u"""Retrieve a BIG-IP resource from a BIG-IP.
Returns a resource object with attributes for instance on a
BIG-IP system.
Args:
bigip (f5.bigip.ManagementRoot): F5 SDK session object
Returns: resource retrieved from BIG-IP
Raises:
            F5CcclResourceNotFoundError: resource cannot be loaded because
                it does not exist on the BIG-IP
"""
LOGGER.info("Loading %s: /%s/%s",
self.classname(), self.partition, self.name)
try:
obj = self._uri_path(bigip).load(
name=urlquote(self.name),
partition=self.partition)
return obj
except iControlUnexpectedHTTPError as err:
self._handle_http_error(err)
except F5SDKError as err:
LOGGER.error("Load FAILED: /%s/%s", self.partition, self.name)
raise cccl_exc.F5CcclError(str(err))
def update(self, bigip, data=None, modify=False):
u"""Update a resource (e.g., pool) on a BIG-IP system.
Modifies a resource on a BIG-IP system using attributes
defined in the model object.
The internal data model is applied to the BIG-IP
Args:
bigip: BigIP instance to use for updating resource.
            data: Optional data to apply; used mostly for 'patching' (modify),
                and contains the specific targets of the update operation
            modify: Specifies whether this is a modify (a patch of specific
                key/value pairs) rather than an update of the whole object
Raises:
            F5CcclResourceUpdateError: resource cannot be updated for an
                unspecified reason.
            F5CcclResourceNotFoundError: resource cannot be updated because
                it does not exist on the BIG-IP
"""
LOGGER.info("Updating %s: /%s/%s",
self.classname(), self.partition, self.name)
if not data:
data = self._data
try:
obj = self._uri_path(bigip).load(
name=urlquote(self.name),
partition=self.partition)
payload = copy.copy(data)
if modify:
obj.modify(**payload)
else:
obj.update(**payload)
except iControlUnexpectedHTTPError as err:
self._handle_http_error(err)
except F5SDKError as err:
LOGGER.error("Update FAILED: /%s/%s", self.partition, self.name)
raise cccl_exc.F5CcclResourceUpdateError(str(err))
def delete(self, bigip):
u"""Delete a resource on a BIG-IP system.
Loads a resource and deletes it.
Args:
bigip: BigIP instance to use for delete resource.
Raises:
            F5CcclResourceDeleteError: resource cannot be deleted for an
                unspecified reason.
            F5CcclResourceNotFoundError: resource cannot be deleted because
                it does not exist on the BIG-IP
"""
LOGGER.info("Deleting %s: /%s/%s",
self.classname(), self.partition, self.name)
try:
obj = self._uri_path(bigip).load(
name=urlquote(self.name),
partition=self.partition)
obj.delete()
except AttributeError as err:
msg = "Could not delete {}, is it present on the BIG-IP?".format(
str(self))
raise cccl_exc.F5CcclResourceDeleteError(msg)
except iControlUnexpectedHTTPError as err:
self._handle_http_error(err)
except F5SDKError as err:
LOGGER.error("Delete FAILED: /%s/%s", self.partition, self.name)
raise cccl_exc.F5CcclResourceDeleteError(str(err))
@property
def name(self):
u"""Get the name for this resource."""
return self._data['name']
@property
def partition(self):
u"""Get the partition for this resource."""
return self._data['partition']
@property
def data(self):
u"""Get the internal data model for this resource."""
return self._data
def full_path(self):
u"""Concatenate the partition and name to form fullPath."""
return "/{}/{}".format(self.partition, self.name)
def _uri_path(self, bigip):
u"""Get the URI resource path key for the F5 SDK.
For example, a pool resource returns:
bigip.tm.ltm.pools.pool
        This needs to be implemented by a Resource subclass.
"""
raise NotImplementedError
def _handle_http_error(self, error):
u"""Extract the error code and reraise a CCCL Error."""
code = error.response.status_code
LOGGER.error(
"HTTP error(%d): CCCL resource(%s) /%s/%s.",
code, self.classname(), self.partition, self.name)
if code == 404:
raise cccl_exc.F5CcclResourceNotFoundError(str(error))
elif code == 409:
raise cccl_exc.F5CcclResourceConflictError(str(error))
elif code >= 400 and code < 500:
raise cccl_exc.F5CcclResourceRequestError(str(error))
else:
raise cccl_exc.F5CcclError(str(error))
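# Illustrative sketch (not part of f5-cccl): a minimal Resource subclass showing
# how _uri_path is typically implemented. The URI path follows the pool example
# given in the Resource._uri_path docstring above; the extra keyword properties
# are a hypothetical payload.
class ExamplePool(Resource):
    u"""Minimal pool resource used only to illustrate subclassing Resource."""
    def __init__(self, name, partition, **properties):
        super(ExamplePool, self).__init__(name, partition)
        # Fold any additional schema properties into the internal data model.
        self._data.update(properties)
    def _uri_path(self, bigip):
        # A pool resource is managed via bigip.tm.ltm.pools.pool.
        return bigip.tm.ltm.pools.pool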
|
|
# coding: utf-8
"""
Kubeflow Training SDK
Python SDK for Kubeflow Training # noqa: E501
The version of the OpenAPI document: v1.4.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubeflow.training.configuration import Configuration
class V1JobStatus(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'completion_time': 'V1Time',
'conditions': 'list[V1JobCondition]',
'last_reconcile_time': 'V1Time',
'replica_statuses': 'dict(str, V1ReplicaStatus)',
'start_time': 'V1Time'
}
attribute_map = {
'completion_time': 'completionTime',
'conditions': 'conditions',
'last_reconcile_time': 'lastReconcileTime',
'replica_statuses': 'replicaStatuses',
'start_time': 'startTime'
}
def __init__(self, completion_time=None, conditions=None, last_reconcile_time=None, replica_statuses=None, start_time=None, local_vars_configuration=None): # noqa: E501
"""V1JobStatus - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._completion_time = None
self._conditions = None
self._last_reconcile_time = None
self._replica_statuses = None
self._start_time = None
self.discriminator = None
if completion_time is not None:
self.completion_time = completion_time
self.conditions = conditions
if last_reconcile_time is not None:
self.last_reconcile_time = last_reconcile_time
self.replica_statuses = replica_statuses
if start_time is not None:
self.start_time = start_time
@property
def completion_time(self):
"""Gets the completion_time of this V1JobStatus. # noqa: E501
:return: The completion_time of this V1JobStatus. # noqa: E501
:rtype: V1Time
"""
return self._completion_time
@completion_time.setter
def completion_time(self, completion_time):
"""Sets the completion_time of this V1JobStatus.
:param completion_time: The completion_time of this V1JobStatus. # noqa: E501
:type: V1Time
"""
self._completion_time = completion_time
@property
def conditions(self):
"""Gets the conditions of this V1JobStatus. # noqa: E501
Conditions is an array of current observed job conditions. # noqa: E501
:return: The conditions of this V1JobStatus. # noqa: E501
:rtype: list[V1JobCondition]
"""
return self._conditions
@conditions.setter
def conditions(self, conditions):
"""Sets the conditions of this V1JobStatus.
Conditions is an array of current observed job conditions. # noqa: E501
:param conditions: The conditions of this V1JobStatus. # noqa: E501
:type: list[V1JobCondition]
"""
if self.local_vars_configuration.client_side_validation and conditions is None: # noqa: E501
raise ValueError("Invalid value for `conditions`, must not be `None`") # noqa: E501
self._conditions = conditions
@property
def last_reconcile_time(self):
"""Gets the last_reconcile_time of this V1JobStatus. # noqa: E501
:return: The last_reconcile_time of this V1JobStatus. # noqa: E501
:rtype: V1Time
"""
return self._last_reconcile_time
@last_reconcile_time.setter
def last_reconcile_time(self, last_reconcile_time):
"""Sets the last_reconcile_time of this V1JobStatus.
:param last_reconcile_time: The last_reconcile_time of this V1JobStatus. # noqa: E501
:type: V1Time
"""
self._last_reconcile_time = last_reconcile_time
@property
def replica_statuses(self):
"""Gets the replica_statuses of this V1JobStatus. # noqa: E501
        ReplicaStatuses is a map of ReplicaType to ReplicaStatus, which specifies the status of each replica.  # noqa: E501
:return: The replica_statuses of this V1JobStatus. # noqa: E501
:rtype: dict(str, V1ReplicaStatus)
"""
return self._replica_statuses
@replica_statuses.setter
def replica_statuses(self, replica_statuses):
"""Sets the replica_statuses of this V1JobStatus.
        ReplicaStatuses is a map of ReplicaType to ReplicaStatus, which specifies the status of each replica.  # noqa: E501
:param replica_statuses: The replica_statuses of this V1JobStatus. # noqa: E501
:type: dict(str, V1ReplicaStatus)
"""
if self.local_vars_configuration.client_side_validation and replica_statuses is None: # noqa: E501
raise ValueError("Invalid value for `replica_statuses`, must not be `None`") # noqa: E501
self._replica_statuses = replica_statuses
@property
def start_time(self):
"""Gets the start_time of this V1JobStatus. # noqa: E501
:return: The start_time of this V1JobStatus. # noqa: E501
:rtype: V1Time
"""
return self._start_time
@start_time.setter
def start_time(self, start_time):
"""Sets the start_time of this V1JobStatus.
:param start_time: The start_time of this V1JobStatus. # noqa: E501
:type: V1Time
"""
self._start_time = start_time
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1JobStatus):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1JobStatus):
return True
return self.to_dict() != other.to_dict()
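# Illustrative usage sketch (not generated code): construct a status object with
# the required fields and inspect its dict representation.
#   status = V1JobStatus(conditions=[], replica_statuses={})
#   status.to_dict()  # -> {'completion_time': None, 'conditions': [], ...}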
|
|
"""The Game of Hog."""
from dice import four_sided, six_sided, make_test_dice
from ucb import main, trace, log_current_line, interact
GOAL_SCORE = 100 # The goal of Hog is to score 100 points.
######################
# Phase 1: Simulator #
######################
def roll_dice(num_rolls, dice=six_sided):
"""Roll DICE for NUM_ROLLS times. Return either the sum of the outcomes,
or 1 if a 1 is rolled (Pig out). This calls DICE exactly NUM_ROLLS times.
num_rolls: The number of dice rolls that will be made; at least 1.
dice: A zero-argument function that returns an integer outcome.
"""
# These assert statements ensure that num_rolls is a positive integer.
assert type(num_rolls) == int, 'num_rolls must be an integer.'
assert num_rolls > 0, 'Must roll at least once.'
"*** YOUR CODE HERE ***"
def take_turn(num_rolls, opponent_score, dice=six_sided):
"""Simulate a turn rolling NUM_ROLLS dice, which may be 0 (Free bacon).
num_rolls: The number of dice rolls that will be made.
opponent_score: The total score of the opponent.
dice: A function of no args that returns an integer outcome.
"""
assert type(num_rolls) == int, 'num_rolls must be an integer.'
assert num_rolls >= 0, 'Cannot roll a negative number of dice.'
assert num_rolls <= 10, 'Cannot roll more than 10 dice.'
assert opponent_score < 100, 'The game should be over.'
"*** YOUR CODE HERE ***"
def select_dice(score, opponent_score):
"""Select six-sided dice unless the sum of SCORE and OPPONENT_SCORE is a
multiple of 7, in which case select four-sided dice (Hog wild).
"""
"*** YOUR CODE HERE ***"
def bid_for_start(bid0, bid1, goal=GOAL_SCORE):
"""Given the bids BID0 and BID1 of each player, returns three values:
- the starting score of player 0
- the starting score of player 1
- the number of the player who rolls first (0 or 1)
"""
assert bid0 >= 0 and bid1 >= 0, "Bids should be non-negative!"
assert type(bid0) == int and type(bid1) == int, "Bids should be integers!"
# The buggy code is below:
if bid0 == bid1:
return 0, goal, goal
if bid0 == bid1 - 5:
return 0, 0, 0
if bid1 == bid0 + 5:
return 10, 0, 1
if bid1 > bid0:
return bid1, bid0, 0
else:
return bid0, bid1, 1
def other(who):
"""Return the other player, for a player WHO numbered 0 or 1.
>>> other(0)
1
>>> other(1)
0
"""
return 1 - who
def play(strategy0, strategy1, score0=0, score1=0, goal=GOAL_SCORE):
"""Simulate a game and return the final scores of both players, with
Player 0's score first, and Player 1's score second.
A strategy is a function that takes two total scores as arguments
(the current player's score, and the opponent's score), and returns a
number of dice that the current player will roll this turn.
strategy0: The strategy function for Player 0, who plays first
strategy1: The strategy function for Player 1, who plays second
score0 : The starting score for Player 0
score1 : The starting score for Player 1
"""
who = 0 # Which player is about to take a turn, 0 (first) or 1 (second)
"*** YOUR CODE HERE ***"
return score0, score1 # You may want to change this line.
#######################
# Phase 2: Strategies #
#######################
def always_roll(n):
"""Return a strategy that always rolls N dice.
A strategy is a function that takes two total scores as arguments
(the current player's score, and the opponent's score), and returns a
number of dice that the current player will roll this turn.
>>> strategy = always_roll(5)
>>> strategy(0, 0)
5
>>> strategy(99, 99)
5
"""
def strategy(score, opponent_score):
return n
return strategy
# Experiments
def make_averaged(fn, num_samples=1000):
"""Return a function that returns the average_value of FN when called.
To implement this function, you will have to use *args syntax, a new Python
feature introduced in this project. See the project description.
>>> dice = make_test_dice(3, 1, 5, 6)
>>> averaged_dice = make_averaged(dice, 1000)
>>> averaged_dice()
3.75
>>> make_averaged(roll_dice, 1000)(2, dice)
6.0
In this last example, two different turn scenarios are averaged.
- In the first, the player rolls a 3 then a 1, receiving a score of 1.
- In the other, the player rolls a 5 and 6, scoring 11.
Thus, the average value is 6.0.
"""
"*** YOUR CODE HERE ***"
def max_scoring_num_rolls(dice=six_sided):
"""Return the number of dice (1 to 10) that gives the highest average turn
score by calling roll_dice with the provided DICE. Assume that dice always
return positive outcomes.
>>> dice = make_test_dice(3)
>>> max_scoring_num_rolls(dice)
10
"""
"*** YOUR CODE HERE ***"
def winner(strategy0, strategy1):
"""Return 0 if strategy0 wins against strategy1, and 1 otherwise."""
score0, score1 = play(strategy0, strategy1)
if score0 > score1:
return 0
else:
return 1
def average_win_rate(strategy, baseline=always_roll(5)):
"""Return the average win rate (0 to 1) of STRATEGY against BASELINE."""
win_rate_as_player_0 = 1 - make_averaged(winner)(strategy, baseline)
win_rate_as_player_1 = make_averaged(winner)(baseline, strategy)
return (win_rate_as_player_0 + win_rate_as_player_1) / 2 # Average results
def run_experiments():
"""Run a series of strategy experiments and report results."""
if True: # Change to False when done finding max_scoring_num_rolls
six_sided_max = max_scoring_num_rolls(six_sided)
print('Max scoring num rolls for six-sided dice:', six_sided_max)
four_sided_max = max_scoring_num_rolls(four_sided)
print('Max scoring num rolls for four-sided dice:', four_sided_max)
if False: # Change to True to test always_roll(8)
print('always_roll(8) win rate:', average_win_rate(always_roll(8)))
if False: # Change to True to test bacon_strategy
print('bacon_strategy win rate:', average_win_rate(bacon_strategy))
if False: # Change to True to test swap_strategy
print('swap_strategy win rate:', average_win_rate(swap_strategy))
if False: # Change to True to test final_strategy
print('final_strategy win rate:', average_win_rate(final_strategy))
"*** You may add additional experiments as you wish ***"
# Strategies
def bacon_strategy(score, opponent_score, margin=8, num_rolls=5):
"""This strategy rolls 0 dice if that gives at least MARGIN points,
and rolls NUM_ROLLS otherwise.
"""
"*** YOUR CODE HERE ***"
return None # Replace this statement
def swap_strategy(score, opponent_score, margin=8, num_rolls=5):
"""This strategy rolls 0 dice when it would result in a beneficial swap and
rolls NUM_ROLLS if it would result in a harmful swap. It also rolls
0 dice if that gives at least MARGIN points and rolls
NUM_ROLLS otherwise.
"""
"*** YOUR CODE HERE ***"
return None # Replace this statement
def final_strategy(score, opponent_score):
"""Write a brief description of your final strategy.
*** YOUR DESCRIPTION HERE ***
"""
"*** YOUR CODE HERE ***"
return 5 # Replace this statement
##########################
# Command Line Interface #
##########################
# Note: Functions in this section do not need to be changed. They use features
# of Python not yet covered in the course.
@main
def run(*args):
"""Read in the command-line argument and calls corresponding functions.
This function uses Python syntax/techniques not yet covered in this course.
"""
import argparse
parser = argparse.ArgumentParser(description="Play Hog")
parser.add_argument('--run_experiments', '-r', action='store_true',
help='Runs strategy experiments')
args = parser.parse_args()
if args.run_experiments:
run_experiments()
|
|
# Copyright 2017 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Parses MuJoCo header files and generates Python bindings."""
import os
import pprint
import textwrap
from absl import logging
from dm_control.autowrap import c_declarations
from dm_control.autowrap import codegen_util
from dm_control.autowrap import header_parsing
import pyparsing
# Absolute path to the top-level module.
_MODULE = "dm_control.mujoco.wrapper"
class Error(Exception):
pass
class BindingGenerator:
"""Parses declarations from MuJoCo headers and generates Python bindings."""
def __init__(self,
enums_dict=None,
consts_dict=None,
typedefs_dict=None,
hints_dict=None,
types_dict=None,
funcs_dict=None,
strings_dict=None,
func_ptrs_dict=None,
index_dict=None):
"""Constructs a new HeaderParser instance.
    The optional arguments listed below can be used to pass in dict-like
objects specifying pre-defined declarations. By default empty
UniqueOrderedDicts will be instantiated and then populated according to the
contents of the headers.
Args:
enums_dict: Nested mappings from {enum_name: {member_name: value}}.
consts_dict: Mapping from {const_name: value}.
typedefs_dict: Mapping from {type_name: ctypes_typename}.
hints_dict: Mapping from {var_name: shape_tuple}.
types_dict: Mapping from {type_name: type_instance}.
funcs_dict: Mapping from {func_name: Function_instance}.
strings_dict: Mapping from {var_name: StaticStringArray_instance}.
func_ptrs_dict: Mapping from {var_name: FunctionPtr_instance}.
index_dict: Mapping from {lowercase_struct_name: {var_name: shape_tuple}}.
"""
self.enums_dict = (enums_dict if enums_dict is not None
else codegen_util.UniqueOrderedDict())
self.consts_dict = (consts_dict if consts_dict is not None
else codegen_util.UniqueOrderedDict())
self.typedefs_dict = (typedefs_dict if typedefs_dict is not None
else codegen_util.UniqueOrderedDict())
self.hints_dict = (hints_dict if hints_dict is not None
else codegen_util.UniqueOrderedDict())
self.types_dict = (types_dict if types_dict is not None
else codegen_util.UniqueOrderedDict())
self.funcs_dict = (funcs_dict if funcs_dict is not None
else codegen_util.UniqueOrderedDict())
self.strings_dict = (strings_dict if strings_dict is not None
else codegen_util.UniqueOrderedDict())
self.func_ptrs_dict = (func_ptrs_dict if func_ptrs_dict is not None
else codegen_util.UniqueOrderedDict())
self.index_dict = (index_dict if index_dict is not None
else codegen_util.UniqueOrderedDict())
def get_consts_and_enums(self):
consts_and_enums = self.consts_dict.copy()
for enum in self.enums_dict.values():
consts_and_enums.update(enum)
return consts_and_enums
def resolve_size(self, old_size):
"""Resolves an array size identifier.
The following conversions will be attempted:
* If `old_size` is an integer it will be returned as-is.
* If `old_size` is a string of the form `"3"` it will be cast to an int.
* If `old_size` is a string in `self.consts_dict` then the value of the
constant will be returned.
* If `old_size` is a string of the form `"3*constant_name"` then the
result of `3*constant_value` will be returned.
* If `old_size` is a string that does not specify an int constant and
cannot be cast to an int (e.g. an identifier for a dynamic dimension,
such as `"ncontact"`) then it will be returned as-is.
Args:
old_size: An int or string.
Returns:
An int or string.
"""
if isinstance(old_size, int):
return old_size # If it's already an int then there's nothing left to do
elif "*" in old_size:
# If it's a string specifying a product (such as "2*mjMAXLINEPNT"),
# recursively resolve the components to ints and calculate the result.
size = 1
sizes = []
is_int = True
for part in old_size.split("*"):
dim = self.resolve_size(part)
sizes.append(dim)
if not isinstance(dim, int):
is_int = False
else:
size *= dim
if is_int:
return size
else:
return tuple(sizes)
else:
# Recursively dereference any sizes declared in header macros
size = codegen_util.recursive_dict_lookup(old_size,
self.get_consts_and_enums())
# Try to coerce the result to an int, return a string if this fails
return codegen_util.try_coerce_to_num(size, try_types=(int,))
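  # Illustrative resolution examples (assuming "mjMAXLINEPNT" is one of the
  # parsed header constants): resolve_size(3) -> 3, resolve_size("3") -> 3,
  # resolve_size("2*mjMAXLINEPNT") -> 2 * <value of mjMAXLINEPNT>, and
  # resolve_size("ncontact") -> "ncontact" (a runtime-sized dimension).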
def get_shape_tuple(self, old_size, squeeze=False):
"""Generates a shape tuple from parser results.
Args:
old_size: Either a `pyparsing.ParseResults`, or a valid int or string
input to `self.resolve_size` (see method docstring for further details).
squeeze: If True, any dimensions that are statically defined as 1 will be
removed from the shape tuple.
Returns:
A shape tuple containing ints for dimensions that are statically defined,
and string size identifiers for dimensions that can only be determined at
runtime.
"""
if isinstance(old_size, pyparsing.ParseResults):
# For multi-dimensional arrays, convert each dimension separately
shape = tuple(self.resolve_size(dim) for dim in old_size)
else:
shape = (self.resolve_size(old_size),)
if squeeze:
shape = tuple(d for d in shape if d != 1) # Remove singleton dimensions
return shape
def resolve_typename(self, old_ctypes_typename):
"""Gets a qualified ctypes typename from typedefs_dict and C_TO_CTYPES."""
# Recursively dereference any typenames declared in self.typedefs_dict
new_ctypes_typename = codegen_util.recursive_dict_lookup(
old_ctypes_typename, self.typedefs_dict)
# Try to convert to a ctypes native typename
new_ctypes_typename = header_parsing.C_TO_CTYPES.get(
new_ctypes_typename, new_ctypes_typename)
if new_ctypes_typename == old_ctypes_typename:
logging.warning("Could not resolve typename '%s'", old_ctypes_typename)
return new_ctypes_typename
def get_type_from_token(self, token, parent=None):
"""Accepts a token returned by a parser, returns a subclass of CDeclBase."""
comment = codegen_util.mangle_comment(token.comment)
is_const = token.is_const == "const"
# An anonymous union declaration
if token.anonymous_union:
      if not (parent and parent.name):
raise Error(
"Anonymous unions must be members of a named struct or union.")
# Generate a name based on the name of the parent.
name = codegen_util.mangle_varname(parent.name + "_anon_union")
members = codegen_util.UniqueOrderedDict()
sub_structs = codegen_util.UniqueOrderedDict()
out = c_declarations.AnonymousUnion(
name, members, sub_structs, comment, parent)
# Add members
for sub_token in token.members:
# Recurse into nested structs
member = self.get_type_from_token(sub_token, parent=out)
out.members[member.name] = member
# Nested sub-structures need special treatment
if isinstance(member, c_declarations.Struct):
out.sub_structs[member.name] = member
# Add to dict of unions
self.types_dict[out.ctypes_typename] = out
# A struct declaration
elif token.members:
name = token.name
# If the name is empty, see if there is a type declaration that matches
# this struct's typename
if not name:
for k, v in self.typedefs_dict.items():
if v == token.typename:
name = k
# Anonymous structs need a dummy typename
typename = token.typename
if not typename:
if parent:
typename = token.name
else:
raise Error(
"Anonymous structs that aren't members of a named struct are not "
"supported (name = '{token.name}').".format(token=token))
# Mangle the name if it contains any protected keywords
name = codegen_util.mangle_varname(name)
members = codegen_util.UniqueOrderedDict()
sub_structs = codegen_util.UniqueOrderedDict()
out = c_declarations.Struct(name, typename, members, sub_structs, comment,
parent, is_const)
# Map the old typename to the mangled typename in typedefs_dict
self.typedefs_dict[typename] = out.ctypes_typename
# Add members
for sub_token in token.members:
# Recurse into nested structs
member = self.get_type_from_token(sub_token, parent=out)
out.members[member.name] = member
# Nested sub-structures need special treatment
if isinstance(member, c_declarations.Struct):
out.sub_structs[member.name] = member
# Add to dict of structs
self.types_dict[out.ctypes_typename] = out
else:
name = codegen_util.mangle_varname(token.name)
typename = self.resolve_typename(token.typename)
# 1D array with size defined at compile time
if token.size:
shape = self.get_shape_tuple(token.size)
if typename in {header_parsing.NONE, header_parsing.CTYPES_CHAR}:
out = c_declarations.StaticPtrArray(
name, typename, shape, comment, parent, is_const)
else:
out = c_declarations.StaticNDArray(
name, typename, shape, comment, parent, is_const)
elif token.ptr:
# Pointer to a numpy-compatible type, could be an array or a scalar
if typename in header_parsing.CTYPES_TO_NUMPY:
# Multidimensional array (one or more dimensions might be undefined)
if name in self.hints_dict:
# Dynamically-sized dimensions have string identifiers
shape = self.hints_dict[name]
if any(isinstance(d, str) for d in shape):
out = c_declarations.DynamicNDArray(name, typename, shape,
comment, parent, is_const)
else:
out = c_declarations.StaticNDArray(name, typename, shape, comment,
parent, is_const)
# This must be a pointer to a scalar primitive
else:
out = c_declarations.ScalarPrimitivePtr(name, typename, comment,
parent, is_const)
# Pointer to struct or other arbitrary type
else:
out = c_declarations.ScalarPrimitivePtr(name, typename, comment,
parent, is_const)
# A struct we've already encountered
elif typename in self.types_dict:
s = self.types_dict[typename]
if isinstance(s, c_declarations.FunctionPtrTypedef):
out = c_declarations.FunctionPtr(
name, token.name, s.typename, comment)
else:
out = c_declarations.Struct(name, s.typename, s.members,
s.sub_structs, comment, parent)
# Presumably this is a scalar primitive
else:
out = c_declarations.ScalarPrimitive(name, typename, comment, parent,
is_const)
return out
# Parsing functions.
# ----------------------------------------------------------------------------
def parse_hints(self, xmacro_src):
"""Parses mjxmacro.h, update self.hints_dict."""
parser = header_parsing.XMACRO
for tokens, _, _ in parser.scanString(xmacro_src):
for xmacro in tokens:
for member in xmacro.members:
# "Squeeze out" singleton dimensions.
shape = self.get_shape_tuple(member.dims, squeeze=True)
self.hints_dict.update({member.name: shape})
if codegen_util.is_macro_pointer(xmacro.name):
struct_name = codegen_util.macro_struct_name(xmacro.name)
if struct_name not in self.index_dict:
self.index_dict[struct_name] = {}
self.index_dict[struct_name].update({member.name: shape})
def parse_enums(self, src):
"""Parses mj*.h, update self.enums_dict."""
parser = header_parsing.ENUM_DECL
for tokens, _, _ in parser.scanString(src):
for enum in tokens:
members = codegen_util.UniqueOrderedDict()
value = 0
for member in enum.members:
# Leftward bitshift
if member.bit_lshift_a:
value = int(member.bit_lshift_a) << int(member.bit_lshift_b)
# Assignment
elif member.value:
value = int(member.value)
# Implicit count
else:
value += 1
members.update({member.name: value})
self.enums_dict.update({enum.name: members})
def parse_consts_typedefs(self, src):
"""Updates self.consts_dict, self.typedefs_dict."""
parser = (header_parsing.COND_DECL |
header_parsing.UNCOND_DECL)
for tokens, _, _ in parser.scanString(src):
self.recurse_into_conditionals(tokens)
def recurse_into_conditionals(self, tokens):
"""Called recursively within nested #if(n)def... #else... #endif blocks."""
for token in tokens:
# Another nested conditional block
if token.predicate:
if (token.predicate in self.get_consts_and_enums()
and self.get_consts_and_enums()[token.predicate]):
self.recurse_into_conditionals(token.if_true)
else:
self.recurse_into_conditionals(token.if_false)
# One or more declarations
else:
if token.typename:
self.typedefs_dict.update({token.name: token.typename})
elif token.value:
value = codegen_util.try_coerce_to_num(token.value)
# Avoid adding function aliases.
if isinstance(value, str):
continue
else:
self.consts_dict.update({token.name: value})
else:
self.consts_dict.update({token.name: True})
def parse_structs_and_function_pointer_typedefs(self, src):
"""Updates self.types_dict."""
parser = (header_parsing.NESTED_STRUCTS |
header_parsing.FUNCTION_PTR_TYPE_DECL)
for tokens, _, _ in parser.scanString(src):
for token in tokens:
if token.return_type:
# This is a function type declaration.
self.types_dict[token.typename] = c_declarations.FunctionPtrTypedef(
token.typename,
self.get_type_from_token(token.return_type),
tuple(self.get_type_from_token(arg) for arg in token.arguments))
else:
# This is a struct or a union.
self.get_type_from_token(token)
def parse_functions(self, src):
"""Updates self.funcs_dict."""
parser = header_parsing.MJAPI_FUNCTION_DECL
for tokens, _, _ in parser.scanString(src):
for token in tokens:
name = codegen_util.mangle_varname(token.name)
comment = codegen_util.mangle_comment(token.comment)
if token.arguments:
args = codegen_util.UniqueOrderedDict()
for arg in token.arguments:
a = self.get_type_from_token(arg)
args[a.name] = a
else:
args = None
if token.return_value:
ret_val = self.get_type_from_token(token.return_value)
else:
ret_val = None
func = c_declarations.Function(name, args, ret_val, comment)
self.funcs_dict[func.name] = func
def parse_global_strings(self, src):
"""Updates self.strings_dict."""
parser = header_parsing.MJAPI_STRING_ARRAY
for token, _, _ in parser.scanString(src):
name = codegen_util.mangle_varname(token.name)
shape = self.get_shape_tuple(token.dims)
self.strings_dict[name] = c_declarations.StaticStringArray(
name, shape, symbol_name=token.name)
def parse_function_pointers(self, src):
"""Updates self.func_ptrs_dict."""
parser = header_parsing.MJAPI_FUNCTION_PTR
for token, _, _ in parser.scanString(src):
name = codegen_util.mangle_varname(token.name)
self.func_ptrs_dict[name] = c_declarations.FunctionPtr(
name, symbol_name=token.name,
type_name=token.typename, comment=token.comment)
# Code generation methods
# ----------------------------------------------------------------------------
def make_header(self, imports=()):
"""Returns a header string for an auto-generated Python source file."""
docstring = textwrap.dedent("""
\"\"\"Automatically generated by {scriptname:}.
MuJoCo header version: {mujoco_version:}
\"\"\"
""".format(scriptname=os.path.split(__file__)[-1],
mujoco_version=self.consts_dict["mjVERSION_HEADER"]))
docstring = docstring[1:] # Strip the leading line break.
return "\n".join([docstring] + list(imports) + ["\n"])
def write_consts(self, fname):
"""Write constants."""
imports = [
"# pylint: disable=invalid-name",
]
with open(fname, "w") as f:
f.write(self.make_header(imports))
f.write(codegen_util.comment_line("Constants") + "\n")
for name, value in self.consts_dict.items():
f.write("{0} = {1}\n".format(name, value))
f.write("\n" + codegen_util.comment_line("End of generated code"))
def write_enums(self, fname):
"""Write enum definitions."""
with open(fname, "w") as f:
imports = [
"import collections",
"# pylint: disable=invalid-name",
"# pylint: disable=line-too-long",
]
f.write(self.make_header(imports))
f.write(codegen_util.comment_line("Enums"))
for enum_name, members in self.enums_dict.items():
fields = ["\"{}\"".format(name) for name in members.keys()]
values = [str(value) for value in members.values()]
s = textwrap.dedent("""
{0} = collections.namedtuple(
"{0}",
[{1}]
)({2})
""").format(enum_name, ",\n ".join(fields), ", ".join(values))
f.write(s)
f.write("\n" + codegen_util.comment_line("End of generated code"))
def write_types(self, fname):
"""Write ctypes struct and function type declarations."""
imports = [
"import ctypes",
]
with open(fname, "w") as f:
f.write(self.make_header(imports))
f.write(codegen_util.comment_line(
"ctypes struct, union, and function type declarations"))
for type_decl in self.types_dict.values():
f.write("\n" + type_decl.ctypes_decl)
f.write("\n" + codegen_util.comment_line("End of generated code"))
def write_wrappers(self, fname):
"""Write wrapper classes for ctypes structs."""
with open(fname, "w") as f:
imports = [
"import ctypes",
"# pylint: disable=undefined-variable",
"# pylint: disable=wildcard-import",
"from {} import util".format(_MODULE),
"from {}.mjbindings.types import *".format(_MODULE),
]
f.write(self.make_header(imports))
f.write(codegen_util.comment_line("Low-level wrapper classes"))
for type_decl in self.types_dict.values():
if isinstance(type_decl, c_declarations.Struct):
f.write("\n" + type_decl.wrapper_class)
f.write("\n" + codegen_util.comment_line("End of generated code"))
def write_funcs_and_globals(self, fname):
"""Write ctypes declarations for functions and global data."""
imports = [
"import collections",
"import ctypes",
"# pylint: disable=undefined-variable",
"# pylint: disable=wildcard-import",
"from {} import util".format(_MODULE),
"from {}.mjbindings.types import *".format(_MODULE),
"import numpy as np",
"# pylint: disable=line-too-long",
"# pylint: disable=invalid-name",
"# common_typos_disable",
]
with open(fname, "w") as f:
f.write(self.make_header(imports))
f.write("mjlib = util.get_mjlib()\n")
f.write("\n" + codegen_util.comment_line("ctypes function declarations"))
for function in self.funcs_dict.values():
f.write("\n" + function.ctypes_func_decl(cdll_name="mjlib"))
# Only require strings for UI purposes.
f.write("\n" + codegen_util.comment_line("String arrays") + "\n")
for string_arr in self.strings_dict.values():
f.write(string_arr.ctypes_var_decl(cdll_name="mjlib"))
f.write("\n" + codegen_util.comment_line("Callback function pointers"))
fields = ["'_{0}'".format(func_ptr.name)
for func_ptr in self.func_ptrs_dict.values()]
values = [func_ptr.ctypes_var_decl(cdll_name="mjlib")
for func_ptr in self.func_ptrs_dict.values()]
f.write(
textwrap.dedent("""
class _Callbacks:
__slots__ = [
{0}
]
def __init__(self):
{1}
""").format(",\n ".join(fields), "\n ".join(values)))
indent = codegen_util.Indenter()
with indent:
for func_ptr in self.func_ptrs_dict.values():
f.write(indent(func_ptr.getters_setters_with_custom_prefix("self._")))
f.write("\n\ncallbacks = _Callbacks() # pylint: disable=invalid-name")
f.write("\ndel _Callbacks\n")
f.write("\n" + codegen_util.comment_line("End of generated code"))
def write_index_dict(self, fname):
"""Write file containing array shape information for indexing."""
pp = pprint.PrettyPrinter()
output_string = pp.pformat(dict(self.index_dict))
indent = codegen_util.Indenter()
imports = [
"# pylint: disable=bad-continuation",
"# pylint: disable=line-too-long",
]
with open(fname, "w") as f:
f.write(self.make_header(imports))
f.write("array_sizes = (\n")
with indent:
f.write(output_string)
f.write("\n)")
f.write("\n" + codegen_util.comment_line("End of generated code"))
|
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import numpy
# D2h C2h C2v D2 Cs Ci C2 C1
# E E E E E E E E
# C2x C2x
# C2y C2y
# C2z C2 C2 C2z C2
# i i i
# sx sx
# sy sy
# sz sh sh
POINTGROUP = ('D2h', 'C2h', 'C2v', 'D2' , 'Cs' , 'Ci' , 'C2' , 'C1' ,)
OPERATOR_TABLE = {
'D2h': ('E', 'C2x', 'C2y', 'C2z', 'i', 'sx' , 'sy' , 'sz' ),
'C2h': ('E', 'C2z', 'i', 'sz' ),
'C2v': ('E', 'C2z', 'sx' , 'sy' , ),
'D2' : ('E', 'C2x', 'C2y', 'C2z', ),
'Cs' : ('E', 'sz' ),
'Ci' : ('E', 'i', ),
'C2' : ('E', 'C2z', ),
'C1' : ('E', ),
}
#
IRREP_ID_TABLE = { # bin for XOR
'D2h': {'Ag' : 0, # 000
'B1g': 1, # 001
'B2g': 2, # 010
'B3g': 3, # 011
'Au' : 4, # 100
'B1u': 5, # 101
'B2u': 6, # 110
'B3u': 7,}, # 111
'C2h': {'Ag': 0, # 00
'Bg': 1, # 01
'Au': 2, # 10
'Bu': 3,}, # 11
'C2v': {'A1': 0, # 00
'A2': 1, # 01
'B1': 2, # 10
'B2': 3,}, # 11
'D2' : {'A' : 0, # 00
'B1': 1, # 01
'B2': 2, # 10
'B3': 3,}, # 11
'Cs' : {'A\'': 0, # 0
'A\"': 1,}, # 1
'Ci' : {'Ag': 0, # 0
'Au': 1,}, # 1
'C2' : {'A': 0, # 0
'B': 1,}, # 1
'C1' : {'A': 0,}, # 0
}
IRREP_ID_MOLPRO = {'D2h': (1, # Ag
4, # B1g
6, # B2g
7, # B3g
8, # Au
5, # B1u
3, # B2u
2), # B3u
'C2v': (1, # A1
4, # A2
2, # B1
3), # B2
'C2h': (1, # Ag
4, # Bg
2, # Au
3), # Bu
'D2' : (1, # A
4, # B1
3, # B2
2), # B3
'Cs' : (1, # A'
2), # A"
'C2' : (1, # A
2), # B
'Ci' : (1, # Ag
2), # Au
'C1' : (1,)}
# E,C2x,C2y,C2z,i, sx,sy,sz
CHARACTER_TABLE = { # XOR
'D2h': (('Ag' , 1, 1, 1, 1, 1, 1, 1, 1), # 000
('B1g', 1,-1, -1, 1, 1,-1,-1, 1), # 001
('B2g', 1,-1, 1, -1, 1,-1, 1,-1), # 010
('B3g', 1, 1, -1, -1, 1, 1,-1,-1), # 011
('Au' , 1, 1, 1, 1, -1,-1,-1,-1), # 100
('B1u', 1,-1, -1, 1, -1, 1, 1,-1), # 101
('B2u', 1,-1, 1, -1, -1, 1,-1, 1), # 110
('B3u', 1, 1, -1, -1, -1,-1, 1, 1)), # 111
# E,C2,i, sh # XOR
'C2h': (('Ag', 1, 1, 1, 1), # 00
('Bg', 1,-1, 1,-1), # 01
('Au', 1, 1,-1,-1), # 10
('Bu', 1,-1,-1, 1)), # 11
# E,C2,sx,sy # XOR
'C2v': (('A1', 1, 1, 1, 1), # 00
('A2', 1, 1,-1,-1), # 01
('B1', 1,-1,-1, 1), # 10
('B2', 1,-1, 1,-1)), # 11
# E,C2x,C2y,C2z # XOR
'D2' : (('A' , 1, 1, 1, 1), # 00
('B1', 1,-1, -1, 1), # 01
('B2', 1,-1, 1, -1), # 10
('B3', 1, 1, -1, -1)), # 11
# E, sh # XOR
'Cs' : (('A\'',1, 1,), # 0
('A\"',1,-1,)), # 1
# E, i # XOR
'Ci' : (('Ag', 1, 1,), # 0
('Au', 1,-1,)), # 1
# E, C2 # XOR
'C2' : (('A', 1, 1,), # 0
('B', 1,-1,)), # 1
# E # XOR
'C1' : (('A', 1),), # 0
}
# D2h C2h C2v D2 Cs Ci C2 C1
SYMM_DESCENT_Z = (
('Ag' , 'Ag', 'A1', 'A' , 'A\'', 'Ag', 'A', 'A'),
('B1g', 'Ag', 'A2', 'B1', 'A\'', 'Ag', 'A', 'A'),
('B2g', 'Bg', 'B1', 'B2', 'A\"', 'Ag', 'B', 'A'),
('B3g', 'Bg', 'B2', 'B3', 'A\"', 'Ag', 'B', 'A'),
('Au' , 'Au', 'A2', 'A' , 'A\'', 'Au', 'A', 'A'),
('B1u', 'Au', 'A1', 'B1', 'A\'', 'Au', 'A', 'A'),
('B2u', 'Bu', 'B2', 'B2', 'A\"', 'Au', 'B', 'A'),
('B3u', 'Bu', 'B1', 'B3', 'A\"', 'Au', 'B', 'A'),
)
SYMM_DESCENT_X = (
('Ag' , 'Ag', 'A1', 'A' , 'A\'', 'Ag', 'A', 'A'),
('B1g', 'Bg', 'B2', 'B1', 'A\"', 'Ag', 'B', 'A'),
('B2g', 'Bg', 'B1', 'B2', 'A\"', 'Ag', 'B', 'A'),
('B3g', 'Ag', 'A2', 'B3', 'A\'', 'Ag', 'A', 'A'),
('Au' , 'Au', 'A2', 'A' , 'A\"', 'Au', 'A', 'A'),
('B1u', 'Bu', 'B1', 'B1', 'A\'', 'Au', 'B', 'A'),
('B2u', 'Bu', 'B2', 'B2', 'A\'', 'Au', 'B', 'A'),
('B3u', 'Au', 'A1', 'B3', 'A\"', 'Au', 'A', 'A'),
)
SYMM_DESCENT_Y = (
('Ag' , 'Ag', 'A1', 'A' , 'A\'', 'Ag', 'A', 'A'),
('B1g', 'Bg', 'B2', 'B1', 'A\"', 'Ag', 'B', 'A'),
('B2g', 'Ag', 'A2', 'B2', 'A\'', 'Ag', 'A', 'A'),
('B3g', 'Bg', 'B1', 'B3', 'A\"', 'Ag', 'B', 'A'),
('Au' , 'Au', 'A2', 'A' , 'A\"', 'Au', 'A', 'A'),
('B1u', 'Bu', 'B1', 'B1', 'A\'', 'Au', 'B', 'A'),
('B2u', 'Au', 'A1', 'B2', 'A\"', 'Au', 'A', 'A'),
('B3u', 'Bu', 'B2', 'B3', 'A\'', 'Au', 'B', 'A'),
)
SPHERIC_GTO_PARITY_ODD = (
# s
((0, 0, 0),),
# px, py, pz
((1, 0, 0),(0, 1, 0),(0, 0, 1)),
# dxy, dyz, dz2, dxz, dx2y2
((1, 1, 0),(0, 1, 1),(0, 0, 0),(1, 0, 1),(0, 0, 0),),
# fyx2, fxyz, fyz2, fz3, fxz2, fzx2, fx3
((0, 1, 0),(1, 1, 1),(0, 1, 0),(0, 0, 1),(1, 0, 0),
(0, 0, 1),(1, 0, 0),),
# g
((1, 1, 0),(0, 1, 1),(1, 1, 0),(0, 1, 1),(0, 0, 0),
(1, 0, 1),(0, 0, 0),(1, 0, 1),(0, 0, 0),),
# h
((0, 1, 0),(1, 1, 1),(0, 1, 0),(1, 1, 1),(0, 1, 0),
(0, 0, 1),(1, 0, 0),(0, 0, 1),(1, 0, 0),(0, 0, 1),
(1, 0, 0),),
# i
((1, 1, 0),(0, 1, 1),(1, 1, 0),(0, 1, 1),(1, 1, 0),
(0, 1, 1),(0, 0, 0),(1, 0, 1),(0, 0, 0),(1, 0, 1),
(0, 0, 0),(1, 0, 1),(0, 0, 0),),
# j
((0, 1, 0),(1, 1, 1),(0, 1, 0),(1, 1, 1),(0, 1, 0),
(1, 1, 1),(0, 1, 0),(0, 0, 1),(1, 0, 0),(0, 0, 1),
(1, 0, 0),(0, 0, 1),(1, 0, 0),(0, 0, 1),(1, 0, 0))
)
SUBGROUP = {
'Dooh':('Coov', 'D2h', 'C2v', 'C2h', 'C2', 'Cs', 'Ci', 'C1'),
'Coov':('C2v', 'C2', 'C1'),
'D2h': ('D2h', 'C2v', 'C2h', 'C2', 'Cs', 'Ci', 'C1'),
'C2v': ('C2v', 'C2' , 'Cs' , 'C1'),
'C2h': ('C2h', 'C2' , 'Cs' , 'C1'),
'D2' : ('D2' , 'C2' , 'Ci' , 'C1'),
'Cs' : ('Cs' , 'C1'),
'Ci' : ('Ci' , 'C1'),
'C2' : ('C2' , 'C1'),
'C1' : ('C1',),
}
D2H_OPS = {'E' : numpy.eye(3),
'C2z': numpy.diag((-1.,-1., 1.)),
'C2x': numpy.diag(( 1.,-1.,-1.)),
'C2y': numpy.diag((-1., 1.,-1.)),
'i' : numpy.diag((-1.,-1.,-1.)),
'sz' : numpy.diag(( 1., 1.,-1.)),
'sx' : numpy.diag((-1., 1., 1.)),
'sy' : numpy.diag(( 1.,-1., 1.)),}
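# Illustrative note: because the irrep IDs above are binary encoded ("bin for
# XOR"), the direct product of two irreps is the irrep whose ID is the XOR of
# their IDs. For example, in D2h:
#   IRREP_ID_TABLE['D2h']['B1g'] ^ IRREP_ID_TABLE['D2h']['B2g']  # == 3 == B3g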
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition for inception v2 classification network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
slim = tf.contrib.slim
trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)
def inception_v2_base(inputs,
final_endpoint='Mixed_5c',
min_depth=16,
depth_multiplier=1.0,
scope=None):
"""Inception v2 (6a2).
Constructs an Inception v2 network from inputs to the given final endpoint.
This method can construct the network up to the layer inception(5b) as
described in http://arxiv.org/abs/1502.03167.
Args:
inputs: a tensor of shape [batch_size, height, width, channels].
final_endpoint: specifies the endpoint to construct the network up to. It
can be one of ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c', 'Mixed_4a',
'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e', 'Mixed_5a', 'Mixed_5b',
'Mixed_5c'].
min_depth: Minimum depth value (number of channels) for all convolution ops.
Enforced when depth_multiplier < 1, and not an active constraint when
depth_multiplier >= 1.
depth_multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
scope: Optional variable_scope.
Returns:
tensor_out: output tensor corresponding to the final_endpoint.
end_points: a set of activations for external use, for example summaries or
losses.
Raises:
ValueError: if final_endpoint is not set to one of the predefined values,
or depth_multiplier <= 0
"""
# end_points will collect relevant activations for external use, for example
# summaries or losses.
end_points = {}
# Used to find thinned depths for each layer.
if depth_multiplier <= 0:
raise ValueError('depth_multiplier is not greater than zero.')
depth = lambda d: max(int(d * depth_multiplier), min_depth)
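  # For example (illustrative): with depth_multiplier=0.5 and min_depth=16,
  # depth(64) = max(int(64 * 0.5), 16) = 32, while depth(16) is floored at
  # min_depth and stays 16.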
with tf.variable_scope(scope, 'InceptionV2', [inputs]):
with slim.arg_scope(
[slim.conv2d, slim.max_pool2d, slim.avg_pool2d, slim.separable_conv2d],
stride=1, padding='SAME'):
# Note that sizes in the comments below assume an input spatial size of
      # 224x224; however, the inputs can be of any size greater than 32x32.
# 224 x 224 x 3
end_point = 'Conv2d_1a_7x7'
# depthwise_multiplier here is different from depth_multiplier.
# depthwise_multiplier determines the output channels of the initial
# depthwise conv (see docs for tf.nn.separable_conv2d), while
# depth_multiplier controls the # channels of the subsequent 1x1
# convolution. Must have
      #   in_channels * depthwise_multiplier <= out_channels
# so that the separable convolution is not overparameterized.
depthwise_multiplier = min(int(depth(64) / 3), 8)
net = slim.separable_conv2d(
inputs, depth(64), [7, 7], depth_multiplier=depthwise_multiplier,
stride=2, weights_initializer=trunc_normal(1.0),
scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# 112 x 112 x 64
end_point = 'MaxPool_2a_3x3'
net = slim.max_pool2d(net, [3, 3], scope=end_point, stride=2)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# 56 x 56 x 64
end_point = 'Conv2d_2b_1x1'
net = slim.conv2d(net, depth(64), [1, 1], scope=end_point,
weights_initializer=trunc_normal(0.1))
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# 56 x 56 x 64
end_point = 'Conv2d_2c_3x3'
net = slim.conv2d(net, depth(192), [3, 3], scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# 56 x 56 x 192
end_point = 'MaxPool_3a_3x3'
net = slim.max_pool2d(net, [3, 3], scope=end_point, stride=2)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# 28 x 28 x 192
# Inception module.
end_point = 'Mixed_3b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(64), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(64), [3, 3],
scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, depth(64), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(32), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
net = tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# 28 x 28 x 256
end_point = 'Mixed_3c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(64), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(96), [3, 3],
scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, depth(64), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(96), [3, 3],
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(64), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
net = tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# 28 x 28 x 320
end_point = 'Mixed_4a'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(
net, depth(128), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_0 = slim.conv2d(branch_0, depth(160), [3, 3], stride=2,
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(64), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(
branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
branch_1 = slim.conv2d(
branch_1, depth(96), [3, 3], stride=2, scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.max_pool2d(
net, [3, 3], stride=2, scope='MaxPool_1a_3x3')
net = tf.concat_v2([branch_0, branch_1, branch_2], 3)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# 14 x 14 x 576
end_point = 'Mixed_4b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(224), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(64), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(
branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, depth(96), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(128), [3, 3],
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(128), [3, 3],
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(128), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
net = tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# 14 x 14 x 576
end_point = 'Mixed_4c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(96), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(128), [3, 3],
scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, depth(96), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(128), [3, 3],
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(128), [3, 3],
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(128), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
net = tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# 14 x 14 x 576
end_point = 'Mixed_4d'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(128), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(160), [3, 3],
scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, depth(128), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(160), [3, 3],
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(160), [3, 3],
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(96), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
net = tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# 14 x 14 x 576
end_point = 'Mixed_4e'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(96), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(128), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(192), [3, 3],
scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, depth(160), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(192), [3, 3],
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(192), [3, 3],
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(96), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
net = tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# 14 x 14 x 576
end_point = 'Mixed_5a'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(
net, depth(128), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_0 = slim.conv2d(branch_0, depth(192), [3, 3], stride=2,
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(192), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(256), [3, 3],
scope='Conv2d_0b_3x3')
branch_1 = slim.conv2d(branch_1, depth(256), [3, 3], stride=2,
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.max_pool2d(net, [3, 3], stride=2,
scope='MaxPool_1a_3x3')
net = tf.concat_v2([branch_0, branch_1, branch_2], 3)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# 7 x 7 x 1024
end_point = 'Mixed_5b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(352), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(192), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(320), [3, 3],
scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, depth(160), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(128), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
net = tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
# 7 x 7 x 1024
end_point = 'Mixed_5c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, depth(352), [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, depth(192), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, depth(320), [3, 3],
scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
net, depth(192), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
scope='Conv2d_0b_3x3')
branch_2 = slim.conv2d(branch_2, depth(224), [3, 3],
scope='Conv2d_0c_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(
branch_3, depth(128), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
net = tf.concat_v2([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint: return net, end_points
raise ValueError('Unknown final endpoint %s' % final_endpoint)
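# Illustrative note (not part of the original): requesting an intermediate endpoint
# stops construction early, e.g.
#   features, end_points = inception_v2_base(images, final_endpoint='Mixed_4e')
# returns the 14x14 'Mixed_4e' activation (for a 224x224 input) plus every endpoint
# computed before it.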
def inception_v2(inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.8,
min_depth=16,
depth_multiplier=1.0,
prediction_fn=slim.softmax,
spatial_squeeze=True,
reuse=None,
scope='InceptionV2'):
"""Inception v2 model for classification.
Constructs an Inception v2 network for classification as described in
http://arxiv.org/abs/1502.03167.
The default image size used to train this network is 224x224.
Args:
inputs: a tensor of shape [batch_size, height, width, channels].
num_classes: number of predicted classes.
is_training: whether is training or not.
dropout_keep_prob: the percentage of activation values that are retained.
min_depth: Minimum depth value (number of channels) for all convolution ops.
Enforced when depth_multiplier < 1, and not an active constraint when
depth_multiplier >= 1.
depth_multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
prediction_fn: a function to get predictions out of logits.
    spatial_squeeze: if True, logits is of shape [B, C]; if False, logits is of
      shape [B, 1, 1, C], where B is batch_size and C is number of classes.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
Returns:
logits: the pre-softmax activations, a tensor of size
[batch_size, num_classes]
end_points: a dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: if final_endpoint is not set to one of the predefined values,
or depth_multiplier <= 0
"""
if depth_multiplier <= 0:
raise ValueError('depth_multiplier is not greater than zero.')
# Final pooling and prediction
with tf.variable_scope(scope, 'InceptionV2', [inputs, num_classes],
reuse=reuse) as scope:
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training):
net, end_points = inception_v2_base(
inputs, scope=scope, min_depth=min_depth,
depth_multiplier=depth_multiplier)
with tf.variable_scope('Logits'):
kernel_size = _reduced_kernel_size_for_small_input(net, [7, 7])
net = slim.avg_pool2d(net, kernel_size, padding='VALID',
scope='AvgPool_1a_{}x{}'.format(*kernel_size))
# 1 x 1 x 1024
net = slim.dropout(net, keep_prob=dropout_keep_prob, scope='Dropout_1b')
logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
normalizer_fn=None, scope='Conv2d_1c_1x1')
if spatial_squeeze:
logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
end_points['Logits'] = logits
end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
return logits, end_points
inception_v2.default_image_size = 224
def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
"""Define kernel size which is automatically reduced for small input.
  If the shape of the input images is unknown at graph construction time this
  function assumes that the input images are large enough.
Args:
input_tensor: input tensor of size [batch_size, height, width, channels].
kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]
Returns:
a tensor with the kernel size.
TODO(jrru): Make this function work with unknown shapes. Theoretically, this
can be done with the code below. Problems are two-fold: (1) If the shape was
known, it will be lost. (2) inception.slim.ops._two_element_tuple cannot
handle tensors that define the kernel size.
shape = tf.shape(input_tensor)
  return tf.stack([tf.minimum(shape[1], kernel_size[0]),
                   tf.minimum(shape[2], kernel_size[1])])
"""
shape = input_tensor.get_shape().as_list()
if shape[1] is None or shape[2] is None:
kernel_size_out = kernel_size
else:
kernel_size_out = [min(shape[1], kernel_size[0]),
min(shape[2], kernel_size[1])]
return kernel_size_out
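# Illustrative example (not in the original): with the default 224x224 input the
# Mixed_5c map is 7x7, so a requested [7, 7] kernel is returned unchanged; with a
# 160x160 input the map is 5x5 and the kernel is reduced to [5, 5].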
def inception_v2_arg_scope(weight_decay=0.00004,
batch_norm_var_collection='moving_vars'):
"""Defines the default InceptionV2 arg scope.
Args:
weight_decay: The weight decay to use for regularizing the model.
batch_norm_var_collection: The name of the collection for the batch norm
variables.
Returns:
    An `arg_scope` to use for the inception v2 model.
"""
batch_norm_params = {
# Decay for the moving averages.
'decay': 0.9997,
# epsilon to prevent 0s in variance.
'epsilon': 0.001,
# collection containing update_ops.
'updates_collections': tf.GraphKeys.UPDATE_OPS,
# collection containing the moving mean and moving variance.
'variables_collections': {
'beta': None,
'gamma': None,
'moving_mean': [batch_norm_var_collection],
'moving_variance': [batch_norm_var_collection],
}
}
# Set weight_decay for weights in Conv and FC layers.
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_regularizer=slim.l2_regularizer(weight_decay)):
with slim.arg_scope(
[slim.conv2d],
weights_initializer=slim.variance_scaling_initializer(),
activation_fn=tf.nn.relu,
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params) as sc:
return sc
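if __name__ == '__main__':
  # Minimal usage sketch (illustrative only, not part of the original file):
  # build the classifier under the default arg scope. Assumes TF 1.x with
  # tf.contrib.slim available.
  images = tf.placeholder(tf.float32, [None, 224, 224, 3])
  with slim.arg_scope(inception_v2_arg_scope()):
    logits, end_points = inception_v2(images, num_classes=1000, is_training=False)
  # 'Predictions' holds the softmax output; 'Mixed_5c' is the last 7x7 feature map.
  print(logits.get_shape(), end_points['Predictions'].get_shape())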
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Compatibility functionality for Windbg users.
"""
import codecs
import sys
import math
import gdb
import pwndbg.arch
import pwndbg.commands
import pwndbg.memory
import pwndbg.strings
import pwndbg.typeinfo
def get_type(size):
return {
1: pwndbg.typeinfo.uint8,
2: pwndbg.typeinfo.uint16,
4: pwndbg.typeinfo.uint32,
8: pwndbg.typeinfo.uint64,
}[size]
@pwndbg.commands.ParsedCommand
@pwndbg.commands.OnlyWhenRunning
def db(address, count=64):
"""
Starting at the specified address, dump N bytes
(default 64).
"""
return dX(1, (address), (count))
@pwndbg.commands.ParsedCommand
@pwndbg.commands.OnlyWhenRunning
def dw(address, count=32):
"""
Starting at the specified address, dump N words
(default 32).
"""
return dX(2, (address), (count))
@pwndbg.commands.ParsedCommand
@pwndbg.commands.OnlyWhenRunning
def dd(address, count=16):
"""
Starting at the specified address, dump N dwords
(default 16).
"""
return dX(4, (address), (count))
@pwndbg.commands.ParsedCommand
@pwndbg.commands.OnlyWhenRunning
def dq(address, count=8):
"""
Starting at the specified address, dump N qwords
(default 8).
"""
return dX(8, (address), (count))
@pwndbg.commands.ParsedCommand
@pwndbg.commands.OnlyWhenRunning
def dc(address, count=8):
return pwndbg.commands.hexdump.hexdump(address=address, count=count)
def dX(size, address, count, to_string=False):
"""
Traditionally, windbg will display 16 bytes of data per line.
"""
values = []
address = int(address) & pwndbg.arch.ptrmask
type = get_type(size)
for i in range(count):
try:
gval = pwndbg.memory.poi(type, address + i * size)
# print(str(gval))
values.append(int(gval))
except gdb.MemoryError:
break
n_rows = int(math.ceil(count * size / float(16)))
row_sz = int(16 / size)
rows = [values[i*row_sz:(i+1)*row_sz] for i in range(n_rows)]
lines = []
# sys.stdout.write(repr(rows) + '\n')
for i, row in enumerate(rows):
if not row:
continue
line = [enhex(pwndbg.arch.ptrsize, address + (i*16)),' ']
for value in row:
line.append(enhex(size, value))
lines.append(' '.join(line))
if not to_string:
print('\n'.join(lines))
return lines
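# Illustrative layout example (not part of the original): `dd addr 8` reads eight
# 4-byte values and prints them as two lines of four values each (16 bytes per
# line), with each line prefixed by its starting address.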
def enhex(size, value):
value = value & pwndbg.arch.ptrmask
x = "%x" % abs(value)
x = x.rjust(size * 2, '0')
return x
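# For example (illustrative): enhex(4, 0x41) == '00000041' -- the value is masked
# to the pointer width, rendered as hex and left-padded to 2*size characters.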
@pwndbg.commands.Command
@pwndbg.commands.OnlyWhenRunning
def eb(address, *data):
"""
Write hex bytes at the specified address.
"""
return eX(1, address, data)
@pwndbg.commands.Command
@pwndbg.commands.OnlyWhenRunning
def ew(address, *data):
"""
Write hex words at the specified address.
"""
return eX(2, address, data)
@pwndbg.commands.Command
@pwndbg.commands.OnlyWhenRunning
def ed(address, *data):
"""
Write hex dwords at the specified address.
"""
return eX(4, address, data)
@pwndbg.commands.Command
@pwndbg.commands.OnlyWhenRunning
def eq(address, *data):
"""
Write hex qwords at the specified address.
"""
return eX(8, address, data)
@pwndbg.commands.Command
@pwndbg.commands.OnlyWhenRunning
def ez(address, *data):
"""
Write a string at the specified address.
"""
return eX(1, address, data[0], hex=False)
@pwndbg.commands.Command
@pwndbg.commands.OnlyWhenRunning
def eza(address, *data):
"""
Write a string at the specified address.
"""
return ez(address, data)
def eX(size, address, data, hex=True):
"""
This relies on windbg's default hex encoding being enforced
"""
address = pwndbg.commands.fix(address)
for i,bytestr in enumerate(data):
if hex:
bytestr = bytestr.rjust(size*2, '0')
data = codecs.decode(bytestr, 'hex')
else:
data = bytestr
pwndbg.memory.write(address + (i * size), data)
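# Illustrative example (not part of the original): `eb addr 41 42` hex-decodes each
# argument and writes b'A' at addr and b'B' at addr+1, one element per size-byte slot.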
@pwndbg.commands.ParsedCommand
@pwndbg.commands.OnlyWhenRunning
def dds(*a):
"""
Dump pointers and symbols at the specified address.
"""
return pwndbg.commands.telescope.telescope(*a)
@pwndbg.commands.ParsedCommand
@pwndbg.commands.OnlyWhenRunning
def dps(*a):
"""
Dump pointers and symbols at the specified address.
"""
return pwndbg.commands.telescope.telescope(*a)
@pwndbg.commands.ParsedCommand
@pwndbg.commands.OnlyWhenRunning
def dqs(*a):
"""
Dump pointers and symbols at the specified address.
"""
return pwndbg.commands.telescope.telescope(*a)
@pwndbg.commands.ParsedCommand
@pwndbg.commands.OnlyWhenRunning
def da(address, max=256):
"""
Dump a string at the specified address.
"""
print("%x" % address, repr(pwndbg.strings.get(address, max)))
@pwndbg.commands.ParsedCommand
@pwndbg.commands.OnlyWhenRunning
def bl():
"""
List breakpoints
"""
gdb.execute('info breakpoints')
@pwndbg.commands.Command
@pwndbg.commands.OnlyWhenRunning
def bd(which = '*'):
"""
    Disable the breakpoint with the specified index.
"""
if which == '*':
gdb.execute('disable breakpoints')
else:
gdb.execute('disable breakpoints %s' % which)
@pwndbg.commands.Command
@pwndbg.commands.OnlyWhenRunning
def be(which = '*'):
"""
    Enable the breakpoint with the specified index.
"""
if which == '*':
gdb.execute('enable breakpoints')
else:
gdb.execute('enable breakpoints %s' % which)
@pwndbg.commands.Command
@pwndbg.commands.OnlyWhenRunning
def bc(which = '*'):
"""
    Clear the breakpoint with the specified index.
"""
if which == '*':
gdb.execute('delete breakpoints')
else:
gdb.execute('delete breakpoints %s' % which)
@pwndbg.commands.ParsedCommand
@pwndbg.commands.OnlyWhenRunning
def bp(where):
"""
Set a breakpoint
"""
gdb.execute('break *%#x' % int(where))
@pwndbg.commands.ParsedCommand
@pwndbg.commands.OnlyWhenRunning
def u(where=None, n=5):
"""
Starting at the specified address, disassemble
N instructions (default 5).
"""
if where is None:
where = pwndbg.regs.pc
cmd = 'x/%ii %#x' % (int(n), int(where))
gdb.execute(cmd)
@pwndbg.commands.Command
@pwndbg.commands.OnlyWhenRunning
def k():
gdb.execute('bt')
|
|
from common_fixtures import * # NOQA
from cattle import ApiError
def _create_stack(client):
env = client.create_stack(name=random_str())
env = client.wait_success(env)
assert env.state == "active"
return env
def test_create_duplicated_services(client, context):
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
service_name = random_str()
service1 = client.create_service(name=service_name,
stackId=env.id,
launchConfig=launch_config)
client.wait_success(service1)
with pytest.raises(ApiError) as e:
client.create_service(name=service_name,
stackId=env.id,
launchConfig=launch_config)
assert e.value.error.status == 422
assert e.value.error.code == 'NotUnique'
assert e.value.error.fieldName == 'name'
with pytest.raises(ApiError) as e:
client.create_service(name=service_name.upper(),
stackId=env.id,
launchConfig=launch_config)
assert e.value.error.status == 422
assert e.value.error.code == 'NotUnique'
assert e.value.error.fieldName == 'name'
with pytest.raises(ApiError) as e:
client.create_externalService(name=service_name,
stackId=env.id,
launchConfig=launch_config,
externalIpAddresses=["72.22.16.5"])
assert e.value.error.status == 422
assert e.value.error.code == 'NotUnique'
assert e.value.error.fieldName == 'name'
with pytest.raises(ApiError) as e:
client.create_dnsService(name=service_name,
stackId=env.id)
assert e.value.error.status == 422
assert e.value.error.code == 'NotUnique'
assert e.value.error.fieldName == 'name'
# try to update the service with duplicated service name
service = client.create_service(name=random_str(),
stackId=env.id,
launchConfig=launch_config)
service = client.wait_success(service)
with pytest.raises(ApiError) as e:
client.update(service, name=service_name)
assert e.value.error.status == 422
assert e.value.error.code == 'NotUnique'
assert e.value.error.fieldName == 'name'
# remove the service and try to re-use its name
client.wait_success(service1.remove())
client.create_service(name=service_name,
stackId=env.id,
launchConfig=launch_config)
def test_external_service_w_hostname(client, context):
env = _create_stack(client)
    # try to create an external service with both hostname and externalIpAddresses set
with pytest.raises(ApiError) as e:
ips = ["72.22.16.5", '192.168.0.10']
client.create_externalService(name=random_str(),
stackId=env.id,
hostname="a.com",
externalIpAddresses=ips)
assert e.value.error.status == 422
assert e.value.error.code == 'InvalidOption'
def test_circular_refs(client, context):
env = _create_stack(client)
# test direct circular ref
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid,
"dataVolumesFromLaunchConfigs": ['secondary']}
secondary_lc = {"imageUuid": image_uuid, "name": "secondary",
"dataVolumesFromLaunchConfigs": ['primary']}
with pytest.raises(ApiError) as e:
client.create_service(name="primary",
stackId=env.id,
launchConfig=launch_config,
secondaryLaunchConfigs=[secondary_lc])
assert e.value.error.status == 422
assert e.value.error.code == 'InvalidReference'
# test indirect circular ref
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid,
"dataVolumesFromLaunchConfigs": ['secondary1']}
s_lc1 = {"imageUuid": image_uuid, "name": "secondary1",
"dataVolumesFromLaunchConfigs": ['secondary2']}
s_lc2 = {"imageUuid": image_uuid, "name": "secondary2",
"dataVolumesFromLaunchConfigs": ['primary']}
with pytest.raises(ApiError) as e:
client.create_service(name="primary",
stackId=env.id,
launchConfig=launch_config,
secondaryLaunchConfigs=[s_lc1, s_lc2])
assert e.value.error.status == 422
assert e.value.error.code == 'InvalidReference'
def test_no_circular_ref(client, context):
env = _create_stack(client)
    # test that there is no circular reference when secondary2 takes both its
    # network and its volumes from primary, while primary takes volumes from secondary1
image_uuid = context.image_uuid
launch_config = {'imageUuid': image_uuid,
'dataVolumesFromLaunchConfigs': ['secondary1']}
secondary1_lc = {'imageUuid': image_uuid, 'name': 'secondary1'}
secondary2_lc = {'imageUuid': image_uuid, 'name': 'secondary2',
'dataVolumesFromLaunchConfigs': ['primary'],
'networkMode': 'container',
'networkLaunchConfig': 'primary'}
svc = client.create_service(name="primary",
stackId=env.id,
launchConfig=launch_config,
secondaryLaunchConfigs=[secondary1_lc,
secondary2_lc])
svc = client.wait_success(svc)
assert svc.state == 'inactive'
def test_vip_requested_ip(client, context):
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
# vip out of the range - still accepted
vip = "169.255.65.30"
svc = client.create_service(name=random_str(),
stackId=env.id,
launchConfig=launch_config,
vip=vip)
assert svc.vip == vip
def test_add_svc_to_removed_stack(client, context):
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
client.create_service(name=random_str(),
stackId=env.id,
launchConfig=launch_config)
client.create_service(name=random_str(),
stackId=env.id,
launchConfig=launch_config)
env.remove()
with pytest.raises(ApiError) as e:
client.create_service(name=random_str(),
stackId=env.id,
launchConfig=launch_config)
assert e.value.error.status == 422
assert e.value.error.code == 'InvalidState'
assert e.value.error.fieldName == 'stackId'
with pytest.raises(ApiError) as e:
client.create_service(name=random_str(),
stackId=env.id,
launchConfig=launch_config)
assert e.value.error.status == 422
assert e.value.error.code == 'InvalidState'
assert e.value.error.fieldName == 'stackId'
def test_validate_launch_config_name(client, context):
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
svc_name = random_str()
service = client.create_service(name=svc_name,
stackId=env.id,
launchConfig=launch_config)
client.wait_success(service)
launch_config = {"imageUuid": image_uuid}
secondary_lc = {"imageUuid": image_uuid,
"name": svc_name}
with pytest.raises(ApiError) as e:
client.create_service(name=random_str(),
stackId=env.id,
launchConfig=launch_config,
secondaryLaunchConfigs=[secondary_lc])
assert e.value.error.status == 422
assert e.value.error.code == 'NotUnique'
def test_validate_service_token(client, context, super_client):
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
svc_name = random_str()
service = client.create_service(name=svc_name,
stackId=env.id,
launchConfig=launch_config)
client.wait_success(service)
service = super_client.reload(service)
assert service.state == "inactive"
assert service.data.fields.token is not None
token = service.data.fields.token
svc_name = random_str()
client.update(service, name=svc_name)
client.wait_success(service)
service = super_client.reload(service)
assert service.name == svc_name
assert service.data.fields.token is not None
assert service.data.fields.token == token
def test_ip_retain(client, context, super_client):
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
svc = client.create_service(name=random_str(),
stackId=env.id,
launchConfig=launch_config,
scale=1,
retainIp=True)
svc = client.wait_success(svc)
assert svc.state == "inactive"
# validate that startFirst can't be used on a service with retainIp = true
strategy = {"launchConfig": launch_config,
"intervalMillis": 100,
"startFirst": True}
with pytest.raises(ApiError) as e:
svc.upgrade_action(inServiceStrategy=strategy)
assert e.value.error.status == 422
assert e.value.error.code == 'InvalidOption'
def test_null_scale(client, context):
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
svc = client.create_service(name=random_str(),
stackId=env.id,
launchConfig=launch_config,
scale=None)
svc = client.wait_success(svc)
assert svc.state == "inactive"
assert svc.scale is not None
def test_validate_svc_name(client, context):
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
# svc_name starting with hyphen
svc_name = "-" + random_str()
with pytest.raises(ApiError) as e:
client.create_service(name=svc_name,
stackId=env.id,
launchConfig=launch_config)
assert e.value.error.status == 422
assert e.value.error.code == 'InvalidCharacters'
# svc_name ending in hyphen
svc_name = random_str() + "-"
with pytest.raises(ApiError) as e:
client.create_service(name=svc_name,
stackId=env.id,
launchConfig=launch_config)
assert e.value.error.status == 422
assert e.value.error.code == 'InvalidCharacters'
# svc_name with --
svc_name = random_str() + "--end"
with pytest.raises(ApiError) as e:
client.create_service(name=svc_name,
stackId=env.id,
launchConfig=launch_config)
assert e.value.error.status == 422
assert e.value.error.code == 'InvalidCharacters'
# svc_name with more than 63 chars
svc_name = random_str() + "myLinkTOOLONGtoolongtoolongtoolongmy" \
"LinkTOOLONGtoolongtoolongtoolong"
with pytest.raises(ApiError) as e:
client.create_service(name=svc_name,
stackId=env.id,
launchConfig=launch_config)
assert e.value.error.status == 422
assert e.value.error.code == 'MaxLengthExceeded'
# svc_name with single char
svc = client.create_service(name='a',
stackId=env.id,
launchConfig=launch_config)
svc = client.wait_success(svc)
assert svc.state == "inactive"
def test_setlinks_on_removed(client, context):
env = _create_stack(client)
image_uuid = context.image_uuid
launch_config = {"imageUuid": image_uuid}
svc = client.create_service(name=random_str(),
stackId=env.id,
launchConfig=launch_config)
svc = client.wait_success(svc)
target = client.create_service(name=random_str(),
stackId=env.id,
launchConfig=launch_config)
target = client.wait_success(target)
client.wait_success(target.remove())
link = {"serviceId": target.id, "name": "link1"}
with pytest.raises(ApiError) as e:
svc. \
setservicelinks(serviceLinks=[link])
assert e.value.error.status == 422
assert e.value.error.code == 'InvalidReference'
def test_invalid_ports(client, context):
env = _create_stack(client)
launch_config = {"imageUuid": context.image_uuid,
"ports": [":45677"]}
with pytest.raises(ApiError) as e:
client. \
create_service(name=random_str(),
stackId=env.id,
launchConfig=launch_config)
assert e.value.error.status == 422
assert e.value.error.code == 'PortWrongFormat'
def test_region_links(client, context):
env = _create_stack(client)
launch_config = {"imageUuid": context.image_uuid}
svc = client.create_service(name=random_str(),
stackId=env.id,
launchConfig=launch_config)
link = {"service": 'foo', "name": ''}
with pytest.raises(ApiError) as e:
svc. \
setservicelinks(serviceLinks=[link])
assert e.value.error.status == 422
assert e.value.error.code == 'MissingRequired'
assert e.value.error.fieldName == "name"
|
|
"""Tests for tools for solving inequalities and systems of inequalities. """
from sympy import (And, Eq, FiniteSet, Ge, Gt, Interval, Le, Lt, Ne, oo,
Or, S, sin, sqrt, Symbol, Union, Integral, Sum,
Function, Poly, PurePoly, pi, root, log, exp)
from sympy.solvers.inequalities import (reduce_inequalities,
solve_poly_inequality as psolve,
reduce_rational_inequalities,
solve_univariate_inequality as isolve,
reduce_abs_inequality)
from sympy.polys.rootoftools import rootof
from sympy.solvers.solvers import solve
from sympy.abc import x, y
from sympy.utilities.pytest import raises, slow
inf = oo.evalf()
def test_solve_poly_inequality():
assert psolve(Poly(0, x), '==') == [S.Reals]
assert psolve(Poly(1, x), '==') == [S.EmptySet]
assert psolve(PurePoly(x + 1, x), ">") == [Interval(-1, oo, True, False)]
def test_reduce_poly_inequalities_real_interval():
assert reduce_rational_inequalities(
[[Eq(x**2, 0)]], x, relational=False) == FiniteSet(0)
assert reduce_rational_inequalities(
[[Le(x**2, 0)]], x, relational=False) == FiniteSet(0)
assert reduce_rational_inequalities(
[[Lt(x**2, 0)]], x, relational=False) == S.EmptySet
assert reduce_rational_inequalities(
[[Ge(x**2, 0)]], x, relational=False) == \
S.Reals if x.is_real else Interval(-oo, oo)
assert reduce_rational_inequalities(
[[Gt(x**2, 0)]], x, relational=False) == \
FiniteSet(0).complement(S.Reals)
assert reduce_rational_inequalities(
[[Ne(x**2, 0)]], x, relational=False) == \
FiniteSet(0).complement(S.Reals)
assert reduce_rational_inequalities(
[[Eq(x**2, 1)]], x, relational=False) == FiniteSet(-1, 1)
assert reduce_rational_inequalities(
[[Le(x**2, 1)]], x, relational=False) == Interval(-1, 1)
assert reduce_rational_inequalities(
[[Lt(x**2, 1)]], x, relational=False) == Interval(-1, 1, True, True)
assert reduce_rational_inequalities(
[[Ge(x**2, 1)]], x, relational=False) == \
Union(Interval(-oo, -1), Interval(1, oo))
assert reduce_rational_inequalities(
[[Gt(x**2, 1)]], x, relational=False) == \
Interval(-1, 1).complement(S.Reals)
assert reduce_rational_inequalities(
[[Ne(x**2, 1)]], x, relational=False) == \
FiniteSet(-1, 1).complement(S.Reals)
assert reduce_rational_inequalities([[Eq(
x**2, 1.0)]], x, relational=False) == FiniteSet(-1.0, 1.0).evalf()
assert reduce_rational_inequalities(
[[Le(x**2, 1.0)]], x, relational=False) == Interval(-1.0, 1.0)
assert reduce_rational_inequalities([[Lt(
x**2, 1.0)]], x, relational=False) == Interval(-1.0, 1.0, True, True)
assert reduce_rational_inequalities(
[[Ge(x**2, 1.0)]], x, relational=False) == \
Union(Interval(-inf, -1.0), Interval(1.0, inf))
assert reduce_rational_inequalities(
[[Gt(x**2, 1.0)]], x, relational=False) == \
Union(Interval(-inf, -1.0, right_open=True),
Interval(1.0, inf, left_open=True))
assert reduce_rational_inequalities([[Ne(
x**2, 1.0)]], x, relational=False) == \
FiniteSet(-1.0, 1.0).complement(S.Reals)
s = sqrt(2)
assert reduce_rational_inequalities([[Lt(
x**2 - 1, 0), Gt(x**2 - 1, 0)]], x, relational=False) == S.EmptySet
assert reduce_rational_inequalities([[Le(x**2 - 1, 0), Ge(
x**2 - 1, 0)]], x, relational=False) == FiniteSet(-1, 1)
assert reduce_rational_inequalities(
[[Le(x**2 - 2, 0), Ge(x**2 - 1, 0)]], x, relational=False
) == Union(Interval(-s, -1, False, False), Interval(1, s, False, False))
assert reduce_rational_inequalities(
[[Le(x**2 - 2, 0), Gt(x**2 - 1, 0)]], x, relational=False
) == Union(Interval(-s, -1, False, True), Interval(1, s, True, False))
assert reduce_rational_inequalities(
[[Lt(x**2 - 2, 0), Ge(x**2 - 1, 0)]], x, relational=False
) == Union(Interval(-s, -1, True, False), Interval(1, s, False, True))
assert reduce_rational_inequalities(
[[Lt(x**2 - 2, 0), Gt(x**2 - 1, 0)]], x, relational=False
) == Union(Interval(-s, -1, True, True), Interval(1, s, True, True))
assert reduce_rational_inequalities(
[[Lt(x**2 - 2, 0), Ne(x**2 - 1, 0)]], x, relational=False
) == Union(Interval(-s, -1, True, True), Interval(-1, 1, True, True),
Interval(1, s, True, True))
def test_reduce_poly_inequalities_complex_relational():
assert reduce_rational_inequalities(
[[Eq(x**2, 0)]], x, relational=True) == Eq(x, 0)
assert reduce_rational_inequalities(
[[Le(x**2, 0)]], x, relational=True) == Eq(x, 0)
assert reduce_rational_inequalities(
[[Lt(x**2, 0)]], x, relational=True) == False
assert reduce_rational_inequalities(
[[Ge(x**2, 0)]], x, relational=True) == And(Lt(-oo, x), Lt(x, oo))
assert reduce_rational_inequalities(
[[Gt(x**2, 0)]], x, relational=True) == \
And(Or(And(Lt(-oo, x), Lt(x, 0)), And(Lt(0, x), Lt(x, oo))))
assert reduce_rational_inequalities(
[[Ne(x**2, 0)]], x, relational=True) == \
And(Or(And(Lt(-oo, x), Lt(x, 0)), And(Lt(0, x), Lt(x, oo))))
for one in (S(1), S(1.0)):
inf = one*oo
assert reduce_rational_inequalities(
[[Eq(x**2, one)]], x, relational=True) == \
Or(Eq(x, -one), Eq(x, one))
assert reduce_rational_inequalities(
[[Le(x**2, one)]], x, relational=True) == \
And(And(Le(-one, x), Le(x, one)))
assert reduce_rational_inequalities(
[[Lt(x**2, one)]], x, relational=True) == \
And(And(Lt(-one, x), Lt(x, one)))
assert reduce_rational_inequalities(
[[Ge(x**2, one)]], x, relational=True) == \
And(Or(And(Le(one, x), Lt(x, inf)), And(Le(x, -one), Lt(-inf, x))))
assert reduce_rational_inequalities(
[[Gt(x**2, one)]], x, relational=True) == \
And(Or(And(Lt(-inf, x), Lt(x, -one)), And(Lt(one, x), Lt(x, inf))))
assert reduce_rational_inequalities(
[[Ne(x**2, one)]], x, relational=True) == \
Or(And(Lt(-inf, x), Lt(x, -one)),
And(Lt(-one, x), Lt(x, one)),
And(Lt(one, x), Lt(x, inf)))
def test_reduce_rational_inequalities_real_relational():
assert reduce_rational_inequalities([], x) == False
assert reduce_rational_inequalities(
[[(x**2 + 3*x + 2)/(x**2 - 16) >= 0]], x, relational=False) == \
Union(Interval.open(-oo, -4), Interval(-2, -1), Interval.open(4, oo))
assert reduce_rational_inequalities(
[[((-2*x - 10)*(3 - x))/((x**2 + 5)*(x - 2)**2) < 0]], x,
relational=False) == \
Union(Interval.open(-5, 2), Interval.open(2, 3))
assert reduce_rational_inequalities([[(x + 1)/(x - 5) <= 0]], x,
relational=False) == \
Interval.Ropen(-1, 5)
assert reduce_rational_inequalities([[(x**2 + 4*x + 3)/(x - 1) > 0]], x,
relational=False) == \
Union(Interval.open(-3, -1), Interval.open(1, oo))
assert reduce_rational_inequalities([[(x**2 - 16)/(x - 1)**2 < 0]], x,
relational=False) == \
Union(Interval.open(-4, 1), Interval.open(1, 4))
assert reduce_rational_inequalities([[(3*x + 1)/(x + 4) >= 1]], x,
relational=False) == \
Union(Interval.open(-oo, -4), Interval.Ropen(S(3)/2, oo))
assert reduce_rational_inequalities([[(x - 8)/x <= 3 - x]], x,
relational=False) == \
Union(Interval.Lopen(-oo, -2), Interval.Lopen(0, 4))
# issue sympy/sympy#10237
assert reduce_rational_inequalities(
[[x < oo, x >= 0, -oo < x]], x, relational=False) == Interval(0, oo)
def test_reduce_abs_inequalities():
e = abs(x - 5) < 3
ans = And(Lt(2, x), Lt(x, 8))
assert reduce_inequalities(e) == ans
assert reduce_inequalities(e, x) == ans
assert reduce_inequalities(abs(x - 5)) == Eq(x, 5)
assert reduce_inequalities(
abs(2*x + 3) >= 8) == Or(And(Le(S(5)/2, x), Lt(x, oo)),
And(Le(x, -S(11)/2), Lt(-oo, x)))
assert reduce_inequalities(abs(x - 4) + abs(
3*x - 5) < 7) == And(Lt(S(1)/2, x), Lt(x, 4))
assert reduce_inequalities(abs(x - 4) + abs(3*abs(x) - 5) < 7) == \
Or(And(S(-2) < x, x < -1), And(S(1)/2 < x, x < 4))
nr = Symbol('nr', real=False)
raises(TypeError, lambda: reduce_inequalities(abs(nr - 5) < 3))
assert reduce_inequalities(x < 3, symbols=[x, nr]) == And(-oo < x, x < 3)
def test_reduce_inequalities_general():
assert reduce_inequalities(Ge(sqrt(2)*x, 1)) == And(sqrt(2)/2 <= x, x < oo)
assert reduce_inequalities(PurePoly(x + 1, x) > 0) == And(S(-1) < x, x < oo)
def test_reduce_inequalities_boolean():
assert reduce_inequalities(
[Eq(x**2, 0), True]) == Eq(x, 0)
assert reduce_inequalities([Eq(x**2, 0), False]) == False
assert reduce_inequalities(x**2 >= 0) is S.true # issue 10196
def test_reduce_inequalities_multivariate():
assert reduce_inequalities([Ge(x**2, 1), Ge(y**2, 1)]) == And(
Or(And(Le(1, x), Lt(x, oo)), And(Le(x, -1), Lt(-oo, x))),
Or(And(Le(1, y), Lt(y, oo)), And(Le(y, -1), Lt(-oo, y))))
def test_reduce_inequalities_errors():
raises(NotImplementedError, lambda: reduce_inequalities(Ge(sin(x) + x, 1)))
raises(NotImplementedError, lambda: reduce_inequalities(Ge(x**2*y + y, 1)))
def test_hacky_inequalities():
assert reduce_inequalities(x + y < 1, symbols=[x]) == (x < 1 - y)
assert reduce_inequalities(x + y >= 1, symbols=[x]) == (x >= 1 - y)
assert reduce_inequalities(Eq(0, x - y), symbols=[x]) == Eq(x, y)
assert reduce_inequalities(Ne(0, x - y), symbols=[x]) == Ne(x, y)
def test_issue_6343():
eq = -3*x**2/2 - 45*x/4 + S(33)/2 > 0
assert reduce_inequalities(eq) == \
And(x < -S(15)/4 + sqrt(401)/4, -sqrt(401)/4 - S(15)/4 < x)
def test_issue_8235():
assert reduce_inequalities(x**2 - 1 < 0) == \
And(S(-1) < x, x < S(1))
assert reduce_inequalities(x**2 - 1 <= 0) == \
And(S(-1) <= x, x <= 1)
assert reduce_inequalities(x**2 - 1 > 0) == \
Or(And(-oo < x, x < -1), And(x < oo, S(1) < x))
assert reduce_inequalities(x**2 - 1 >= 0) == \
Or(And(-oo < x, x <= S(-1)), And(S(1) <= x, x < oo))
eq = x**8 + x - 9 # we want CRootOf solns here
sol = solve(eq >= 0)
tru = Or(And(rootof(eq, 1) <= x, x < oo), And(-oo < x, x <= rootof(eq, 0)))
assert sol == tru
# recast vanilla as real
assert solve(sqrt((-x + 1)**2) < 1) == And(S(0) < x, x < 2)
def test_issue_5526():
assert reduce_inequalities(S(0) <=
x + Integral(y**2, (y, 1, 3)) - 1, [x]) == \
(x >= -Integral(y**2, (y, 1, 3)) + 1)
f = Function('f')
e = Sum(f(x), (x, 1, 3))
assert reduce_inequalities(S(0) <= x + e + y**2, [x]) == \
(x >= -y**2 - Sum(f(x), (x, 1, 3)))
def test_solve_univariate_inequality():
assert isolve(x**2 >= 4, x, relational=False) == Union(Interval(-oo, -2),
Interval(2, oo))
assert isolve(x**2 >= 4, x) == Or(And(Le(2, x), Lt(x, oo)), And(Le(x, -2),
Lt(-oo, x)))
assert isolve((x - 1)*(x - 2)*(x - 3) >= 0, x, relational=False) == \
Union(Interval(1, 2), Interval(3, oo))
assert isolve((x - 1)*(x - 2)*(x - 3) >= 0, x) == \
Or(And(Le(1, x), Le(x, 2)), And(Le(3, x), Lt(x, oo)))
# issue 2785:
assert isolve(x**3 - 2*x - 1 > 0, x, relational=False) == \
Union(Interval(-1, -sqrt(5)/2 + S(1)/2, True, True),
Interval(S(1)/2 + sqrt(5)/2, oo, True, True))
# issue 2794:
assert isolve(x**3 - x**2 + x - 1 > 0, x, relational=False) == \
Interval(1, oo, True)
# XXX should be limited in domain, e.g. between 0 and 2*pi
assert isolve(sin(x) < S.Half, x) == \
Or(And(-oo < x, x < pi/6), And(5*pi/6 < x, x < oo))
assert isolve(sin(x) > S.Half, x) == And(pi/6 < x, x < 5*pi/6)
# numerical testing in valid() is needed
assert isolve(x**7 - x - 2 > 0, x) == \
And(rootof(x**7 - x - 2, 0) < x, x < oo)
# handle numerator and denominator; although these would be handled as
    # rational inequalities, these tests confirm that the right thing is done
# when the domain is EX (e.g. when 2 is replaced with sqrt(2))
assert isolve(1/(x - 2) > 0, x) == And(S(2) < x, x < oo)
den = ((x - 1)*(x - 2)).expand()
assert isolve((x - 1)/den <= 0, x) == \
Or(And(-oo < x, x < 1), And(S(1) < x, x < 2))
def test_issue_9954():
assert isolve(x**2 >= 0, x, relational=False) == S.Reals
assert isolve(x**2 >= 0, x, relational=True) == S.Reals.as_relational(x)
assert isolve(x**2 < 0, x, relational=False) == S.EmptySet
assert isolve(x**2 < 0, x, relational=True) == S.EmptySet.as_relational(x)
def test_slow_general_univariate():
r = rootof(x**5 - x**2 + 1, 0)
assert solve(sqrt(x) + 1/root(x, 3) > 1) == \
Or(And(S(0) < x, x < r**6), And(r**6 < x, x < oo))
def test_issue_8545():
eq = 1 - x - abs(1 - x)
ans = And(Lt(1, x), Lt(x, oo))
assert reduce_abs_inequality(eq, '<', x) == ans
eq = 1 - x - sqrt((1 - x)**2)
assert reduce_inequalities(eq < 0) == ans
def test_issue_8974():
assert isolve(-oo < x, x) == And(-oo < x, x < oo)
assert isolve(oo > x, x) == And(-oo < x, x < oo)
def test_issue_10047():
assert solve(sin(x) < 2) == And(-oo < x, x < oo)
def test_issue_10268():
assert solve(log(x) < 1000) == And(-oo < x, x < exp(1000))
|
|
from astroid.__pkginfo__ import author
import cgi
from collections import Counter
import logging
from math import log, exp
import math
from numpy import clip, mean
import os
import pyfscache
import re
from scipy import stats
import sqlite3
from sqlitedict import SqliteDict
import string
import tagme
import time
import expertfinding
__all__ = []
DEFAULT_MIN_SCORE = 0.20
def legit_document(doc_body):
return doc_body is not None and len(doc_body) > 10
def entities(text):
return tagme.annotate(text).annotations if text else []
def set_cache(cache_dir):
cache = pyfscache.FSCache(cache_dir)
expertfinding.entities = cache(expertfinding.entities)
def _annotated_text_generator(text, annotations):
prev = 0
for a in sorted(annotations, key=lambda a: a.begin):
yield cgi.escape(text[prev:a.begin])
yield u"<span class='annotation' entity='{}' score='{}'>{}</span>".format(cgi.escape(a.entity_title or ""), a.score, cgi.escape(text[a.begin: a.end]))
prev = a.end
yield text[prev:]
def annotated_text(text, annotations):
return "".join(_annotated_text_generator(text, annotations))
def join_entities_sql(entities):
return u", ".join(u"'{}'".format(t.replace("'", "''")) for t in entities)
def weighted_geom_mean(vals_weights):
return exp(sum(w * log(v) for v, w in vals_weights) / sum(w for _, w in vals_weights))
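# weighted_geom_mean computes exp(sum_i(w_i * ln v_i) / sum_i(w_i)), the weighted
# geometric mean of the values. For example (illustrative):
# weighted_geom_mean([(4.0, 1.0), (16.0, 1.0)]) gives 8.0 (up to floating-point rounding).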
def _str_titles(t1, t2):
return unicode(sorted([t1, t2])).encode("utf-8")
class ExpertFindingBuilder(object):
def __init__(self, ef):
self.ef = ef
self.ef.db.execute('''CREATE TABLE IF NOT EXISTS authors
(author_id PRIMARY KEY, name, institution)
''')
self.ef.db.execute('''CREATE TABLE IF NOT EXISTS entity_occurrences
(entity, author_id, document_id, year, rho,
FOREIGN KEY(author_id) REFERENCES authors(author_id))''')
self.ef.db.execute('''CREATE TABLE IF NOT EXISTS documents
(author_id, document_id, year, body,
FOREIGN KEY(author_id) REFERENCES authors(author_id))''')
self.ef.db.execute('''CREATE TABLE IF NOT EXISTS institutions
(institution PRIMARY KEY, document_count)''')
self.ef.db.execute('''CREATE TABLE IF NOT EXISTS entities
(entity, institution, frequency, PRIMARY KEY (entity, institution))''')
self.ef.db.execute('''CREATE INDEX IF NOT EXISTS entities_author_id_index ON entity_occurrences (author_id)''')
self.ef.db.execute('''CREATE INDEX IF NOT EXISTS entities_entity_index ON entities (entity)''')
self.ef.db.execute('''CREATE INDEX IF NOT EXISTS entity_occurrences_entity_index ON entity_occurrences (entity)''')
def add_documents(self, input_f, papers_generator, min_year=None, max_year=None):
papers = list(papers_generator)
logging.info("%s: Number of papers (total): %d" % (os.path.basename(input_f), len(papers)))
papers = [p for p in papers if
(min_year is None or p.year >= min_year) and (max_year is None or p.year <= max_year)]
logging.info("%s: Number of papers (filtered) %d" % (os.path.basename(input_f), len(papers)))
if papers:
logging.info("%s: Number of papers (filtered) with abstract: %d" % (os.path.basename(input_f), sum(1 for p in papers if legit_document(p.abstract))))
logging.info("%s: Number of papers (filtered) with DOI but no abstract %d" % (os.path.basename(input_f), sum(1 for p in papers if not legit_document(p.abstract) and p.doi)))
document_id = self._next_paper_id()
for p in papers:
self._add_author(p.author_id, p.name, p.institution)
if (legit_document(p.abstract)):
ent = entities(p.abstract)
self._add_entities(p.author_id, document_id, p.year, p.institution, ent)
self._add_document_body(p.author_id, document_id, p.year, p.abstract, ent)
document_id += 1
self.ef.db_connection.commit()
def entities(self, author_id):
return self.ef.db.execute('''SELECT year, entity, rho FROM entity_occurrences WHERE author_id=?''', (author_id,)).fetchall()
def _add_entities(self, author_id, document_id, year, institution, annotations):
self.ef.db.executemany('INSERT INTO entity_occurrences VALUES (?,?,?,?,?)', ((a.entity_title, author_id, document_id, year, a.score) for a in annotations))
unique_entities = set(a.entity_title for a in annotations)
self.ef.db.executemany('''INSERT OR IGNORE INTO entities VALUES (?,?,0)''', ((e, institution) for e in unique_entities))
self.ef.db.executemany('''UPDATE entities
SET frequency = frequency + 1
WHERE entity=? AND institution=?''', ((e, institution) for e in unique_entities))
self.ef.db.execute('''INSERT OR IGNORE INTO institutions VALUES (?,0)''', (institution,))
self.ef.db.execute('''UPDATE institutions
SET document_count = document_count + 1
WHERE institution=?''', (institution,))
def _add_document_body(self, author_id, document_id, year, body, annotations):
annotated_t = annotated_text(body, annotations)
self.ef.db.execute('INSERT INTO documents VALUES (?,?,?,?)', (author_id, document_id, year, annotated_t))
def _next_paper_id(self):
return self.ef.db.execute('SELECT IFNULL(MAX(document_id), -1) FROM entity_occurrences').fetchall()[0][0] + 1
def _add_author(self, author_id, name, institution):
self.ef.db.execute('INSERT OR IGNORE INTO authors VALUES (?,?,?)', (author_id, name, institution))
class ExpertFinding(object):
def __init__(self, storage_db, erase=False, relatedness_dict_file=None):
if erase and os.path.isfile(storage_db):
os.remove(storage_db)
self.db_connection = sqlite3.connect(storage_db)
self.db = self.db_connection.cursor()
self.rel_dict = SqliteDict(relatedness_dict_file) if relatedness_dict_file else dict()
def builder(self):
return ExpertFindingBuilder(self)
def author_entity_frequency(self, author_id):
"""
        For each entity cited by the given author, return in how many of the
        author's papers it occurs (with rho above the minimum score), together
        with the years of those papers and the maximum rho.
"""
return self.db.execute(u'''
SELECT entity, COUNT(DISTINCT(document_id)) as author_freq, GROUP_CONCAT(year) as years, MAX(rho) AS max_rho
FROM entity_occurrences
WHERE author_id == ? AND rho > ?
GROUP BY entity
''', (author_id, DEFAULT_MIN_SCORE)).fetchall()
def author_entity_frequency_and_popularity(self, author_id):
"""
        Same as author_entity_frequency, but each entity additionally carries its
        overall popularity (total frequency summed across all institutions).
"""
return self.db.execute(u'''
SELECT e.entity, author_freq, SUM(e.frequency) AS entity_popularity, years, max_rho
FROM entities AS e,
(
SELECT entity, COUNT(DISTINCT(document_id)) as author_freq, GROUP_CONCAT(year) as years, MAX(rho) AS max_rho
FROM entity_occurrences
WHERE author_id == ? AND rho > ?
GROUP BY entity
) as d_e
WHERE d_e.entity == e.entity GROUP BY e.entity
''', (author_id, DEFAULT_MIN_SCORE)).fetchall()
def entity_popularity(self, entities):
"""
"""
return self.db.execute(u'''
SELECT entity, SUM(frequency) AS entity_popularity
FROM entities
WHERE entity IN ({})
GROUP BY entity
'''.format(join_entities_sql(entities))).fetchall()
def get_authors_count(self, institution):
"""
Returns how many authors are part of an institution.
"""
return self.db.execute(u'''SELECT COUNT(*) FROM authors WHERE institution==?''', (institution,)).fetchall()[0][0]
def total_papers(self):
return self.db.execute(u'''SELECT COUNT(*) FROM documents''').fetchall()[0][0]
def ef_iaf_author(self, author_id):
"""
        Given an author, retrieve the entities cited in his papers together with
        their EF (frequency within the author's papers), IAF (log of total papers
        over entity popularity), the EF*IAF product, maximum rho and years of
        occurrence, sorted by decreasing EF*IAF.
"""
total_papers = self.total_papers()
author_entity_frequency = self.author_entity_frequency_and_popularity(author_id)
author_papers = self.author_papers_count(author_id)
return sorted(((
entity,
entity_author_freq / float(author_papers),
log(total_papers/float(entity_popularity)),
entity_author_freq / float(author_papers) * log(total_papers/float(entity_popularity)),
max_rho,
[int(y) for y in years.split(",")],
) for entity, entity_author_freq, entity_popularity, years, max_rho in author_entity_frequency), key=lambda t: t[3], reverse=True)
def ef_iaf_entities(self, entities):
total_papers = self.total_papers()
query_entity_popularity = dict(self.entity_popularity(entities))
return dict((e, 1.0/len(entities) * log(total_papers/float(query_entity_popularity[e]))) for e in query_entity_popularity.keys())
def author_papers_count(self, author_id):
return self.db.execute(u'''SELECT COUNT(DISTINCT(document_id)) FROM entity_occurrences WHERE author_id=?''', (author_id,)).fetchall()[0][0]
def institution_papers_count(self, institution):
return self.db.execute(u'''
SELECT document_count
FROM "institutions"
WHERE institution=?''', (institution,)).fetchall()[0][0]
def author_id(self, author_name):
return [r[0] for r in self.db.execute(u'''SELECT author_id FROM authors WHERE name=?''', (author_name,)).fetchall()]
def document(self, doc_id):
return self.db.execute(u'''SELECT author_id, document_id, year, body FROM documents WHERE document_id=?''', (doc_id,)).fetchone()
def documents(self, author_id, entities):
return self.db.execute(u'''
SELECT document_id, year, entity, COUNT(*)
FROM entity_occurrences
WHERE author_id=? AND entity IN ({})
GROUP BY document_id, entity'''.format(join_entities_sql(entities)), (author_id,)).fetchall()
def institution(self, author_id):
return self.db.execute(u'''SELECT institution FROM authors WHERE author_id=?''', (author_id,)).fetchall()[0][0]
def name(self, author_id):
return self.db.execute(u'''SELECT name FROM authors WHERE author_id=?''', (author_id,)).fetchall()[0][0]
    def grouped_entities(self, author_id, year=None, min_freq=None):
        constraints = []
        if year is not None:
            constraints.append('year=%d' % year)
        if min_freq is not None:
            constraints.append('COUNT(*)>=%d' % min_freq)
        having = "HAVING {}".format(" AND ".join(constraints)) if constraints else ""
        return self.db.execute(u'''SELECT entity, year, AVG(rho), MIN(rho), MAX(rho), GROUP_CONCAT(rho), COUNT(*)
            FROM entity_occurrences
            WHERE author_id=?
            GROUP BY entity, year
            {}
            ORDER BY year, COUNT(*) DESC'''.format(having), (author_id,)).fetchall()
def papers_count(self):
return self.db.execute(u'''
SELECT author_id, COUNT(DISTINCT(document_id))
FROM "entity_occurrences"
GROUP BY author_id''').fetchall()
def print_documents_quantiles(self):
papers_count = zip(*self.papers_count())[1]
print "number of documents: {}".format(sum(papers_count))
print "number of authors: {}".format(len(papers_count))
quantiles = stats.mstats.mquantiles(papers_count, prob=[n / 10.0 for n in range(10)])
print "quantiles:", quantiles
for i in range(len(quantiles)):
begin = int(quantiles[i])
end = int(quantiles[i + 1]) - 1 if i < len(quantiles) - 1 else max(papers_count)
print "{} authors have {}-{} documents with abstract".format(sum(1 for c in papers_count if begin <= c <= end), begin, end)
def citing_authors(self, entities):
"""
Returns the list of authors citing any of the entities passed by arguments.
"""
result = self.db.execute(u'''SELECT DISTINCT(author_id)
FROM "entity_occurrences"
WHERE entity IN ({}) AND rho > ?'''.format(join_entities_sql(entities)), (DEFAULT_MIN_SCORE,)).fetchall()
return [t[0] for t in result]
def authors_completion(self, terms):
"""
Returns author names autocompletion for terms.
"""
return self.db.execute(u'''SELECT * FROM "authors" WHERE name LIKE ? LIMIT 50''', (u"%{}%".format(terms),)).fetchall()
def _prefetch_relatedness(self, entity_group_1, entity_group_2):
        pairs = [(e1, e2) for e1 in entity_group_1 for e2 in entity_group_2]
pairs_to_retrieve = [p for p in pairs if _str_titles(*p) not in self.rel_dict]
if pairs_to_retrieve:
for titles, rel in tagme.relatedness_title(pairs_to_retrieve):
self.rel_dict[_str_titles(*titles)] = rel
for p in pairs:
assert _str_titles(*p) in self.rel_dict
self.rel_dict.commit()
def cossim_efiaf_score(self, query_entities, author_id):
author_entity_to_efiaf = dict((e[0], e[3]) for e in self.ef_iaf_author(author_id))
query_entity_to_efiaf = self.ef_iaf_entities(query_entities)
return sum(author_entity_to_efiaf[e] * query_entity_to_efiaf[e] for e in set(author_entity_to_efiaf.keys()) & set(query_entity_to_efiaf.keys())) \
/ (math.sqrt(sum(author_entity_to_efiaf.values())) * math.sqrt(sum(query_entity_to_efiaf.values())))
def efiaf_score(self, query_entities, author_id):
author_papers = self.author_papers_count(author_id)
author_entity_to_ef = dict((t[0], t[1]/float(author_papers)) for t in self.author_entity_frequency(author_id))
query_entity_to_efiaf = self.ef_iaf_entities(query_entities)
return sum(author_entity_to_ef[e] * query_entity_to_efiaf[e] for e in set(query_entities) & set(author_entity_to_ef.keys()))
def eciaf_score(self, query_entities, author_id):
author_entity_to_ec = dict((t[0], t[1]) for t in self.author_entity_frequency(author_id))
query_entity_to_efiaf = self.ef_iaf_entities(query_entities)
return sum(author_entity_to_ec[e] * query_entity_to_efiaf[e] for e in set(query_entities) & set(author_entity_to_ec.keys()))
def log_ec_ef_iaf_score(self, query_entities, author_id):
author_papers = self.author_papers_count(author_id)
author_entity_to_ec = dict((t[0], t[1]) for t in self.author_entity_frequency(author_id))
query_entity_to_efiaf = self.ef_iaf_entities(query_entities)
return sum((math.log(author_entity_to_ec[e]) + author_entity_to_ec[e]/float(author_papers)) * query_entity_to_efiaf[e] for e in set(query_entities) & set(author_entity_to_ec.keys()))
def relatedness_geom(self, query_entities, author_id):
e_a_f = self.author_entity_frequency(author_id)
author_entity_to_ec = dict((t[0], t[1]) for t in e_a_f)
author_entity_to_maxrho = dict((t[0], t[3]) for t in e_a_f)
alpha = 10.0**-5
x = 10.0
self._prefetch_relatedness(query_entities, author_entity_to_ec.keys())
relatedness_weights = {}
for q_entity in query_entities:
q_entity_relatedness = [(a_entity, self.rel_dict[_str_titles(q_entity, a_entity)]) for a_entity in author_entity_to_ec.keys()]
val_weights = [(1.0 - r**x + alpha, author_entity_to_ec[a_entity] * author_entity_to_maxrho[a_entity]) for a_entity, r in q_entity_relatedness]
relatedness_weights[q_entity] = val_weights
return mean([clip(1 - weighted_geom_mean(relatedness_weights[q_entity]) + alpha, 0.0, 1.0) ** (1.0/x) for q_entity in relatedness_weights])
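    # Added note (not in the original source): for each query entity q this
    # computes a weighted geometric mean, over the author's entities a, of
    # (1 - rel(q, a)**x + alpha), weighted by count(a) * max_rho(a); the
    # final score is the mean over q of
    # clip(1 - geom_mean + alpha, 0, 1) ** (1 / x), with alpha = 1e-5, x = 10.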
def find_expert(self, query, scoring):
logging.debug(u"Processing query: {}".format(query))
start_time = time.time()
query_entities = set(a.entity_title for a in entities(query))
logging.debug(u"Found the following entities in the query: {}".format(u",".join(query_entities)))
authors = self.citing_authors(query_entities)
logging.debug(u"Found %d authors that matched the query, computing score for each of them." % len(authors))
results = []
for author_id in authors:
score = scoring(self, query_entities, author_id)
name = self.name(author_id)
results.append({"name":name, "author_id":author_id, "score":score})
logging.debug(u"%s score=%.3f", name, score)
runtime = time.time() - start_time
logging.info("Query completed in %.3f sec" % (runtime,))
return sorted(results, key=lambda t: t["score"], reverse=True), runtime, query_entities
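# --- Added usage sketch (hypothetical, not part of the original module) ---
# find_expert() takes a free-text query and a scoring callable with the
# signature scoring(data_layer, query_entities, author_id). Assuming `dl` is
# an instance of the class above (the instance name and query string are
# illustrative assumptions), a minimal call could look like:
#
#     results, runtime, query_entities = dl.find_expert(
#         u"entity linking for scholarly search",
#         scoring=lambda obj, ents, aid: obj.efiaf_score(ents, aid))
#     for hit in results[:10]:
#         print u"{score:.3f}\t{name}".format(**hit)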
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 clowwindy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import, division, print_function, \
with_statement
import time
import socket
import errno
import struct
import logging
import traceback
import random
from shadowsocks import encrypt, eventloop, utils, common
from shadowsocks.common import parse_header
# we clear at most TIMEOUTS_CLEAN_SIZE timeouts each time
TIMEOUTS_CLEAN_SIZE = 512
# we check timeouts every TIMEOUT_PRECISION seconds
TIMEOUT_PRECISION = 4
MSG_FASTOPEN = 0x20000000
# SOCKS CMD definition
CMD_CONNECT = 1
CMD_BIND = 2
CMD_UDP_ASSOCIATE = 3
# TCP Relay can be either sslocal or ssserver
# for sslocal, is_local is True
# for each opening port, we have a TCP Relay
# for each connection, we have a TCP Relay Handler to handle the connection
# for each handler, we have 2 sockets:
# local: connected to the client
# remote: connected to remote server
# for each handler, we have 2 streams:
# upstream: from client to server direction
# read local and write to remote
# downstream: from server to client direction
# read remote and write to local
# for each handler, it could be at one of several stages:
# sslocal:
# stage 0 SOCKS hello received from local, send hello to local
# stage 1 addr received from local, query DNS for remote
# stage 2 UDP assoc
# stage 3 DNS resolved, connect to remote
# stage 4 still connecting, more data from local received
# stage 5 remote connected, piping local and remote
# ssserver:
# stage 0 just jump to stage 1
# stage 1 addr received from local, query DNS for remote
# stage 3 DNS resolved, connect to remote
# stage 4 still connecting, more data from local received
# stage 5 remote connected, piping local and remote
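# Added illustration (not in the original source): a typical sslocal
# connection walks these stages roughly as follows:
#   STAGE_INIT       SOCKS hello received, b'\x05\x00' sent back, -> STAGE_ADDR
#   STAGE_ADDR       request parsed, DNS query for the chosen server starts,
#                    -> STAGE_DNS
#   STAGE_DNS        resolution callback fires, remote socket created,
#                    connect() issued, -> STAGE_CONNECTING
#   STAGE_CONNECTING queued data flushed once the remote side is writable
#   STAGE_STREAM     traffic piped between local and remote in both directions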
STAGE_INIT = 0
STAGE_ADDR = 1
STAGE_UDP_ASSOC = 2
STAGE_DNS = 3
STAGE_CONNECTING = 4
STAGE_STREAM = 5
STAGE_DESTROYED = -1
# stream direction
STREAM_UP = 0
STREAM_DOWN = 1
# stream wait status, indicating it's waiting for reading, etc
WAIT_STATUS_INIT = 0
WAIT_STATUS_READING = 1
WAIT_STATUS_WRITING = 2
WAIT_STATUS_READWRITING = WAIT_STATUS_READING | WAIT_STATUS_WRITING
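# Added note (not in the original source): the wait-status values are bit
# flags, so the combined state defined above is simply both bits set
# (WAIT_STATUS_READWRITING == 3), and _update_stream() below tests them with
# expressions such as `status & WAIT_STATUS_READING` to decide which poll
# events to register for each socket.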
BUF_SIZE = 32 * 1024
class TCPRelayHandler(object):
def __init__(self, server, fd_to_handlers, loop, local_sock, config,
dns_resolver, is_local):
self._server = server
self._fd_to_handlers = fd_to_handlers
self._loop = loop
self._local_sock = local_sock
self._remote_sock = None
self._config = config
self._dns_resolver = dns_resolver
self._is_local = is_local
self._stage = STAGE_INIT
self._encryptor = encrypt.Encryptor(config['password'],
config['method'])
self._fastopen_connected = False
self._data_to_write_to_local = []
self._data_to_write_to_remote = []
self._upstream_status = WAIT_STATUS_READING
self._downstream_status = WAIT_STATUS_INIT
self._client_address = local_sock.getpeername()[:2]
self._remote_address = None
if 'forbidden_ip' in config:
self._forbidden_iplist = config['forbidden_ip']
else:
self._forbidden_iplist = None
if is_local:
self._chosen_server = self._get_a_server()
fd_to_handlers[local_sock.fileno()] = self
local_sock.setblocking(False)
local_sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
loop.add(local_sock, eventloop.POLL_IN | eventloop.POLL_ERR)
self.last_activity = 0
self._update_activity()
def __hash__(self):
# default __hash__ is id / 16
# we want to eliminate collisions
return id(self)
@property
def remote_address(self):
return self._remote_address
def _get_a_server(self):
server = self._config['server']
server_port = self._config['server_port']
if type(server_port) == list:
server_port = random.choice(server_port)
logging.debug('chosen server: %s:%d', server, server_port)
# TODO support multiple server IP
return server, server_port
def _update_activity(self):
# tell the TCP Relay we have activities recently
        # otherwise it will think we are inactive and time us out
self._server.update_activity(self)
def _update_stream(self, stream, status):
# update a stream to a new waiting status
# check if status is changed
# only update if dirty
dirty = False
if stream == STREAM_DOWN:
if self._downstream_status != status:
self._downstream_status = status
dirty = True
elif stream == STREAM_UP:
if self._upstream_status != status:
self._upstream_status = status
dirty = True
if dirty:
if self._local_sock:
event = eventloop.POLL_ERR
if self._downstream_status & WAIT_STATUS_WRITING:
event |= eventloop.POLL_OUT
if self._upstream_status & WAIT_STATUS_READING:
event |= eventloop.POLL_IN
self._loop.modify(self._local_sock, event)
if self._remote_sock:
event = eventloop.POLL_ERR
if self._downstream_status & WAIT_STATUS_READING:
event |= eventloop.POLL_IN
if self._upstream_status & WAIT_STATUS_WRITING:
event |= eventloop.POLL_OUT
self._loop.modify(self._remote_sock, event)
def _write_to_sock(self, data, sock):
# write data to sock
# if only some of the data are written, put remaining in the buffer
# and update the stream to wait for writing
if not data or not sock:
return False
uncomplete = False
try:
l = len(data)
s = sock.send(data)
if s < l:
data = data[s:]
uncomplete = True
except (OSError, IOError) as e:
error_no = eventloop.errno_from_exception(e)
if error_no in (errno.EAGAIN, errno.EINPROGRESS,
errno.EWOULDBLOCK):
uncomplete = True
else:
logging.error(e)
if self._config['verbose']:
traceback.print_exc()
self.destroy()
return False
if uncomplete:
if sock == self._local_sock:
self._data_to_write_to_local.append(data)
self._update_stream(STREAM_DOWN, WAIT_STATUS_WRITING)
elif sock == self._remote_sock:
self._data_to_write_to_remote.append(data)
self._update_stream(STREAM_UP, WAIT_STATUS_WRITING)
else:
logging.error('write_all_to_sock:unknown socket')
else:
if sock == self._local_sock:
self._update_stream(STREAM_DOWN, WAIT_STATUS_READING)
elif sock == self._remote_sock:
self._update_stream(STREAM_UP, WAIT_STATUS_READING)
else:
logging.error('write_all_to_sock:unknown socket')
return True
def _handle_stage_connecting(self, data):
if self._is_local:
data = self._encryptor.encrypt(data)
self._data_to_write_to_remote.append(data)
if self._is_local and not self._fastopen_connected and \
self._config['fast_open']:
# for sslocal and fastopen, we basically wait for data and use
# sendto to connect
try:
# only connect once
self._fastopen_connected = True
remote_sock = \
self._create_remote_socket(self._chosen_server[0],
self._chosen_server[1])
self._loop.add(remote_sock, eventloop.POLL_ERR)
data = b''.join(self._data_to_write_to_remote)
l = len(data)
s = remote_sock.sendto(data, MSG_FASTOPEN, self._chosen_server)
if s < l:
data = data[s:]
self._data_to_write_to_remote = [data]
else:
self._data_to_write_to_remote = []
self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING)
except (OSError, IOError) as e:
if eventloop.errno_from_exception(e) == errno.EINPROGRESS:
# in this case data is not sent at all
self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING)
elif eventloop.errno_from_exception(e) == errno.ENOTCONN:
logging.error('fast open not supported on this OS')
self._config['fast_open'] = False
self.destroy()
else:
logging.error(e)
if self._config['verbose']:
traceback.print_exc()
self.destroy()
def _handle_stage_addr(self, data):
try:
if self._is_local:
cmd = common.ord(data[1])
if cmd == CMD_UDP_ASSOCIATE:
logging.debug('UDP associate')
if self._local_sock.family == socket.AF_INET6:
header = b'\x05\x00\x00\x04'
else:
header = b'\x05\x00\x00\x01'
addr, port = self._local_sock.getsockname()[:2]
addr_to_send = socket.inet_pton(self._local_sock.family,
addr)
port_to_send = struct.pack('>H', port)
self._write_to_sock(header + addr_to_send + port_to_send,
self._local_sock)
self._stage = STAGE_UDP_ASSOC
# just wait for the client to disconnect
return
elif cmd == CMD_CONNECT:
# just trim VER CMD RSV
data = data[3:]
else:
logging.error('unknown command %d', cmd)
self.destroy()
return
header_result = parse_header(data)
if header_result is None:
raise Exception('can not parse header')
addrtype, remote_addr, remote_port, header_length = header_result
logging.info('connecting %s:%d from %s:%d' %
(common.to_str(remote_addr), remote_port,
self._client_address[0], self._client_address[1]))
self._remote_address = (remote_addr, remote_port)
# pause reading
self._update_stream(STREAM_UP, WAIT_STATUS_WRITING)
self._stage = STAGE_DNS
if self._is_local:
# forward address to remote
self._write_to_sock((b'\x05\x00\x00\x01'
b'\x00\x00\x00\x00\x10\x10'),
self._local_sock)
data_to_send = self._encryptor.encrypt(data)
self._data_to_write_to_remote.append(data_to_send)
                # note: this may call _handle_dns_resolved directly
self._dns_resolver.resolve(self._chosen_server[0],
self._handle_dns_resolved)
else:
if len(data) > header_length:
self._data_to_write_to_remote.append(data[header_length:])
                # note: this may call _handle_dns_resolved directly
self._dns_resolver.resolve(remote_addr,
self._handle_dns_resolved)
except Exception as e:
self._log_error(e)
if self._config['verbose']:
traceback.print_exc()
# TODO use logging when debug completed
self.destroy()
def _create_remote_socket(self, ip, port):
addrs = socket.getaddrinfo(ip, port, 0, socket.SOCK_STREAM,
socket.SOL_TCP)
if len(addrs) == 0:
raise Exception("getaddrinfo failed for %s:%d" % (ip, port))
af, socktype, proto, canonname, sa = addrs[0]
if self._forbidden_iplist:
if common.to_str(sa[0]) in self._forbidden_iplist:
raise Exception('IP %s is in forbidden list, reject' %
common.to_str(sa[0]))
remote_sock = socket.socket(af, socktype, proto)
self._remote_sock = remote_sock
self._fd_to_handlers[remote_sock.fileno()] = self
remote_sock.setblocking(False)
remote_sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
return remote_sock
def _handle_dns_resolved(self, result, error):
if error:
self._log_error(error)
self.destroy()
return
if result:
ip = result[1]
if ip:
try:
self._stage = STAGE_CONNECTING
remote_addr = ip
if self._is_local:
remote_port = self._chosen_server[1]
else:
remote_port = self._remote_address[1]
if self._is_local and self._config['fast_open']:
# for fastopen:
# wait for more data to arrive and send them in one SYN
self._stage = STAGE_CONNECTING
# we don't have to wait for remote since it's not
# created
self._update_stream(STREAM_UP, WAIT_STATUS_READING)
# TODO when there is already data in this packet
else:
# else do connect
remote_sock = self._create_remote_socket(remote_addr,
remote_port)
try:
remote_sock.connect((remote_addr, remote_port))
except (OSError, IOError) as e:
if eventloop.errno_from_exception(e) == \
errno.EINPROGRESS:
pass
self._loop.add(remote_sock,
eventloop.POLL_ERR | eventloop.POLL_OUT)
self._stage = STAGE_CONNECTING
self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING)
self._update_stream(STREAM_DOWN, WAIT_STATUS_READING)
return
except Exception as e:
logging.error(e)
if self._config['verbose']:
traceback.print_exc()
self.destroy()
def _on_local_read(self):
# handle all local read events and dispatch them to methods for
# each stage
self._update_activity()
if not self._local_sock:
return
is_local = self._is_local
data = None
try:
data = self._local_sock.recv(BUF_SIZE)
except (OSError, IOError) as e:
if eventloop.errno_from_exception(e) in \
(errno.ETIMEDOUT, errno.EAGAIN, errno.EWOULDBLOCK):
return
if not data:
self.destroy()
return
if not is_local:
data = self._encryptor.decrypt(data)
if not data:
return
if self._stage == STAGE_STREAM:
if self._is_local:
data = self._encryptor.encrypt(data)
self._write_to_sock(data, self._remote_sock)
return
elif is_local and self._stage == STAGE_INIT:
# TODO check auth method
self._write_to_sock(b'\x05\00', self._local_sock)
self._stage = STAGE_ADDR
return
elif self._stage == STAGE_CONNECTING:
self._handle_stage_connecting(data)
elif (is_local and self._stage == STAGE_ADDR) or \
(not is_local and self._stage == STAGE_INIT):
self._handle_stage_addr(data)
def _on_remote_read(self):
# handle all remote read events
self._update_activity()
data = None
try:
data = self._remote_sock.recv(BUF_SIZE)
except (OSError, IOError) as e:
if eventloop.errno_from_exception(e) in \
(errno.ETIMEDOUT, errno.EAGAIN, errno.EWOULDBLOCK):
return
if not data:
self.destroy()
return
if self._is_local:
data = self._encryptor.decrypt(data)
else:
data = self._encryptor.encrypt(data)
try:
self._write_to_sock(data, self._local_sock)
except Exception as e:
logging.error(e)
if self._config['verbose']:
traceback.print_exc()
# TODO use logging when debug completed
self.destroy()
def _on_local_write(self):
# handle local writable event
if self._data_to_write_to_local:
data = b''.join(self._data_to_write_to_local)
self._data_to_write_to_local = []
self._write_to_sock(data, self._local_sock)
else:
self._update_stream(STREAM_DOWN, WAIT_STATUS_READING)
def _on_remote_write(self):
# handle remote writable event
self._stage = STAGE_STREAM
if self._data_to_write_to_remote:
data = b''.join(self._data_to_write_to_remote)
self._data_to_write_to_remote = []
self._write_to_sock(data, self._remote_sock)
else:
self._update_stream(STREAM_UP, WAIT_STATUS_READING)
def _on_local_error(self):
logging.debug('got local error')
if self._local_sock:
logging.error(eventloop.get_sock_error(self._local_sock))
self.destroy()
def _on_remote_error(self):
logging.debug('got remote error')
if self._remote_sock:
logging.error(eventloop.get_sock_error(self._remote_sock))
self.destroy()
def handle_event(self, sock, event):
# handle all events in this handler and dispatch them to methods
if self._stage == STAGE_DESTROYED:
logging.debug('ignore handle_event: destroyed')
return
# order is important
if sock == self._remote_sock:
if event & eventloop.POLL_ERR:
self._on_remote_error()
if self._stage == STAGE_DESTROYED:
return
if event & (eventloop.POLL_IN | eventloop.POLL_HUP):
self._on_remote_read()
if self._stage == STAGE_DESTROYED:
return
if event & eventloop.POLL_OUT:
self._on_remote_write()
elif sock == self._local_sock:
if event & eventloop.POLL_ERR:
self._on_local_error()
if self._stage == STAGE_DESTROYED:
return
if event & (eventloop.POLL_IN | eventloop.POLL_HUP):
self._on_local_read()
if self._stage == STAGE_DESTROYED:
return
if event & eventloop.POLL_OUT:
self._on_local_write()
else:
logging.warn('unknown socket')
def _log_error(self, e):
logging.error('%s when handling connection from %s:%d' %
(e, self._client_address[0], self._client_address[1]))
def destroy(self):
# destroy the handler and release any resources
# promises:
# 1. destroy won't make another destroy() call inside
# 2. destroy releases resources so it prevents future call to destroy
# 3. destroy won't raise any exceptions
# if any of the promises are broken, it indicates a bug has been
        # introduced! most likely memory leaks, etc
if self._stage == STAGE_DESTROYED:
            # this shouldn't happen
logging.debug('already destroyed')
return
self._stage = STAGE_DESTROYED
if self._remote_address:
logging.debug('destroy: %s:%d' %
self._remote_address)
else:
logging.debug('destroy')
if self._remote_sock:
logging.debug('destroying remote')
self._loop.remove(self._remote_sock)
del self._fd_to_handlers[self._remote_sock.fileno()]
self._remote_sock.close()
self._remote_sock = None
if self._local_sock:
logging.debug('destroying local')
self._loop.remove(self._local_sock)
del self._fd_to_handlers[self._local_sock.fileno()]
self._local_sock.close()
self._local_sock = None
self._dns_resolver.remove_callback(self._handle_dns_resolved)
self._server.remove_handler(self)
class TCPRelay(object):
def __init__(self, config, dns_resolver, is_local):
self._config = config
self._is_local = is_local
self._dns_resolver = dns_resolver
self._closed = False
self._eventloop = None
self._fd_to_handlers = {}
self._last_time = time.time()
self._timeout = config['timeout']
self._timeouts = [] # a list for all the handlers
        # we trim the timeouts once in a while
self._timeout_offset = 0 # last checked position for timeout
self._handler_to_timeouts = {} # key: handler value: index in timeouts
if is_local:
listen_addr = config['local_address']
listen_port = config['local_port']
else:
listen_addr = config['server']
listen_port = config['server_port']
self._listen_port = listen_port
addrs = socket.getaddrinfo(listen_addr, listen_port, 0,
socket.SOCK_STREAM, socket.SOL_TCP)
if len(addrs) == 0:
raise Exception("can't get addrinfo for %s:%d" %
(listen_addr, listen_port))
af, socktype, proto, canonname, sa = addrs[0]
server_socket = socket.socket(af, socktype, proto)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind(sa)
server_socket.setblocking(False)
if config['fast_open']:
try:
                server_socket.setsockopt(socket.SOL_TCP, 23, 5)  # 23 == TCP_FASTOPEN on Linux
except socket.error:
logging.error('warning: fast open is not available')
self._config['fast_open'] = False
server_socket.listen(1024)
self._server_socket = server_socket
def add_to_loop(self, loop):
if self._eventloop:
            raise Exception('already added to loop')
if self._closed:
raise Exception('already closed')
self._eventloop = loop
loop.add_handler(self._handle_events)
self._eventloop.add(self._server_socket,
eventloop.POLL_IN | eventloop.POLL_ERR)
def remove_handler(self, handler):
index = self._handler_to_timeouts.get(hash(handler), -1)
if index >= 0:
# delete is O(n), so we just set it to None
self._timeouts[index] = None
del self._handler_to_timeouts[hash(handler)]
def update_activity(self, handler):
# set handler to active
now = int(time.time())
if now - handler.last_activity < TIMEOUT_PRECISION:
# thus we can lower timeout modification frequency
return
handler.last_activity = now
index = self._handler_to_timeouts.get(hash(handler), -1)
if index >= 0:
# delete is O(n), so we just set it to None
self._timeouts[index] = None
length = len(self._timeouts)
self._timeouts.append(handler)
self._handler_to_timeouts[hash(handler)] = length
def _sweep_timeout(self):
# tornado's timeout memory management is more flexible than we need
# we just need a sorted last_activity queue and it's faster than heapq
# in fact we can do O(1) insertion/remove so we invent our own
if self._timeouts:
logging.log(utils.VERBOSE_LEVEL, 'sweeping timeouts')
now = time.time()
length = len(self._timeouts)
pos = self._timeout_offset
while pos < length:
handler = self._timeouts[pos]
if handler:
if now - handler.last_activity < self._timeout:
break
else:
if handler.remote_address:
logging.warn('timed out: %s:%d' %
handler.remote_address)
else:
logging.warn('timed out')
handler.destroy()
self._timeouts[pos] = None # free memory
pos += 1
else:
pos += 1
if pos > TIMEOUTS_CLEAN_SIZE and pos > length >> 1:
# clean up the timeout queue when it gets larger than half
# of the queue
self._timeouts = self._timeouts[pos:]
for key in self._handler_to_timeouts:
self._handler_to_timeouts[key] -= pos
pos = 0
self._timeout_offset = pos
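    # Added illustration (not in the original source): update_activity()
    # appends a handler to self._timeouts and records its index in
    # self._handler_to_timeouts, turning any older slot for the same handler
    # into None instead of deleting it. _sweep_timeout() scans from
    # self._timeout_offset, destroys handlers whose last_activity is older
    # than self._timeout, and only compacts the list (rebasing the stored
    # indices) once the scanned prefix exceeds TIMEOUTS_CLEAN_SIZE and half
    # the list length, which keeps insertion and removal O(1) amortized.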
def _handle_events(self, events):
# handle events and dispatch to handlers
for sock, fd, event in events:
if sock:
logging.log(utils.VERBOSE_LEVEL, 'fd %d %s', fd,
eventloop.EVENT_NAMES.get(event, event))
if sock == self._server_socket:
if event & eventloop.POLL_ERR:
# TODO
raise Exception('server_socket error')
try:
logging.debug('accept')
conn = self._server_socket.accept()
TCPRelayHandler(self, self._fd_to_handlers,
self._eventloop, conn[0], self._config,
self._dns_resolver, self._is_local)
except (OSError, IOError) as e:
error_no = eventloop.errno_from_exception(e)
if error_no in (errno.EAGAIN, errno.EINPROGRESS,
errno.EWOULDBLOCK):
continue
else:
logging.error(e)
if self._config['verbose']:
traceback.print_exc()
else:
if sock:
handler = self._fd_to_handlers.get(fd, None)
if handler:
handler.handle_event(sock, event)
else:
logging.warn('poll removed fd')
now = time.time()
if now - self._last_time > TIMEOUT_PRECISION:
self._sweep_timeout()
self._last_time = now
if self._closed:
if self._server_socket:
self._eventloop.remove(self._server_socket)
self._server_socket.close()
self._server_socket = None
logging.info('closed listen port %d', self._listen_port)
if not self._fd_to_handlers:
self._eventloop.remove_handler(self._handle_events)
def close(self, next_tick=False):
self._closed = True
if not next_tick:
self._server_socket.close()
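# --- Added wiring sketch (hypothetical, not part of the original module) ---
# This module only defines TCPRelay and TCPRelayHandler; the caller supplies
# a config dict (keys read above include 'server', 'server_port',
# 'local_address', 'local_port', 'password', 'method', 'timeout',
# 'fast_open', 'verbose' and optionally 'forbidden_ip'), a DNS resolver
# exposing resolve()/remove_callback(), and an event loop. Roughly, with the
# resolver and loop class names assumed from the surrounding shadowsocks
# code base rather than taken from this file:
#
#     dns_resolver = asyncdns.DNSResolver()
#     tcp_server = TCPRelay(config, dns_resolver, is_local=True)
#     loop = eventloop.EventLoop()
#     dns_resolver.add_to_loop(loop)
#     tcp_server.add_to_loop(loop)
#     loop.run()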
|
|
# -*- coding: utf-8 -*-
"""
Admin Controllers
"""
module = request.controller
resourcename = request.function
# S3 framework functions
# -----------------------------------------------------------------------------
def index():
""" Module's Home Page """
module_name = settings.modules[module].name_nice
response.title = module_name
return dict(module_name=module_name)
# =============================================================================
@auth.s3_requires_membership(1)
def setting():
"""
Custom page to link to those Settings which can be edited through the web interface
"""
return dict()
# =============================================================================
# AAA
# =============================================================================
@auth.s3_requires_membership(1)
def role():
"""
Role Manager
"""
# ACLs as component of roles
s3db.add_components("auth_group",
**{auth.permission.TABLENAME: "group_id"}
)
def prep(r):
if r.representation != "html":
return False
handler = s3base.S3RoleManager()
modules = settings.modules
handler.controllers = Storage([(m, modules[m])
for m in modules
if modules[m].restricted])
# Configure REST methods
set_handler = r.set_handler
set_handler("users", handler)
set_handler("read", handler)
set_handler("list", handler)
set_handler("copy", handler)
set_handler("create", handler)
set_handler("update", handler)
set_handler("delete", handler)
return True
s3.prep = prep
s3.stylesheets.append( "S3/role.css" )
output = s3_rest_controller("auth", "group")
return output
# -----------------------------------------------------------------------------
def user():
""" RESTful CRUD controller """
table = auth.settings.table_user
if s3_has_role("ADMIN"):
# Needed as Admin has all roles
pe_ids = None
elif s3_has_role("ORG_ADMIN"):
if settings.get_security_policy() < 6:
# Filter users to just those belonging to the Org Admin's Org & Descendants
otable = s3db.org_organisation
pe_id = db(otable.id == auth.user.organisation_id).select(otable.pe_id,
limitby=(0, 1),
cache=s3db.cache,
).first().pe_id
pe_ids = s3db.pr_get_descendants(pe_id, entity_types="org_organisation")
pe_ids.append(pe_id)
s3.filter = (otable.pe_id.belongs(pe_ids)) & \
(table.organisation_id == otable.id)
else:
# Filter users to just those belonging to the Org Admin's Realms
pe_ids = auth.user.realms[auth.get_system_roles().ORG_ADMIN]
if pe_ids:
otable = s3db.org_organisation
s3.filter = (otable.pe_id.belongs(pe_ids)) & \
(table.organisation_id == otable.id)
else:
auth.permission.fail()
auth.configure_user_fields(pe_ids)
s3db.add_components("auth_user",
auth_membership = "user_id")
list_fields = ["first_name",
"last_name",
"email",
]
lappend = list_fields.append
if len(settings.get_L10n_languages()) > 1:
lappend("language")
if auth.s3_has_role("ADMIN"):
if settings.get_auth_admin_sees_organisation():
lappend("organisation_id")
elif settings.get_auth_registration_requests_organisation():
lappend("organisation_id")
if settings.get_auth_registration_requests_organisation_group():
lappend("org_group_id")
if settings.get_auth_registration_requests_site():
lappend("site_id")
link_user_to = settings.get_auth_registration_link_user_to()
if link_user_to and len(link_user_to) > 1 and settings.get_auth_show_link():
lappend("link_user_to")
lappend((T("Registration"), "created_on"))
table.created_on.represent = s3base.S3DateTime.date_represent
lappend((T("Roles"), "membership.group_id"))
s3db.configure("auth_user",
create_next = URL(c="admin", f="user", args=["[id]", "roles"]),
create_onaccept = lambda form: auth.s3_approve_user(form.vars),
list_fields = list_fields,
main = "first_name",
#update_onaccept = lambda form: auth.s3_link_user(form.vars),
)
def disable_user(r, **args):
if not r.id:
session.error = T("Can only disable 1 record at a time!")
redirect(URL(args=[]))
if r.id == session.auth.user.id: # we're trying to disable ourself
session.error = T("Cannot disable your own account!")
redirect(URL(args=[]))
db(table.id == r.id).update(registration_key = "disabled")
session.confirmation = T("User Account has been Disabled")
redirect(URL(args=[]))
def approve_user(r, **args):
if not r.id:
session.error = T("Can only approve 1 record at a time!")
redirect(URL(args=[]))
user = db(table.id == r.id).select(limitby=(0, 1)).first()
auth.s3_approve_user(user)
session.confirmation = T("User Account has been Approved")
redirect(URL(args=[r.id, "roles"]))
def link_user(r, **args):
if not r.id:
session.error = T("Can only update 1 record at a time!")
redirect(URL(args=[]))
user = db(table.id == r.id).select(limitby=(0, 1)).first()
auth.s3_link_user(user)
session.confirmation = T("User has been (re)linked to Person and Human Resource record")
redirect(URL(args=[]))
# Custom Methods
set_method = s3db.set_method
set_method("auth", "user", method="roles",
action=s3base.S3RoleManager())
set_method("auth", "user", method="disable",
action=disable_user)
set_method("auth", "user", method="approve",
action=approve_user)
set_method("auth", "user", method="link",
action=link_user)
# CRUD Strings
ADD_USER = T("Create User")
s3.crud_strings["auth_user"] = Storage(
label_create = ADD_USER,
title_display = T("User Details"),
title_list = T("Users"),
title_update = T("Edit User"),
title_upload = T("Import Users"),
label_list_button = T("List Users"),
label_delete_button = T("Delete User"),
msg_record_created = T("User added"),
msg_record_modified = T("User updated"),
msg_record_deleted = T("User deleted"),
msg_list_empty = T("No Users currently registered"))
def rheader(r):
if r.representation != "html":
return None
rheader = DIV()
if r.record:
id = r.id
registration_key = r.record.registration_key
if not registration_key:
btn = A(T("Disable"),
_class = "action-btn",
_title = "Disable User",
_href = URL(args=[id, "disable"])
)
rheader.append(btn)
if settings.get_auth_show_link():
btn = A(T("Link"),
_class = "action-btn",
_title = "Link (or refresh link) between User, Person & HR Record",
_href = URL(args=[id, "link"])
)
rheader.append(btn)
#elif registration_key == "pending":
# btn = A(T("Approve"),
# _class = "action-btn",
# _title = "Approve User",
# _href = URL(args=[id, "approve"])
# )
# rheader.append(btn)
else:
# Verify & Approve
btn = A(T("Approve"),
_class = "action-btn",
_title = "Approve User",
_href = URL(args=[id, "approve"])
)
rheader.append(btn)
tabs = [(T("User Details"), None),
(T("Roles"), "roles")
]
rheader_tabs = s3_rheader_tabs(r, tabs)
rheader.append(rheader_tabs)
return rheader
# Pre-processor
def prep(r):
if r.interactive:
s3db.configure(r.tablename,
addbtn = True,
# Password confirmation
create_onvalidation = user_create_onvalidation,
deletable = False,
# jquery.validate is clashing with dataTables so don't embed the create form in with the List
listadd = False,
sortby = [[2, "asc"], [1, "asc"]],
)
elif r.representation == "xls":
lappend((T("Status"), "registration_key"))
if r.method == "delete" and r.http == "GET":
if r.id == session.auth.user.id: # we're trying to delete ourself
get_vars.update({"user.id":str(r.id)})
r.id = None
s3db.configure(r.tablename,
delete_next = URL(c="default", f="user/logout"))
s3.crud.confirm_delete = T("You are attempting to delete your own account - are you sure you want to proceed?")
if r.http == "GET" and not r.method:
session.s3.cancel = r.url()
return True
s3.prep = prep
def postp(r, output):
if r.interactive and isinstance(output, dict):
# Only show the disable button if the user is not currently disabled
table = r.table
query = (table.registration_key == None) | \
(table.registration_key == "")
rows = db(query).select(table.id)
restrict = [str(row.id) for row in rows]
s3.actions = [dict(label=str(UPDATE), _class="action-btn",
url=URL(c="admin", f="user",
args=["[id]", "update"])),
dict(label=str(T("Roles")), _class="action-btn",
url=URL(c="admin", f="user",
args=["[id]", "roles"])),
dict(label=str(T("Disable")), _class="action-btn",
url=URL(c="admin", f="user",
args=["[id]", "disable"]),
restrict = restrict)
]
if settings.get_auth_show_link():
s3.actions.insert(1, dict(label=str(T("Link")),
_class="action-btn",
_title = str(T("Link (or refresh link) between User, Person & HR Record")),
url=URL(c="admin", f="user",
args=["[id]", "link"]),
restrict = restrict)
)
# Only show the approve button if the user is currently pending
query = (table.registration_key != "disabled") & \
(table.registration_key != None) & \
(table.registration_key != "")
rows = db(query).select(table.id)
restrict = [str(row.id) for row in rows]
s3.actions.append(
dict(label=str(T("Approve")), _class="action-btn",
url=URL(c="admin", f="user",
args=["[id]", "approve"]),
restrict = restrict)
)
# Add some highlighting to the rows
query = (table.registration_key.belongs(["disabled", "pending"]))
rows = db(query).select(table.id,
table.registration_key)
s3.dataTableStyleDisabled = s3.dataTableStyleWarning = [str(row.id) for row in rows if row.registration_key == "disabled"]
s3.dataTableStyleAlert = [str(row.id) for row in rows if row.registration_key == "pending"]
# Translate the status values
values = [dict(col=6, key="", display=str(T("Active"))),
dict(col=6, key="None", display=str(T("Active"))),
dict(col=6, key="pending", display=str(T("Pending"))),
dict(col=6, key="disabled", display=str(T("Disabled")))
]
s3.dataTableDisplay = values
# @ToDo: Merge these with the code in s3aaa.py and use S3SQLCustomForm to implement
form = output.get("form", None)
if not form:
create_url = URL(args=["create"])
output["showadd_btn"] = s3base.S3CRUD.crud_button(T("Create User"),
_href=create_url)
return output
# Assume formstyle callable
id = "auth_user_password_two__row"
label = "%s:" % T("Verify password")
widget = INPUT(_name="password_two",
_id="password_two",
_type="password",
_disabled="disabled",
)
comment = ""
row = s3_formstyle(id, label, widget, comment, hidden=True)
if isinstance(row, tuple):
# Formstyle with separate row for label (e.g. default Eden formstyle)
tuple_rows = True
form[0].insert(8, row)
else:
# Formstyle with just a single row (e.g. Bootstrap, Foundation or DRRPP)
tuple_rows = False
form[0].insert(4, row)
# @ToDo: Ensure this reads existing values & creates/updates when saved
#if settings.get_auth_registration_requests_mobile_phone():
# id = "auth_user_mobile__row"
# label = LABEL("%s:" % settings.get_ui_label_mobile_phone(),
# _for="mobile",
# )
# widget = INPUT(_name="mobile",
# _id="auth_user_mobile",
# _class="string",
# )
# comment = ""
# row = s3_formstyle(id, label, widget, comment)
# if tuple_rows:
# form[0].insert(-8, row)
# else:
# form[0].insert(-4, row)
# Add client-side validation
auth.s3_register_validation()
return output
s3.postp = postp
s3.import_prep = auth.s3_import_prep
output = s3_rest_controller("auth", "user",
csv_stylesheet = ("auth", "user.xsl"),
csv_template = ("auth", "user"),
rheader = rheader,
)
return output
# =============================================================================
def group():
"""
RESTful CRUD controller
- used by role_required autocomplete
"""
tablename = "auth_group"
if not auth.s3_has_role(ADMIN):
s3db.configure(tablename,
editable=False,
insertable=False,
deletable=False)
# CRUD Strings
ADD_ROLE = T("Create Role")
s3.crud_strings[tablename] = Storage(
label_create = ADD_ROLE,
title_display = T("Role Details"),
title_list = T("Roles"),
title_update = T("Edit Role"),
label_list_button = T("List Roles"),
msg_record_created = T("Role added"),
msg_record_modified = T("Role updated"),
msg_record_deleted = T("Role deleted"),
msg_list_empty = T("No Roles defined"))
s3db.configure(tablename, main="role")
return s3_rest_controller("auth", resourcename)
# -----------------------------------------------------------------------------
@auth.s3_requires_membership(1)
def organisation():
"""
RESTful CRUD controller
@ToDo: Prevent multiple records for the same domain
"""
module = "auth"
tablename = "auth_organisation"
table = s3db[tablename]
s3.crud_strings[tablename] = Storage(
label_create = T("Add Organization Domain"),
title_display = T("Organization Domain Details"),
title_list = T("Organization Domains"),
title_update = T("Edit Organization Domain"),
label_list_button = T("List Organization Domains"),
label_delete_button = T("Delete Organization Domain"),
msg_record_created = T("Organization Domain added"),
msg_record_modified = T("Organization Domain updated"),
msg_record_deleted = T("Organization Domain deleted"),
msg_list_empty = T("No Organization Domains currently registered")
)
output = s3_rest_controller(module, resourcename)
return output
# -----------------------------------------------------------------------------
def user_create_onvalidation (form):
""" Server-side check that Password confirmation field is valid """
    if ("password_two" in form.request_vars and
            form.request_vars.password != form.request_vars.password_two):
form.errors.password = T("Password fields don't match")
return True
# =============================================================================
@auth.s3_requires_membership(1)
def acl():
"""
Preliminary controller for ACLs
for testing purposes, not for production use!
"""
module = "s3"
name = "permission"
table = auth.permission.table
tablename = table._tablename
table.group_id.requires = IS_ONE_OF(db, "auth_group.id", "%(role)s")
table.group_id.represent = lambda opt: opt and db.auth_group[opt].role or opt
table.controller.requires = IS_EMPTY_OR(IS_IN_SET(settings.modules.keys(),
zero="ANY"))
table.controller.represent = lambda opt: opt and \
"%s (%s)" % (opt,
auth.permission.modules.get(opt, {}).get("name_nice", opt)) or "ANY"
table.function.represent = lambda val: val and val or T("ANY")
table.tablename.requires = IS_EMPTY_OR(IS_IN_SET([t._tablename for t in db],
zero=T("ANY")))
table.tablename.represent = lambda val: val and val or T("ANY")
table.uacl.label = T("All Resources")
table.uacl.widget = S3ACLWidget.widget
table.uacl.requires = IS_ACL(auth.permission.PERMISSION_OPTS)
table.uacl.represent = lambda val: acl_represent(val,
auth.permission.PERMISSION_OPTS)
table.oacl.label = T("Owned Resources")
table.oacl.widget = S3ACLWidget.widget
table.oacl.requires = IS_ACL(auth.permission.PERMISSION_OPTS)
table.oacl.represent = lambda val: acl_represent(val,
auth.permission.PERMISSION_OPTS)
s3db.configure(tablename,
create_next = URL(r=request),
update_next = URL(r=request))
if "_next" in request.vars:
next = request.vars._next
s3db.configure(tablename, delete_next=next)
output = s3_rest_controller(module, name)
return output
# -----------------------------------------------------------------------------
def acl_represent(acl, options):
"""
Represent ACLs in tables
for testing purposes, not for production use!
"""
values = []
for o in options.keys():
if o == 0 and acl == 0:
values.append("%s" % options[o][0])
elif acl and acl & o == o:
values.append("%s" % options[o][0])
else:
values.append("_")
return " ".join(values)
# =============================================================================
# Ticket viewing
# =============================================================================
@auth.s3_requires_membership(1)
def ticket():
""" Ticket handler """
import traceback
from gluon.restricted import RestrictedError
if len(request.args) != 2:
session.error = T("Invalid ticket")
redirect(URL(r=request))
app = request.args[0]
ticket = request.args[1]
e = RestrictedError()
e.load(request, app, ticket)
return dict(app=app,
ticket=ticket,
traceback=s3base.Traceback(e.traceback),
code=e.code,
layer=e.layer)
# -----------------------------------------------------------------------------
# Web2Py Ticket Viewer functions Borrowed from admin application of web2py
@auth.s3_requires_membership(1)
def errors():
""" Error handler """
from gluon.admin import apath
from gluon.fileutils import listdir
for item in request.vars:
if item[:7] == "delete_":
os.unlink(apath("%s/errors/%s" % (appname, item[7:]), r=request))
func = lambda p: os.stat(apath("%s/errors/%s" % (appname, p), r=request)).st_mtime
tickets = sorted(listdir(apath("%s/errors/" % appname, r=request), "^\w.*"),
key=func,
reverse=True)
return dict(app=appname, tickets=tickets)
# =============================================================================
# Management scripts
# =============================================================================
@auth.s3_requires_membership(1)
def clean():
"""
Run an external script to clean this instance & reset to default values
visudo
web2py ALL=(ALL)NOPASSWD:/usr/local/bin/clean
"""
from subprocess import check_call
instance = settings.get_instance_name()
try:
check_call(["sudo /usr/local/bin/clean %s" % instance], shell=True)
except:
import sys
error = sys.exc_info()[1]
status = current.xml.json_message(False, 400,
"Script cannot be run: %s" % error)
raise HTTP(400, body=status)
# =============================================================================
# Create portable app
# =============================================================================
@auth.s3_requires_membership(1)
def portable():
""" Portable app creator"""
from gluon.admin import apath
import os
from operator import itemgetter, attrgetter
uploadfolder=os.path.join(apath("%s" % appname, r=request), "cache")
web2py_source = None
web2py_source_exists = False
last_build_exists = False
for base, dirs, files in os.walk(uploadfolder):
for filename in files:
if "web2py_source" in filename:
web2py_source_exists = True
web2py_source = filename
break
for base, dirs, files in os.walk(uploadfolder):
for filename in files:
if "download.zip" in filename:
last_build_exists = True
break
web2py_form = SQLFORM.factory(
Field("web2py_source",
"upload",
uploadfolder=uploadfolder,
requires=IS_UPLOAD_FILENAME(extension="zip"),
),
table_name="web2py_source",
)
if web2py_form.accepts(request.vars, keepvalues=True, session=None):
# Make sure only one web2py source file exists
files_to_remove = {}
for base, dirs, files in os.walk(uploadfolder):
for filename in files:
if "web2py_source" in filename:
files_to_remove[filename] = os.stat(os.path.join(uploadfolder, filename)).st_mtime
sorted_files = sorted(files_to_remove.items(), key=itemgetter(1))
for i in range(0, len(sorted_files) - 1): # 1 indicates leave one file
os.remove(os.path.join(uploadfolder,sorted_files[i][0]))
web2py_source = sorted_files[len(sorted_files) - 1][0]
web2py_source_exists = True
session.flash = T("Web2py executable zip file found - Upload to replace the existing file")
else:
        # Let's show an error message if the zip file isn't found
if not web2py_source_exists:
session.error = T("Web2py executable zip file needs to be uploaded to use this function.")
else:
session.flash = T("Web2py executable zip file found - Upload to replace the existing file")
    # Since the 2nd form depends on the zip having been uploaded,
    # we only show it if the upload completed successfully.
if web2py_source_exists:
generator_form = SQLFORM.factory(
Field("copy_database", "boolean"),
Field("copy_uploads", "boolean"),
)
if generator_form.accepts(request.vars, keepvalues=True, session=None):
if web2py_source_exists:
create_portable_app(web2py_source=web2py_source,\
copy_database = request.vars.copy_database,\
copy_uploads = request.vars.copy_uploads)
else:
session.error = T("Web2py executable zip file needs to be uploaded first to use this function.")
else:
generator_form = None
if last_build_exists:
download_last_form = SQLFORM.factory()
if download_last_form.accepts(request.vars, keepvalues=True, session=None):
portable_app = os.path.join(cachedir, "download.zip")
response.headers["Content-Type"] = contenttype.contenttype(portable_app)
response.headers["Content-Disposition"] = \
"attachment; filename=portable-sahana.zip"
return response.stream(portable_app)
else:
download_last_form = None
return dict(
web2py_form=web2py_form,
generator_form=generator_form,
download_last_form=download_last_form
)
# -----------------------------------------------------------------------------
def create_portable_app(web2py_source, copy_database=False, copy_uploads=False):
"""Function to create the portable app based on the parameters"""
from gluon.admin import apath
import shutil,tempfile,os
import zipfile
import contenttype
cachedir = os.path.join(apath("%s" % appname, r=request), "cache")
tempdir = tempfile.mkdtemp("", "eden-", cachedir)
workdir = os.path.join(tempdir, "web2py")
if copy_uploads:
ignore = shutil.ignore_patterns("*.db", "*.log", "*.table", "errors", "sessions", "compiled" , "cache", ".bzr", "*.pyc")
else:
ignore = shutil.ignore_patterns("*.db", "*.log", "*.table", "errors", "sessions", "compiled" , "uploads", "cache", ".bzr", "*.pyc")
appdir = os.path.join(workdir, "applications", appname)
shutil.copytree(apath("%s" % appname, r=request),\
appdir, \
ignore = ignore)
os.mkdir(os.path.join(appdir, "errors"))
os.mkdir(os.path.join(appdir, "sessions"))
os.mkdir(os.path.join(appdir, "cache"))
if not copy_uploads:
os.mkdir(os.path.join(appdir, "uploads"))
shutil.copy(os.path.join(appdir, "deployment-templates", "cron", "crontab"),\
os.path.join(appdir, "cron", "crontab"))
if copy_database:
# Copy the db for the portable app
s3db.load_all_models() # Load all modules to copy everything
portable_db = DAL("sqlite://storage.db", folder=os.path.join(appdir, "databases"))
for table in db:
portable_db.define_table(table._tablename, *[field for field in table])
portable_db.commit()
temp_csv_file=tempfile.mkstemp()
db.export_to_csv_file(open(temp_csv_file[1], "wb"))
portable_db.import_from_csv_file(open(temp_csv_file[1], "rb"))
os.unlink(temp_csv_file[1])
portable_db.commit()
# Replace the following with a more specific config
config_template = open(os.path.join(appdir, "deployment-templates", "models", "000_config.py"), "r")
new_config = open(os.path.join(appdir, "models", "000_config.py"), "w")
    # Replace the first occurrence of False with True
new_config.write(config_template.read().replace("False", "True", 1))
new_config.close()
    # Embed the web2py source with Eden for download
shutil.copy(os.path.join(cachedir, web2py_source), os.path.join(cachedir, "download.zip"))
portable_app = os.path.join(cachedir, "download.zip")
zip = zipfile.ZipFile(portable_app, "a", zipfile.ZIP_DEFLATED)
tozip = os.path.join(tempdir, "web2py")
rootlen = len(tempdir) + 1
for base, dirs, files in os.walk(tozip):
for directory in dirs:
directory = os.path.join(base, directory)
zip.write(directory, directory[rootlen:]) # Create empty directories
for file in files:
fn = os.path.join(base, file)
zip.write(fn, fn[rootlen:])
zip.close()
shutil.rmtree(tempdir)
response.headers["Content-Type"] = contenttype.contenttype(portable_app)
response.headers["Content-Disposition"] = \
"attachment; filename=portable-sahana.zip"
return response.stream(portable_app)
# =============================================================================
# Translation Functionality
# =============================================================================
def translate():
"""
Translation controller to enable four major workflows :-
1) Select modules which require translation. The list of strings
belonging to selected modules can be exported in .xls or .po format
2) Upload csv file containing strings with their translations which
are then merged with existing language file
3) Display the percentage of translation for each module for a given
language file
4) Upload a text file containing a list of new-line separated strings
which are to be considered for translation in the future. These
strings are termed as "user-supplied" strings and are picked up by
the first workflow when preparing the spreadsheet for translation
Note : The above functionalities require a considerable amount of
main memory to execute successfully.
@ToDo: Move opts 1, 3 & 4 outside the REST Controller
- only opt 2 makes use of this so it's unnecessary overhead!
"""
opt = get_vars.get("opt", None)
if not opt:
# Show index page
return dict()
# For the one which actually uses CRUD (opt 2)
s3.crud.submit_button = T("Upload")
def postp(r, output):
# Create a custom form
form = FORM()
# Prevent redirection
r.next = None
# Remove the false error from the form
# error : "Invalid form (re-opened in another window?)"
if response.error and not output["form"]["error"]:
response.error = None
if opt == "1":
# Select modules for Translation
from math import ceil
from s3.s3translate import TranslateAPI, Strings
if form.accepts(request.vars, session):
modlist = []
# If only one module is selected
if type(form.request_vars.module_list) == str:
modlist.append(form.request_vars.module_list)
# If multiple modules are selected
else:
modlist = form.request_vars.module_list
# If no module is selected
if modlist is None:
modlist = []
# If "Select All" option is chosen
all_template_flag = 0
if "all" in modlist:
all_template_flag = 1
A = TranslateAPI()
modlist = A.get_modules()
if "core" in form.request_vars.module_list:
modlist.append("core")
# Obtaining the language file from the language code
code = form.request_vars.new_code
if code == "":
code = form.request_vars.code
code += ".py"
# Obtaining the type of file to export to
filetype = form.request_vars.filetype
# Generate the file to download
X = Strings()
output = X.export_file(code, modlist, [], filetype, all_template_flag)
return output
# Create a form with checkboxes for list of modules
# @todo: migrate to formstyle, use regular form widgets
A = TranslateAPI()
# Retrieve list of active modules
activemodlist = settings.modules.keys()
modlist = activemodlist
# Hiding core modules
hidden_modules = A.core_modules
for module in hidden_modules:
if module in modlist:
modlist.remove(module)
modlist.sort()
modcount = len(modlist)
langlist = A.get_langcodes()
langlist.sort()
table = TABLE(_class="translation_module_table")
table.append(BR())
# Set number of columns in the form
NO_OF_COLUMNS = 3
# Display "NO_OF_COLUMNS" modules per row so as to utilize the page completely
num = 0
max_rows = int(ceil(modcount / float(NO_OF_COLUMNS)))
modules = settings.modules
while num < max_rows:
check = "yes"
mod_name = modules[modlist[num]].name_nice
mod_name = "%s (%s)" %(mod_name, modlist[num])
row = TR(TD(num + 1),
TD(INPUT(_type="checkbox",
_name="module_list",
_value=modlist[num],
_checked = check,
_class="translate-select-module",
)),
TD(mod_name),
)
for c in range(1, NO_OF_COLUMNS):
cmax_rows = num + (c * max_rows)
if cmax_rows < modcount:
mod_name = modules[modlist[cmax_rows]].name_nice
mod_name = "%s (%s)" % (mod_name, modlist[cmax_rows])
row.append(TD(cmax_rows + 1))
row.append(TD(INPUT(_type="checkbox",
_name="module_list",
_value=modlist[cmax_rows],
_checked = check,
_class="translate-select-module",
)))
row.append(TD(mod_name))
num += 1
table.append(row)
div = DIV(table, BR())
            # Toggle box to select/deselect all modules
row = TR(TD(INPUT(_type="checkbox",
_class="translate-select-all-modules",
_checked=check,
),
),
TD(T("Select all modules")),
)
script = '''$('.translate-select-all-modules').click(function(){$('.translate-select-module').prop('checked',$(this).prop('checked'));})'''
current.response.s3.jquery_ready.append(script)
div.append(row)
div.append(BR())
# Checkbox for inclusion of core files
row = TR(TD(INPUT(_type="checkbox", _name="module_list",
_value="core", _checked="yes")),
TD(T("Include core files")),
)
div.append(row)
div.append(BR())
# Checkbox for inclusion of templates
row = TR(TD(INPUT(_type="checkbox", _name="module_list",
_value="all")),
TD(T("Select all templates (All modules included)")),
)
div.append(row)
div.append(BR())
# Provide option to choose export format
row = TR(TD("%s:" % T("Export as")),
TD(INPUT(_type="radio", _name="filetype",
_value="xls", _checked="checked")),
TD(".xls (Excel)"),
TD(INPUT(_type="radio", _name="filetype", _value="po")),
TD(".po (Pootle)"),
BR(),
BR(),
)
div.append(row)
# Drop-down for available language codes
lang_dropdown = SELECT(_name = "code")
for lang in langlist:
lang_dropdown.append(lang)
row = TR(TD("%s:" % T("Select language code")),
TD(lang_dropdown),
TD("%s:" % T("Or add a new language code")),
TD(INPUT(_type="text", _name="new_code")),
)
div.append(row)
div.append(BR())
div.append(BR())
div.append(INPUT(_type="submit", _value=T("Submit")))
form.append(div)
# Add the custom form to the output
output["form"] = form
output["title"] = T("Select the required modules")
elif opt == "2":
# Upload translated files
form = output["form"]
div = DIV(BR(),
T("Note: Make sure that all the text cells are quoted in the csv file before uploading"),
)
form.append(div)
output["form"] = form
elif opt == "3":
# View Translation Percentage
if form.accepts(request.vars, session):
# Retrieve the translation percentage for each module
from math import ceil
from s3.s3translate import TranslateReportStatus
code = form.request_vars.code
S = TranslateReportStatus()
if form.request_vars.update_master == "on":
S.create_master_file()
percent_dict = S.get_translation_percentages(code)
modlist = []
for mod in sorted(percent_dict.keys()):
if mod != "complete_file":
modlist.append(mod)
modcount = len(modlist)
table = TABLE(_class="translation_module_table")
table.append(BR())
# Set number of columns in the table
NO_OF_COLUMNS = 3
# Display "NO_OF_COLUMNS" modules per row so as to utilize the page completely
num = 0
max_rows = int(ceil(modcount / float(NO_OF_COLUMNS)))
while num < max_rows:
row = TR(TD(modlist[num]), TD(percent_dict[modlist[num]]))
for c in range(1, NO_OF_COLUMNS):
cmax_rows = num + (c * max_rows)
if cmax_rows < modcount:
row.append(TD(modlist[cmax_rows]))
row.append(TD(percent_dict[modlist[cmax_rows]]))
num += 1
table.append(row)
# Add the table to output to display it
div = DIV(table,
BR(),
TR(TD("Overall translation percentage of the file: "),
TD(percent_dict["complete_file"])),
)
form.append(div)
output["form"] = form
output["title"] = T("Module-wise Percentage of Translated Strings")
s3.has_required = False
else:
# Display the form to view translated percentage
from s3.s3translate import TranslateAPI
A = TranslateAPI()
langlist = A.get_langcodes()
langlist.sort()
# Drop-down for selecting language codes
lang_col = TD()
lang_dropdown = SELECT(_name="code")
for lang in langlist:
lang_dropdown.append(lang)
lang_col.append(lang_dropdown)
row = TR(TD("%s:" % T("Language Code")), TD(lang_col))
div = DIV(row,
BR(),
)
row = TR(TD(INPUT(_type="checkbox", _name="update_master")),
TD(T("Update Master file")))
div.append(row)
div.append(BR())
div.append(BR())
div.append(INPUT(_type="submit", _value=T("Submit")))
form.append(div)
# Add the custom form to the output
output["title"] = T("Select the language file")
output["form"] = form
elif opt == "4":
# Add strings manually
if form.accepts(request.vars, session):
# Retrieve strings from the uploaded file
from s3.s3translate import TranslateReadFiles
f = request.vars.upload.file
strings = []
R = TranslateReadFiles()
for line in f:
strings.append(line)
# Update the file containing user strings
R.merge_user_strings_file(strings)
response.confirmation = T("File uploaded")
div = DIV(T("Upload a text file containing new-line separated strings:"),
INPUT(_type="file", _name="upload"),
BR(),
INPUT(_type="submit", _value=T("Upload")),
)
form.append(div)
output["form"] = form
return output
s3.postp = postp
output = s3_rest_controller("translate", "language")
return output
# =============================================================================
# Selenium Test Results
# =============================================================================
def result():
"""
Selenium Test Result Reports list
"""
file_list = UL()
static_path = os.path.join(request.folder, "static", "test")
for filename in os.listdir(static_path):
link = A(filename,
_href = URL(c = "static",
f = "test",
args = [filename]
)
)
file_list.append(link)
return dict(file_list=file_list)
# -----------------------------------------------------------------------------
def result_automated():
"""
Selenium Test Result Reports list
"""
file_list_automated = UL()
static_path = os.path.join(request.folder, "static", "test_automated")
filenames = os.listdir(static_path)
filenames.reverse()
for filename in filenames:
link = A(filename,
_href = URL(c = "static",
f = "test_automated",
args = [filename]
)
)
file_list_automated.append(link)
return dict(file_list_automated=file_list_automated)
# -----------------------------------------------------------------------------
def result_smoke():
"""
Selenium Test Result Reports list
"""
file_list_smoke = UL()
static_path = os.path.join(request.folder, "static", "test_smoke")
filenames = os.listdir(static_path)
filenames.reverse()
for filename in filenames:
link = A(filename,
_href = URL(c = "static",
f = "test_smoke",
args = [filename]
)
)
file_list_smoke.append(link)
return dict(file_list_smoke=file_list_smoke)
# -----------------------------------------------------------------------------
def result_roles():
"""
Selenium Test Result Reports list
"""
file_list_roles = UL()
static_path = os.path.join(request.folder, "static", "test_roles")
filenames = os.listdir(static_path)
filenames.reverse()
for filename in filenames:
link = A(filename,
_href = URL(c = "static",
f = "test_roles",
args = [filename]
)
)
file_list_roles.append(link)
return dict(file_list_roles=file_list_roles)
# END =========================================================================
|
|
# -*- coding: utf-8 -*-
__author__ = 'pbmanis'
"""
ChR2TraceAnalyzer provides a platform for the analysis of voltage or current traces.
The module provides standard tools to access data and databases, and flowcharts.
Two "trace" windows (PSP Plot and Data Plot) are provided to facilitate interactive
adjustment of analysis parameters.
Three "scatter plots" are provided on the right for summary analysis.
The output data is placed into a table.
The specifics of the analysis depend on the choice of flowchart.
12/5/2013-4/19/2014 pbmanis
"""
from PyQt4 import QtGui
from acq4.analysis.AnalysisModule import AnalysisModule
#import acq4.analysis.modules.EventDetector as EventDetector
from collections import OrderedDict
import pyqtgraph as pg
#from metaarray import MetaArray
#from DBCtrl import DBCtrl
#import numpy as np
from acq4.util.DirTreeWidget import DirTreeLoader
from acq4.util.FileLoader import FileLoader
import acq4.util.flowchart as fc # was acq4.pyqtgraph.flowchart - but it does not have the same filters
#import acq4.pyqtgraph.debug as debug
import os
import glob
import acq4.analysis.scripts.chr2analysis as ChR2
class ChR2TraceAnalyzer(AnalysisModule):
def __init__(self, host):
AnalysisModule.__init__(self, host)
self.ChR2 = ChR2.ChR2() # create instance of the analysis
fcpath = os.path.join(os.path.abspath(os.path.split(__file__)[0]), "flowcharts")
confpath = os.path.join(os.path.abspath(os.path.split(__file__)[0]), "configs")
self.dbIdentity = "ChR2TraceAnalysis" ## how we identify to the database; this determines which tables we own
self._sizeHint = (1024, 800) # try to establish size of window
self.confWidget = QtGui.QWidget()
self.confLoader = ConfLoader(self, confpath)
self.fileLoader = DataLoader(self, host.dataManager())
self.addPlotBtn = QtGui.QPushButton('Add Plot')
self.processWidget = QtGui.QWidget()
self.processLayout = QtGui.QGridLayout()
self.processWidget.setLayout(self.processLayout)
self.processProtocolBtn = QtGui.QPushButton('Process Protocol')
self.processSliceBtn = QtGui.QPushButton('Process Slice')
self.processCellBtn = QtGui.QPushButton('Process Cell')
self.processCheck = QtGui.QCheckBox('Auto')
self.processLayout.addWidget(self.processSliceBtn, 0, 0)
self.processLayout.addWidget(self.processCellBtn, 1, 0)
self.processLayout.addWidget(self.processProtocolBtn, 2, 0)
self.processLayout.addWidget(self.processCheck, 3, 0)
self.confWidget = QtGui.QWidget()
self.confLayout = QtGui.QGridLayout()
self.confWidget.setLayout(self.confLayout)
self.confLayout.addWidget(self.confLoader, 0, 0)
self.confLayout.addWidget(self.addPlotBtn, 1, 0)
self.confLayout.addWidget(self.processWidget, 2, 0)
self.plots = []
self.params = None
self.data = None
## setup map DB ctrl
#self.dbCtrl = DBCtrl(self, self.dbIdentity)
self.flowchart = fc.Flowchart(filePath=fcpath)
self.flowchart.addInput('Input')
self.flowchart.addOutput('Output')
#self.flowchart.sigChartLoaded.connect(self.connectPlots)
## create event detector
fcDir = os.path.join(os.path.abspath(os.path.split(__file__)[0]), "detector_fc")
# self.detector = EventDetector.EventDetector(host, flowchartDir=fcDir, dbIdentity=self.dbIdentity+'.events')
self.flowchart.sigChartLoaded.connect(self.connectPlots)
#elems = self.detector.listElements()
#print elems
# Setup basic GUI
self._elements_ = OrderedDict([
('Configuration', {'type': 'ctrl', 'object': self.confWidget, 'size': (200,200)}),
('File Loader', {'type': 'ctrl', 'object': self.fileLoader, 'size': (200, 300), 'pos': ('above', 'Configuration')}),
('Flowchart', {'type': 'ctrl', 'object': self.flowchart.widget(), 'size': (400,500), 'pos': ('right', 'Configuration')}),
('Data Plot', {'type': 'plot', 'pos': ('bottom', 'Flowchart'), 'size': (400,300)}),
('PSP Plot', {'type': 'plot', 'pos': ('bottom', 'Data Plot'), 'size': (400,300)}),
('Scatter Plot1', {'type': 'plot', 'pos': ('right',), 'size': (300,300)}),
('Scatter Plot2', {'type': 'plot', 'pos': ('bottom', 'Scatter Plot1'), 'size': (300,300)}),
('Scatter Plot3', {'type': 'plot', 'pos': ('bottom', 'Scatter Plot2'), 'size': (300,300)}),
('Results', {'type': 'table', 'size': (500,200), 'pos': 'bottom'}),
])
self.initializeElements()
self.addPlotBtn.clicked.connect(self.addPlotClicked)
self.processSliceBtn.clicked.connect(self.processSliceClicked)
self.processCellBtn.clicked.connect(self.processCellClicked)
self.processProtocolBtn.clicked.connect(self.processProtocolClicked)
self.flowchart.sigOutputChanged.connect(self.outputChanged)
self.fileLoader.sigFileLoaded.connect(self.fileLoaded)
self.fileLoader.sigSelectedFileChanged.connect(self.fileSelected)
def processSliceClicked(self):
"""
The slice directory is selected. For every Cell in the slice,
process the cell.
"""
slicedir = self.fileLoader.ui.dirTree.selectedFiles()[0]
if not slicedir.isDir():
raise Exception('Must select exactly 1 slice directory to process')
dircontents = glob.glob(os.path.join(slicedir.name(), 'cell_*'))
for d in dircontents:
self.processCellClicked(sel = d)
print '\nAnalysis of Slice completed'
def processCellClicked(self, sel=None):
"""
A cell directory is selected. For each protocol that matches
our protocol selector, process the protocol for this cell.
"""
print 'ProcessCell received a request for: ', sel
if sel is None or sel is False: # called from gui - convert handle to str for consistency
sel = self.fileLoader.ui.dirTree.selectedFiles()[0].name() # select the cell
if not os.path.isdir(sel):
raise Exception('Must select a cell Directory to process')
dircontents = glob.glob(os.path.join(sel, 'BlueLED*'))
if dircontents != []:
for d in dircontents:
self.fileLoader.loadFile([self.dataManager().dm.getDirHandle(d)])
self.processProtocolClicked()
print "\nAnalysis of cell completed"
return
dircontents = glob.glob(os.path.join(sel, 'Laser-Blue*'))
if dircontents != []:
for d in dircontents:
self.fileLoader.loadFile([self.dataManager().dm.getDirHandle(d)])
self.processProtocolClicked()
print "\nAnalysis of cell completed"
return
def fileLoaded(self, dh):
files = self.fileLoader.loadedFiles()
self.flowchart.setInput(Input=files[0])
table = self.getElement('Results')
table.setData(None)
self.ChR2.clearSummary()
def fileSelected(self, dh):
self.flowchart.setInput(Input=dh)
def connectPlots(self):
plots = ['Data Plot', 'PSP Plot']
for plotName in plots:
dp = self.getElement(plotName, create=False)
if dp is not None and plotName in self.flowchart.nodes().keys():
self.flowchart.nodes()[plotName].setPlot(dp)
def addPlotClicked(self):
plot = pg.PlotWidget()
self.plots.append(plot)
node = self.flowchart.createNode('PlotWidget')
name = node.name()
node.setPlot(plot)
dock = self._host_.dockArea.addDock(name=name, position='bottom')
dock.addWidget(plot)
#dock.setTitle(name)
def processProtocolClicked(self):
# print ChR2.getSummary()
self.ChR2.clearSummary()
output = []
table = self.getElement('Results')
for i, fh in enumerate(self.fileLoader.loadedFiles()):
# print 'dir fh: ', dir(fh)
# print 'name: %s' % fh.name()
try:
res = self.flowchart.process(Input=fh, Output=self.ChR2.protocolInfoLaser, Instance=self.ChR2)
output.append(res) # [res[k] for k in res.keys()])
except:
raise ValueError('ChR2TraceAnalyzer.processProtocolClicked: Error processing flowchart %s' % fh)
table.setData(output)
self.ChR2.printSummary()
pl = []
for i in ['1', '2', '3']:
name = 'Scatter Plot%s' % i
pl.append(self.getElement(name, create=False))
self.ChR2.plotSummary(plotWidget=pl)
print '\nAnalysis of protocol finished'
def outputChanged(self):
if self.processCheck.isChecked():
self.processProtocolClicked()
class ConfLoader(DirTreeLoader):
def __init__(self, host, path):
self.host = host
DirTreeLoader.__init__(self, path)
def new(self):
print("new")
return True
def load(self, handle):
print('load %s' % str(handle))
def save(self, handle):
print('save %s' % str(handle))
class DataLoader(FileLoader):
def __init__(self, host, dm):
self.host = host
FileLoader.__init__(self, dm)
def getHost(self):
return self.host
def loadFile(self, files):
if len(files) != 1:
raise Exception('Must select exactly 1 protocol directory to load')
self.loaded = []
self.ui.fileTree.clear()
dh = files[0]
for fileName in dh.ls():
handle = dh[fileName]
self.loaded.append(handle)
#name = fh.name(relativeTo=self.ui.dirTree.baseDirHandle())
item = QtGui.QTreeWidgetItem([fileName])
item.file = handle
self.ui.fileTree.addTopLevelItem(item)
self.sigFileLoaded.emit(dh)
def selectedFileChanged(self):
sel = self.ui.fileTree.currentItem()
if sel is not None:
self.sigSelectedFileChanged.emit(sel.file)
|
|
"""description: workflow for inference downstream
"""
import os
import re
import h5py
import glob
import logging
import numpy as np
import pandas as pd
from ggr.analyses.linking import regions_to_genes_w_correlation_filtering
from ggr.analyses.bioinformatics import run_gprofiler
from ggr.util.utils import run_shell_cmd
from ggr.util.go_utils import GoParams
from ggr.util.go_utils import is_enriched
from tronn.util.utils import DataKeys
from tronn.util.formats import array_to_bed
from tronn.interpretation.syntax import analyze_multiplicity
from tronn.interpretation.syntax import analyze_syntax
from tronn.interpretation.syntax import recombine_syntax_results
def _setup_motifs_files(args):
"""convenience fn, make sure setup is same across
multiplicity/orientation/spacing workflows
"""
motifs_files = {}
motifs_files["early"] = "{}/{}/ggr.scanmotifs.h5".format(
args.inputs["inference"][args.cluster]["scanmotifs_dir"],
args.inputs["inference"][args.cluster]["scanmotifs_early_dir"])
motifs_files["mid"] = "{}/{}/ggr.scanmotifs.h5".format(
args.inputs["inference"][args.cluster]["scanmotifs_dir"],
args.inputs["inference"][args.cluster]["scanmotifs_mid_dir"])
motifs_files["late"] = "{}/{}/ggr.scanmotifs.h5".format(
args.inputs["inference"][args.cluster]["scanmotifs_dir"],
args.inputs["inference"][args.cluster]["scanmotifs_late_dir"])
return motifs_files
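# For illustration (hypothetical config values): with scanmotifs_dir set to
# "/results/scanmotifs" and scanmotifs_early_dir set to "motifs.early", the
# returned dict maps "early" to "/results/scanmotifs/motifs.early/ggr.scanmotifs.h5",
# and likewise for the "mid" and "late" keys.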
def _setup_signal_mats(args):
"""convenience fn, load signals
"""
# read in rna signals
rna_signal_file = args.outputs["data"][
"rna.counts.pc.expressed.timeseries_adj.pooled.rlog.dynamic.traj.mat"]
rna_signal_mat = pd.read_csv(rna_signal_file, sep="\t", index_col=0)
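# The subtraction below re-expresses each gene's signal relative to its first
# timepoint (column 0), presumably so that downstream correlation filtering
# compares trajectories of change rather than absolute expression levels.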
rna_signal_mat[:] = np.subtract(
rna_signal_mat.values,
np.expand_dims(rna_signal_mat.values[:,0], axis=-1))
# read in region signals
atac_signal_file = args.outputs["data"][
"atac.counts.pooled.rlog.dynamic.traj.mat"]
region_signal_mat = pd.read_csv(atac_signal_file, sep="\t", index_col=0)
region_signal_mat[:] = np.subtract(
region_signal_mat.values,
np.expand_dims(region_signal_mat.values[:,0], axis=-1))
region_signal_mat = region_signal_mat.drop("d05", axis=1)
return rna_signal_mat, region_signal_mat
def run_multiplicity_workflow(
args, prefix, sig_pwms_file, out_dir,
solo_filter=False, enrichments=True, plot=True):
"""for sig pwms, look at multiplicity
"""
# params/dirs
_MAX_COUNT = 5
_MIN_HIT_REGIONS = 50
OUT_DIR = "{}/{}".format(
args.outputs["results"]["inference"]["dir"], out_dir)
os.system("mkdir -p {}".format(OUT_DIR))
TMP_DIR = "{}/tmp".format(OUT_DIR)
os.system("mkdir -p {}".format(TMP_DIR))
# set up motifs files being used
motifs_files = _setup_motifs_files(args)
# read in the sig pwms
sig_pwms_trajs = pd.read_csv(sig_pwms_file, sep="\t", header=0, index_col=0)
sig_pwms = list(sig_pwms_trajs.index.values)
logging.info("{} sig pwms found".format(len(sig_pwms)))
# read in the list of all pwm names
max_val_key = DataKeys.WEIGHTED_PWM_SCORES_POSITION_MAX_VAL
with h5py.File(motifs_files["early"], "r") as hf:
all_pwms = hf[max_val_key].attrs["pwm_names"]
num_pwms = len(all_pwms)
logging.info("{} total pwms".format(num_pwms))
# read in dynamic genes
filter_genes = pd.read_table(
args.outputs["data"][
"rna.counts.pc.expressed.timeseries_adj.pooled.rlog.dynamic.traj.mat"],
index_col=0).index.values.tolist()
# read in signals, other files/params
rna_signal_mat, region_signal_mat = _setup_signal_mats(args)
background_rna_file = args.outputs["data"]["rna.counts.pc.expressed.mat"]
links_file = args.outputs["results"]["linking"]["links.proximity"]
tss_file = args.outputs["annotations"]["tss.pc.bed"]
traj_to_group = args.inputs["inference"]["traj_to_group"]
signal_keys = args.inputs["inference"][args.cluster]["signal_keys"]
# results arrays
all_results = {}
for key in signal_keys:
all_results[key] = []
all_results["filt"] = []
all_results["num_regions"] = []
# analyze each pwm
keep_pwm_names = [] # attach to results h5 for plotting
keep_sig_pwm_names = [] # use for filtered sig pwms pattern file
for pwm_idx in range(len(sig_pwms)):
# name and global index
pwm_name = sig_pwms[pwm_idx]
pwm_name_clean = re.sub("HCLUST-\\d+_", "", pwm_name)
pwm_name_clean = re.sub(".UNK.0.A", "", pwm_name_clean)
pwm_global_idx = np.where(
[1 if pwm_name in global_name else 0
for global_name in all_pwms])[0][0]
print pwm_name_clean, pwm_global_idx
# reverse is in second half of indices
rc_idx = pwm_global_idx + num_pwms
pwm_indices = [pwm_global_idx, rc_idx]
print pwm_indices
# figure out which motif files to actually load
keys = []
trajs = sig_pwms_trajs.loc[pwm_name]
trajs = trajs[trajs != 0].index.values.tolist()
keys = list(set([traj_to_group[traj] for traj in trajs]))
sig_motifs_files = [motifs_files[key] for key in keys]
# TODO consider: loading traj mutatemotifs? would then need to pull the correct
# index to find the right mutate results
# get multiplicity
# filtering: looking for
results = analyze_multiplicity(
sig_motifs_files, pwm_indices, max_count=_MAX_COUNT, solo_filter=solo_filter)
filt = 0
if results is not None:
keep_pwm_names.append(pwm_name_clean)
# save to summary array
for key in signal_keys:
all_results[key].append(results[key]["count"])
all_results["num_regions"] = results["num_regions_per_count"]
# continue if not doing enrichments
if not enrichments:
filt = 1
max_enriched_thresh = _MAX_COUNT
else:
# per level, get region set and do gene set enrichments
hits_per_region = results["hits_per_region"]
max_enriched_thresh = 0
for count_thresh in range(1, _MAX_COUNT+1):
# get region ids
thresholded = hits_per_region[hits_per_region["hits"] >= count_thresh]
thresholded_metadata = thresholded.index.values
# check how many regions, do not continue if not enough regions
if thresholded_metadata.shape[0] < _MIN_HIT_REGIONS:
continue
# convert to BED
tmp_bed_file = "{}/{}.regions.count_thresh-{}.bed.gz".format(
TMP_DIR, pwm_name_clean, count_thresh)
if not os.path.isfile(tmp_bed_file):
array_to_bed(thresholded_metadata, tmp_bed_file, merge=False)
# get linked genes
tmp_genes_file = "{}/{}.linked_genes.count_thresh-{}.txt.gz".format(
TMP_DIR, pwm_name_clean, count_thresh)
if not os.path.isfile(tmp_genes_file):
regions_to_genes_w_correlation_filtering(
tmp_bed_file,
links_file,
tss_file,
tmp_genes_file,
region_signal_mat,
rna_signal_mat,
filter_by_score=0.5,
filter_genes=filter_genes,
corr_thresh=0,
pval_thresh=1)
# do not continue if no linked genes
if not os.path.isfile(tmp_genes_file):
continue
linked_genes = pd.read_csv(tmp_genes_file, sep="\t", header=0)
if linked_genes.shape[0] == 0:
continue
# run enrichment calculation
enrichment_file = "{}/{}.linked_genes.count_thresh-{}.go_gprofiler.txt".format(
TMP_DIR, pwm_name_clean, count_thresh)
if not os.path.isfile(enrichment_file):
run_gprofiler(
tmp_genes_file,
background_rna_file,
TMP_DIR,
ordered=True)
# check if any enrichment, if not then continue
if not is_enriched(enrichment_file):
continue
# if passed all of this, move results to not tmp
os.system("cp {} {}".format(
enrichment_file, OUT_DIR))
# update important variables
filt = 1
max_enriched_thresh = count_thresh
if max_enriched_thresh >= 2:
keep_sig_pwm_names.append(pwm_name)
# add in filt
all_results["filt"].append(filt)
# check in
print all_results["filt"], np.sum(all_results["filt"])
print keep_sig_pwm_names, len(keep_sig_pwm_names)
# save out summary array
h5_results_file = "{}/genome.homotypic.multiplicity_v_activity.h5".format(OUT_DIR)
if not os.path.isfile(h5_results_file):
filt = np.array(all_results["filt"])
for key in signal_keys:
summary_results = np.stack(all_results[key], axis=0)
with h5py.File(h5_results_file, "a") as out:
out_key = "{}/counts".format(key)
out.create_dataset(out_key, data=summary_results)
out[out_key].attrs["pwm_names"] = keep_pwm_names
out_key = "{}/filt".format(key)
out.create_dataset(out_key, data=filt)
out[out_key].attrs["pwm_names"] = keep_pwm_names
num_regions_per_count = np.stack(all_results["num_regions"], axis=0)
with h5py.File(h5_results_file, "a") as out:
out_key = "{}/num_regions".format(key)
out.create_dataset(out_key, data=num_regions_per_count)
out[out_key].attrs["pwm_names"] = keep_pwm_names
# save out new sig pwms file
new_sig_pwms_file = "{}/{}.multiplicity_filt.txt.gz".format(
OUT_DIR,
os.path.basename(sig_pwms_file).split(".txt")[0])
new_sig_pwms = sig_pwms_trajs.loc[keep_sig_pwm_names]
print new_sig_pwms
new_sig_pwms.to_csv(new_sig_pwms_file, sep="\t", compression="gzip")
# and plot
# TODO move this plot outside to make easier to adjust?
if plot:
plot_cmd = "{}/plot.results.multiplicity.summary.R {} {}/genome.homotypic.multiplicity TRUE {}".format(
"~/git/ggr-project/figs/fig_4.homotypic", h5_results_file,
args.outputs["results"]["inference"]["dir"],
" ".join(signal_keys))
print plot_cmd
os.system(plot_cmd)
return new_sig_pwms_file
def run_syntax_workflow(args, prefix, sig_pwms_file, out_dir):
"""for sig pwms, look at multiplicity
"""
# params
_MIN_HIT_REGIONS = 50
# dirs
OUT_DIR = "{}/{}".format(args.outputs["results"]["inference"]["dir"], out_dir)
os.system("mkdir -p {}".format(OUT_DIR))
TMP_DIR = "{}/tmp".format(OUT_DIR)
os.system("mkdir -p {}".format(TMP_DIR))
# set up motifs files being used
motifs_files = _setup_motifs_files(args)
# read in the sig pwms
sig_pwms_trajs = pd.read_csv(sig_pwms_file, sep="\t", header=0, index_col=0)
sig_pwms = list(sig_pwms_trajs.index.values)
logging.info("{} sig pwms found".format(len(sig_pwms)))
# read in the list of all pwm names
max_val_key = DataKeys.WEIGHTED_PWM_SCORES_POSITION_MAX_VAL
with h5py.File(motifs_files["early"], "r") as hf:
all_pwms = hf[max_val_key].attrs["pwm_names"]
num_pwms = len(all_pwms)
logging.info("{} total pwms".format(num_pwms))
# read in dynamic genes
filter_genes = pd.read_table(
args.outputs["data"][
"rna.counts.pc.expressed.timeseries_adj.pooled.rlog.dynamic.traj.mat"],
index_col=0).index.values.tolist()
# read in signals, other files/params
rna_signal_mat, region_signal_mat = _setup_signal_mats(args)
background_rna_file = args.outputs["data"]["rna.counts.pc.expressed.mat"]
links_file = args.outputs["results"]["linking"]["links.proximity"]
tss_file = args.outputs["annotations"]["tss.pc.bed"]
traj_to_group = args.inputs["inference"]["traj_to_group"]
signal_keys = args.inputs["inference"][args.cluster]["signal_keys"]
# results arrays
all_results = {}
for key in signal_keys:
all_results[key] = []
all_results["filt"] = []
# analyze each pwm
keep_pwm_names = [] # attach to results h5 for plotting
keep_sig_pwm_names = [] # use for filtered sig pwms pattern file
for pwm_idx in range(len(sig_pwms)):
# name and global index
pwm_name = sig_pwms[pwm_idx]
pwm_name_clean = re.sub("HCLUST-\\d+_", "", pwm_name)
pwm_name_clean = re.sub(".UNK.0.A", "", pwm_name_clean)
pwm_global_idx = np.where(
[1 if pwm_name in global_name else 0
for global_name in all_pwms])[0][0]
print pwm_name_clean, pwm_global_idx
# reverse is in second half of indices
rc_idx = pwm_global_idx + num_pwms
pwm_indices = [pwm_global_idx, rc_idx]
print pwm_indices
# figure out which motif files to actually load
keys = []
trajs = sig_pwms_trajs.loc[pwm_name]
trajs = trajs[trajs != 0].index.values.tolist()
keys = list(set([traj_to_group[traj] for traj in trajs]))
sig_motifs_files = [motifs_files[key] for key in keys]
# TODO consider: loading traj mutatemotifs? would then need to pull the correct
# index to find the right mutate results
results = {}
# go through orientations
possible_orientations = ["FWD", "REV"]
for orientation_a in possible_orientations:
for orientation_b in possible_orientations:
# set up new pwm indices
pwm_indices = []
# figure out whether fwd or rev
if orientation_a == "FWD":
pwm_indices.append(pwm_global_idx)
elif orientation_a == "REV":
pwm_indices.append(rc_idx)
# figure out whether fwd or rev
if orientation_b == "FWD":
pwm_indices.append(pwm_global_idx)
elif orientation_b == "REV":
pwm_indices.append(rc_idx)
assert len(pwm_indices) == 2
results_key = "{}_{}".format(orientation_a, orientation_b)
print results_key, pwm_indices
# build aligned array around first pwm
aligned_results = analyze_syntax(
sig_motifs_files, pwm_indices, solo_filter=True)
if aligned_results is None:
continue
if orientation_b == "FWD":
results[results_key] = aligned_results[pwm_global_idx]
elif orientation_b == "REV":
results[results_key] = aligned_results[rc_idx]
# 1) AGNOSTIC: both(anchor)_x_both - merge all
# 2) AGNOS_IN: both(anchor)_x_in - fwd_REV(+) + rev_REV(+) + fwd_FWD(-) + rev_FWD(-)
# 3) AGNOS_OUT: both(anchor)_x_out - fwd_FWD(+) + rev_FWD(+) + fwd_REV(-) + rev_REV(-)
# 4) SAME_DIR: same_x_same - fwd_FWD + rev_REV
# 5) IN: in_x_in - fwd_REV(+) + rev_FWD(-)
# 6) OUT: out_x_out - rev_FWD(+) + fwd_REV(-)
# NOTE: ignoring rev_BOTH and fwd_BOTH - same as 2,3
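# Worked example of the shorthand above: the "IN" pattern (read here as the two
# motifs pointing toward each other) combines FWD_REV hits assigned to the +
# strand with REV_FWD hits assigned to the - strand, which is exactly what the
# adjustments dict below passes to recombine_syntax_results for the "IN" key.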
adjustments = {
"BOTH_BOTH": (
["FWD_FWD", "FWD_REV", "REV_FWD", "REV_REV"],
[".", ".", ".", "."]),
"BOTH_IN": (
["FWD_FWD", "FWD_REV", "REV_FWD", "REV_REV"],
["-", "+", "-", "+"]),
"BOTH_OUT": (
["FWD_FWD", "FWD_REV", "REV_FWD", "REV_REV"],
["+", "-", "+", "-"]),
"SAME": (
["FWD_FWD", "REV_REV"],
[".", "."]),
"IN": (
["FWD_REV", "REV_FWD"],
["+", "-"]),
"OUT": (
["FWD_REV", "REV_FWD"],
["-", "+"])}
results_adjusted = {}
enrichments_summary = None
for adj_key in sorted(adjustments.keys()):
# adjust for correct patterns
results_adjusted[adj_key] = recombine_syntax_results(
results,
adjustments[adj_key][0],
adjustments[adj_key][1],
signal_keys)
# don't continue if does not exist or does not have enough regions
if len(results_adjusted[adj_key].keys()) == 0:
continue
if results_adjusted[adj_key]["scores"].shape[0] < _MIN_HIT_REGIONS:
continue
# TODO consider spacing here somehow?
# debug
print adj_key, results_adjusted[adj_key]["scores"].shape[0]
# convert to BED
tmp_bed_file = "{}/{}.regions.oriented.{}.bed.gz".format(
TMP_DIR, pwm_name_clean, adj_key)
if not os.path.isfile(tmp_bed_file):
array_to_bed(
results_adjusted[adj_key]["scores"].index.values,
tmp_bed_file, merge=False)
# get linked genes
tmp_genes_file = "{}/{}.linked_genes.oriented.{}.txt.gz".format(
TMP_DIR, pwm_name_clean, adj_key)
if not os.path.isfile(tmp_genes_file):
regions_to_genes_w_correlation_filtering(
tmp_bed_file,
links_file,
tss_file,
tmp_genes_file,
region_signal_mat,
rna_signal_mat,
filter_by_score=0.5,
filter_genes=filter_genes,
corr_thresh=0,
pval_thresh=1)
# do not continue if no linked genes
linked_genes = pd.read_csv(tmp_genes_file, sep="\t", header=0)
if linked_genes.shape[0] == 0:
continue
# run enrichment calculation
enrichment_file = "{}/{}.linked_genes.oriented.{}.go_gprofiler.txt".format(
TMP_DIR, pwm_name_clean, adj_key)
if not os.path.isfile(enrichment_file):
run_gprofiler(
tmp_genes_file,
background_rna_file,
TMP_DIR,
ordered=True)
# check if any enrichment, if not then continue
if not is_enriched(enrichment_file):
continue
# read in file and clean
syntax_enrichments = pd.read_csv(enrichment_file, sep="\t")
syntax_enrichments = syntax_enrichments[syntax_enrichments["domain"] == "BP"]
syntax_enrichments = syntax_enrichments[
["term.id", "p.value", "term.name"]]
syntax_enrichments["syntax"] = adj_key
print "term count:", syntax_enrichments.shape[0]
# add to summary
if enrichments_summary is None:
enrichments_summary = syntax_enrichments.copy()
else:
enrichments_summary = pd.concat([enrichments_summary, syntax_enrichments], axis=0)
enrichments_summary = enrichments_summary.sort_values("term.name")
# if passed all of this, move results to not tmp
os.system("cp {} {}".format(
enrichment_file, OUT_DIR))
# and now plot: plot prefix
plot_prefix = "{}/{}.{}".format(
TMP_DIR, pwm_name_clean, adj_key)
# plot pwm scores
score_spacing_distr_file = "{}.genome.active_pwm_scores.avg.txt.gz".format(
plot_prefix)
aligned_scores = results_adjusted[adj_key]["scores"]
num_examples = aligned_scores.shape[0]
aligned_scores = aligned_scores.mean(axis=0).transpose()
aligned_scores.loc[0] = 0
aligned_scores = aligned_scores.divide(aligned_scores.sum()).reset_index()
aligned_scores.columns = ["position", "active"]
aligned_scores.to_csv(
score_spacing_distr_file,
sep="\t", header=True, index=False, compression="gzip")
plot_cmd = "plot.homotypic.spacing.freq.indiv.R {} {} {}".format(
score_spacing_distr_file, num_examples, plot_prefix)
print plot_cmd
os.system(plot_cmd)
# plot signals
for signal_key in signal_keys:
signal_prefix = "{}.{}".format(plot_prefix, signal_key)
tasks = results_adjusted[adj_key][signal_key].keys()
for task in tasks:
task_prefix = "{}.{}".format(signal_prefix, task)
# save out
out_file = "{}.genome.txt.gz".format(
task_prefix)
results_adjusted[adj_key][signal_key][task].to_csv(
out_file,
sep="\t", header=True, index=False, compression="gzip")
# plot
plot_cmd = "plot.homotypic.spacing.signals.indiv.R {} {}".format(
out_file, task_prefix)
print plot_cmd
os.system(plot_cmd)
# continue if nothing enriched
if enrichments_summary is None:
continue
# clean up summary
enrichments_summary["log10pval"] = -np.log10(enrichments_summary["p.value"].values)
enrichments_summary = enrichments_summary.drop("p.value", axis=1)
summary_file = "{}/{}.summary.txt.gz".format(OUT_DIR, pwm_name_clean)
enrichments_summary.to_csv(summary_file, sep="\t", index=False, header=True, compression="gzip")
# TODO adjust cleanup
if False:
# remove substrings
for substring in REMOVE_SUBSTRINGS:
keep = [False if substring in term else True
for term in enrichments_summary["term.name"]]
keep_indices = np.where(keep)[0]
enrichments_summary = enrichments_summary.iloc[keep_indices]
# remove exact strings
keep = [False if term in REMOVE_EXACT_STRINGS else True
for term in enrichments_summary["term.name"]]
keep_indices = np.where(keep)[0]
enrichments_summary = enrichments_summary.iloc[keep_indices]
enrichments_summary = enrichments_summary.sort_values(["term.name", "log10pval"])
summary_file = "{}/{}.summary.filt.txt.gz".format(OUT_DIR, pwm_name_clean)
enrichments_summary.to_csv(summary_file, sep="\t", index=False, header=True, compression="gzip")
return None
def runall(args, prefix):
"""workflows for nn postprocessing
"""
# set up logging, files, folders
logger = logging.getLogger(__name__)
logger.info("WORKFLOW: run inference on NN models")
# set up data and results dir
data_dir = args.outputs["data"]["dir"]
run_shell_cmd("mkdir -p {}".format(data_dir))
out_data = args.outputs["data"]
results_dirname = "inference"
results_dir = "{}/{}".format(args.outputs["results"]["dir"], results_dirname)
args.outputs["results"][results_dirname] = {"dir": results_dir}
run_shell_cmd("mkdir -p {}".format(results_dir))
out_results = args.outputs["results"][results_dirname]
# -------------------------------------------
# NN ANALYSIS - scanmotifs, get differential
# input: dynamic traj data
# output: differential motifs, scanned examples
# -------------------------------------------
# TRONN: scanmotifs
# TRONN: call_differential_motifs
# TRONN: intersect_pwm_x_rna
# sig pwms file
sig_pwms = "{}/{}".format(
args.inputs["inference"][args.cluster]["sig_pwms_dir"],
args.inputs["inference"][args.cluster]["sig_pwms.rna_filt.corr_filt"])
# -------------------------------------------
# ANALYSIS - homotypic - look at multiplicity
# input: scanmotifs files
# output: syntax results
# -------------------------------------------
# observing multiplicity (homotypic clusters, no higher syntax) in the genome
# 1) across dynamic instances, do we see homotypic clusters of motifs?
# do not use solo filter, just care about if we see them dispersed in dynamic
# regulatory regions. NOTE that this will give you GO enrichments
# ANSWER: yes.
# output: heatmap of counts of each instance
analysis_dir = "homotypic.dynamic.all.multiplicity.TMP"
if not os.path.isdir(analysis_dir):
# TODO focus on results file (to split out plotting)?
run_multiplicity_workflow(
args, prefix, sig_pwms, analysis_dir,
solo_filter=False, enrichments=True, plot=False)
quit()
# 2) can they drive accessibility alone?
# ANSWER yes, in some cases
analysis_dir = "homotypic.dynamic.single_motif_attribution.multiplicity"
#if not os.path.isdir(multiplicity_dir):
# run_multiplicity_workflow(args, prefix, sig_pwms, analysis_dir, solo_filter=True, plot=False, enrichments=False)
# 3) confirm with simulations
# NN ANALYSIS - run multiplicity simulations
# determine thresholds for activation AND which ones activate accessibility alone
# NOTE that simulations provide the threshold (manually collect) and ability to drive accessibility alone
# -------------------------------------------
# ANALYSIS - homotypic - look at orientation/spacing
# input: scanmotifs files
# output: syntax results
# -------------------------------------------
# observing orientation/spacing in the genome
# 1) across dynamic instances, do we see orientation/spacing constraints (leading
# to accessibility/modification differences) on motifs? do they drive accessibility alone?
# use solo filter, this is the only way to remove effect of other TFs
# use enrichment, which then tells us if this orientation/spacing matters
# ANSWER: no to orientation, soft spacing constraint. yes they drive accessibility alone in some cases
analysis_dir = "homotypic.dynamic.single_motif_attribution.orientation_spacing"
if not os.path.isdir(analysis_dir):
run_syntax_workflow(args, prefix, sig_pwms, analysis_dir)  # solo filtering is applied inside run_syntax_workflow
# 2) confirm with simulations
# NN ANALYSIS - run orientation/spacing simulations
# use to determine spacing constraints AND which ones activate accessibility alone
# 3) rerun with all regions (ex GRHL)
# TODO build a manual summary: tells an aggregator fn which ones to grab
# -------------------------------------------
# ANALYSIS - homotypic - produce motif to gene set to gene function plots
# input: scanmotifs files
# output: gene function chart
# -------------------------------------------
# first, determine if multiplicity/orientation/spacing are enriched in the genome for each motif
# given above, we conclude that multiplicity is a thing, orientation not, spacing yes
# also use sims to give us predicted threshs and which ones activate on their own
# (hence can drive downstream function)
# build a manual summary file that will grab the appropriate gene function chart
# -------------------------------------------
# ANALYSIS - heterotypic - buildgrammars
# input: mutatemotifs outputs
# output: grammars
# -------------------------------------------
# tronn: run mutatemotifs using sig pwms above
# tronn: build grammars
# build 3s, but only consider 2s for main figure?
# still need a way to filter 3s to fit in supplements
# maybe higher threshold for 3s?
# or need a different test for 3s to get enrichment
# filter grammars
return args
|
|
from ctypes import *
import unittest
import sys, struct
def valid_ranges(*types):
# given a sequence of numeric types, collect their _type_
# attribute, which is a single format character compatible with
# the struct module, use the struct module to calculate the
# minimum and maximum value allowed for this format.
# Returns a list of (min, max) values.
result = []
for t in types:
fmt = t._type_
size = struct.calcsize(fmt)
a = struct.unpack(fmt, ("\x00"*32)[:size])[0]
b = struct.unpack(fmt, ("\xFF"*32)[:size])[0]
c = struct.unpack(fmt, ("\x7F"+"\x00"*32)[:size])[0]
d = struct.unpack(fmt, ("\x80"+"\xFF"*32)[:size])[0]
result.append((min(a, b, c, d), max(a, b, c, d)))
return result
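# A minimal sketch of how valid_ranges behaves (not part of the original test
# suite; values assume the usual two's-complement layouts):
def _example_valid_ranges():
    # c_ubyte ('B') spans 0..255 and c_byte ('b') spans -128..127, because the
    # byte patterns above cover the all-zeros, all-ones, 0x7F.. and 0x80..
    # extremes for each struct format code.
    return valid_ranges(c_ubyte, c_byte)  # -> [(0, 255), (-128, 127)]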
ArgType = type(byref(c_int(0)))
unsigned_types = [c_ubyte, c_ushort, c_uint, c_ulong]
signed_types = [c_byte, c_short, c_int, c_long, c_longlong]
float_types = [c_double, c_float]
try:
c_ulonglong
c_longlong
except NameError:
pass
else:
unsigned_types.append(c_ulonglong)
signed_types.append(c_longlong)
unsigned_ranges = valid_ranges(*unsigned_types)
signed_ranges = valid_ranges(*signed_types)
################################################################
class NumberTestCase(unittest.TestCase):
def test_default_init(self):
# default values are set to zero
for t in signed_types + unsigned_types + float_types:
self.failUnlessEqual(t().value, 0)
def test_unsigned_values(self):
# the value given to the constructor is available
# as the 'value' attribute
for t, (l, h) in zip(unsigned_types, unsigned_ranges):
self.failUnlessEqual(t(l).value, l)
self.failUnlessEqual(t(h).value, h)
def test_signed_values(self):
# see above
for t, (l, h) in zip(signed_types, signed_ranges):
self.failUnlessEqual(t(l).value, l)
self.failUnlessEqual(t(h).value, h)
def test_typeerror(self):
# Only numbers are allowed in the constructor,
# otherwise TypeError is raised
for t in signed_types + unsigned_types + float_types:
self.assertRaises(TypeError, t, "")
self.assertRaises(TypeError, t, None)
## def test_valid_ranges(self):
## # invalid values of the correct type
## # raise ValueError (not OverflowError)
## for t, (l, h) in zip(unsigned_types, unsigned_ranges):
## self.assertRaises(ValueError, t, l-1)
## self.assertRaises(ValueError, t, h+1)
def test_from_param(self):
# the from_param class method attribute always
# returns PyCArgObject instances
for t in signed_types + unsigned_types + float_types:
self.failUnlessEqual(ArgType, type(t.from_param(0)))
def test_byref(self):
# calling byref returns also a PyCArgObject instance
for t in signed_types + unsigned_types + float_types:
parm = byref(t())
self.failUnlessEqual(ArgType, type(parm))
def test_floats(self):
# c_float and c_double can be created from
# Python int, long and float
for t in float_types:
self.failUnlessEqual(t(2.0).value, 2.0)
self.failUnlessEqual(t(2).value, 2.0)
self.failUnlessEqual(t(2L).value, 2.0)
def test_integers(self):
# integers cannot be constructed from floats
for t in signed_types + unsigned_types:
self.assertRaises(TypeError, t, 3.14)
def test_sizes(self):
for t in signed_types + unsigned_types + float_types:
size = struct.calcsize(t._type_)
# sizeof of the type...
self.failUnlessEqual(sizeof(t), size)
# and sizeof of an instance
self.failUnlessEqual(sizeof(t()), size)
def test_alignments(self):
for t in signed_types + unsigned_types + float_types:
code = t._type_ # the typecode
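# struct lays out a leading 'c' plus padding up to the alignment boundary of
# the following type, so calcsize("c" + code) - calcsize(code) yields the
# native alignment requirement of the type with format character `code`.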
align = struct.calcsize("c%c" % code) - struct.calcsize(code)
# alignment of the type...
self.failUnlessEqual((code, alignment(t)),
(code, align))
# and alignment of an instance
self.failUnlessEqual((code, alignment(t())),
(code, align))
def test_int_from_address(self):
from array import array
for t in signed_types + unsigned_types:
# the array module doesn't support all format codes
# (no 'q' or 'Q')
try:
array(t._type_)
except ValueError:
continue
a = array(t._type_, [100])
# v now is an integer at an 'external' memory location
v = t.from_address(a.buffer_info()[0])
self.failUnlessEqual(v.value, a[0])
self.failUnlessEqual(type(v), t)
# changing the value at the memory location changes v's value also
a[0] = 42
self.failUnlessEqual(v.value, a[0])
def test_float_from_address(self):
from array import array
for t in float_types:
a = array(t._type_, [3.14])
v = t.from_address(a.buffer_info()[0])
self.failUnlessEqual(v.value, a[0])
self.failUnless(type(v) is t)
a[0] = 2.3456e17
self.failUnlessEqual(v.value, a[0])
self.failUnless(type(v) is t)
def test_char_from_address(self):
from ctypes import c_char
from array import array
a = array('c', 'x')
v = c_char.from_address(a.buffer_info()[0])
self.failUnlessEqual(v.value, a[0])
self.failUnless(type(v) is c_char)
a[0] = '?'
self.failUnlessEqual(v.value, a[0])
def test_init(self):
# c_int() can be initialized from Python's int, and c_int.
# Not from c_long or so, which seems strange, and should
# probably be changed:
self.assertRaises(TypeError, c_int, c_long(42))
## def test_perf(self):
## check_perf()
from ctypes import _SimpleCData
class c_int_S(_SimpleCData):
_type_ = "i"
__slots__ = []
def run_test(rep, msg, func, arg=None):
## items = [None] * rep
items = range(rep)
from time import clock
if arg is not None:
start = clock()
for i in items:
func(arg); func(arg); func(arg); func(arg); func(arg)
stop = clock()
else:
start = clock()
for i in items:
func(); func(); func(); func(); func()
stop = clock()
print "%15s: %.2f us" % (msg, ((stop-start)*1e6/5/rep))
def check_perf():
# Construct 5 objects
from ctypes import c_int
REP = 200000
run_test(REP, "int()", int)
run_test(REP, "int(999)", int)
run_test(REP, "c_int()", c_int)
run_test(REP, "c_int(999)", c_int)
run_test(REP, "c_int_S()", c_int_S)
run_test(REP, "c_int_S(999)", c_int_S)
# Python 2.3 -OO, win2k, P4 700 MHz:
#
# int(): 0.87 us
# int(999): 0.87 us
# c_int(): 3.35 us
# c_int(999): 3.34 us
# c_int_S(): 3.23 us
# c_int_S(999): 3.24 us
# Python 2.2 -OO, win2k, P4 700 MHz:
#
# int(): 0.89 us
# int(999): 0.89 us
# c_int(): 9.99 us
# c_int(999): 10.02 us
# c_int_S(): 9.87 us
# c_int_S(999): 9.85 us
if __name__ == '__main__':
## check_perf()
unittest.main()
|
|
from Quartz import *
import Quartz
import Utilities
import sys
def drawQuartzRomanText(context):
text = "Quartz"
textlen = len(text)
fontSize = 60
opaqueBlack = [0.0, 0.0, 0.0, 1.0]
opaqueRed = [0.663, 0.0, 0.031, 1.0]
# Set the fill color space. This sets the
# fill painting color to opaque black.
CGContextSetFillColorSpace(context,
Utilities.getTheCalibratedRGBColorSpace())
# The Cocoa framework calls the draw method with an undefined
# value of the text matrix. It's best to set it to what is needed by
# this code: the identity transform.
CGContextSetTextMatrix(context, CGAffineTransformIdentity)
# Set the font with the PostScript name "Times-Roman", at
# fontSize points, with the MacRoman encoding.
CGContextSelectFont(context, "Times-Roman", fontSize, kCGEncodingMacRoman)
# The default text drawing mode is fill. Draw the text at (70, 400).
CGContextShowTextAtPoint(context, 70, 400, text, textlen)
# Set the fill color to red.
CGContextSetFillColor(context, opaqueRed)
# Draw the next piece of text where the previous one left off.
CGContextShowText(context, text, textlen)
for i in range(3):
# Get the current text pen position.
p = CGContextGetTextPosition(context)
# Translate to the current text pen position.
CGContextTranslateCTM(context, p.x, p.y)
# Rotate clockwise by 90 degrees for the next
# piece of text.
CGContextRotateCTM(context, Utilities.DEGREES_TO_RADIANS(-90))
# Draw the next piece of text in black at the origin.
CGContextSetFillColor(context, opaqueBlack)
CGContextShowTextAtPoint(context, 0, 0, text, textlen)
# Draw the next piece of text where the previous piece
# left off and paint it with red.
CGContextSetFillColor(context, opaqueRed)
CGContextShowText(context, text, textlen)
def myCGContextStrokeLineSegments(context, s, count):
# CGContextStrokeLineSegments is available only on Tiger and later
# so if it isn't available, use an emulation of
# CGContextStrokeLineSegments. It is better to use the
# built-in CGContextStrokeLineSegments since it has significant
# performance optimizations on some hardware.
if hasattr(Quartz, 'CGContextStrokeLineSegments'):
CGContextStrokeLineSegments(context, s, count)
else:
CGContextBeginPath(context)
for k in xrange(0, count, 2):
CGContextMoveToPoint(context, s[k].x, s[k].y)
CGContextAddLineToPoint(context, s[k+1].x, s[k+1].y)
CGContextStrokePath(context)
_gridLines = []
def drawGridLines(context):
numlines = 60
if not _gridLines:
stepsize = 4.0
val = 0
for i in xrange(0, 2*numlines, 2):
_gridLines.append(CGPointMake(val, -60))
_gridLines.append(CGPointMake(val, 200))
val += stepsize
val = -20
for i in xrange(2*numlines, 4*numlines, 2):
_gridLines.append(CGPointMake(0, val))
_gridLines.append(CGPointMake(400, val))
val += stepsize
myCGContextStrokeLineSegments(context, _gridLines, len(_gridLines))
def drawQuartzTextWithTextModes(context):
fillText = "Fill "
strokeText = "Stroke "
fillAndStrokeText = "FillStroke "
invisibleText = "Invisible "
clipText = "ClipText "
fillStrokeClipText = "FillStrokeClip "
fontSize = 40.0
extraLeading = 5.0
dash = (1,1)
opaqueRed = (1.0, 0.0, 0.0, 1.0)
# Set the fill and stroke color space. This sets the
# fill and stroke painting color to opaque black.
CGContextSetFillColorSpace(context,
Utilities.getTheCalibratedRGBColorSpace())
CGContextSetStrokeColorSpace(context,
Utilities.getTheCalibratedRGBColorSpace())
# The Cocoa framework calls the draw method with an undefined
# value of the text matrix. It's best to set it to what is needed by
# this code: the identity transform.
CGContextSetTextMatrix(context, CGAffineTransformIdentity)
# Set the font with the PostScript name "Times-Roman", at
# fontSize points, with the MacRoman encoding.
CGContextSelectFont(context, "Times-Roman", fontSize, kCGEncodingMacRoman)
# ---- Text Line 1 ----
# Default text drawing mode is fill. Draw the text at (10, 400).
CGContextShowTextAtPoint(context, 10, 400, fillText, len(fillText))
# Set the fill color to red.
CGContextSetFillColor(context, opaqueRed)
CGContextSetTextPosition(context, 180, 400)
CGContextShowText(context, fillText, len(fillText))
# Translate down for the next line of text.
CGContextTranslateCTM(context, 0, -(fontSize + extraLeading))
# ---- Text Line 2 ----
# Now stroke the text by setting the text drawing mode
# to kCGTextStroke. When stroking text, Quartz uses the stroke
# color in the graphics state.
CGContextSetTextDrawingMode(context, kCGTextStroke)
CGContextShowTextAtPoint(context, 10, 400, strokeText, len(strokeText))
# When stroking text, the line width and other gstate parameters
# that affect stroking affect text stroking as well.
CGContextSetLineWidth(context, 2)
CGContextSetLineDash(context, 0, dash, 2)
CGContextSetTextPosition(context, 180, 400)
CGContextShowText(context, strokeText, len(strokeText))
# Reset the line dash and line width to their defaults.
CGContextSetLineDash(context, 0, None, 0)
CGContextSetLineWidth(context, 1)
# Translate down for the next line of text.
CGContextTranslateCTM(context, 0, -(fontSize + extraLeading))
# ---- Text Line 3 ----
# Set the text drawing mode so that text is both filled and
# stroked. This produces text that is filled with the fill
# color and stroked with the stroke color.
CGContextSetTextDrawingMode(context, kCGTextFillStroke)
CGContextShowTextAtPoint(context, 10, 400,
fillAndStrokeText, len(fillAndStrokeText))
# Now draw again with a thicker stroke width.
CGContextSetLineWidth(context, 2)
CGContextSetTextPosition(context, 180, 400)
CGContextShowText(context, fillAndStrokeText, len(fillAndStrokeText))
CGContextSetLineWidth(context, 1)
CGContextTranslateCTM(context, 0, -(fontSize + extraLeading))
# ---- Text Line 4 ----
# Set the text drawing mode to invisible so that the next piece of
# text does not appear. Quartz updates the text position as
# if it had been drawn.
CGContextSetTextDrawingMode(context, kCGTextInvisible)
CGContextShowTextAtPoint(context, 10, 400,
invisibleText, len(invisibleText))
CGContextSetTextDrawingMode(context, kCGTextFill)
CGContextSetTextPosition(context, 180, 400)
CGContextShowText(context, fillText, len(fillText))
CGContextTranslateCTM(context, 0, -(fontSize + extraLeading))
# ---- Text Line 5 ----
CGContextSaveGState(context)
if 1:
# Use the text as a clipping path.
CGContextSetTextDrawingMode(context, kCGTextClip)
CGContextShowTextAtPoint(context, 10, 400, clipText, len(clipText))
# Position and draw a grid of lines.
CGContextTranslateCTM(context, 10, 400)
drawGridLines(context)
CGContextRestoreGState(context)
CGContextSaveGState(context)
if 1:
# The current text position is that after the last piece
# of text has been drawn. Since CGContextSaveGState/
# CGContextRestoreGState do not affect the text position or
# the text matrix, the text position is that after the last
# text was "drawn", that drawn with the kCGTextClip mode
# above. This is where the next text drawn will go if it
# isn't explicitly positioned.
nextTextPosition = CGContextGetTextPosition(context)
# Draw so that the text is filled, stroked, and then used
# to clip subsequent drawing.
CGContextSetTextDrawingMode(context, kCGTextFillStrokeClip)
# Explicitly set the text position.
CGContextSetTextPosition(context, 180, 400)
nextTextPosition = CGContextGetTextPosition(context)
CGContextShowText(context, fillStrokeClipText, len(fillStrokeClipText))
# Adjust the location of the grid lines so that they overlap the
# text just drawn.
CGContextTranslateCTM(context, nextTextPosition.x, nextTextPosition.y)
# Draw the grid lines clipped by the text.
drawGridLines(context)
CGContextRestoreGState(context)
# showFlippedTextAtPoint is a cover routine for CGContextShowText
# that is useful for drawing text in a coordinate system where the y axis
# is flipped relative to the default Quartz coordinate system.
#
# This code assumes that the text matrix is only used to
# flip the text, not to perform scaling or any other
# possible use of the text matrix.
#
# This function preserves the a, b, c, and d components of
# the text matrix across its execution but updates the
# tx, ty components (the text position) to reflect the
# text just drawn. If all the text you draw is flipped, it
# isn't necessary to continually set the text matrix. Instead
# you could simply call CGContextSetTextMatrix once with
# the flipped matrix each time your drawing
# code is called.
def showFlippedTextAtPoint(c, x, y, text, textLen):
t = CGAffineTransform(1.0, 0.0, 0.0, -1.0, 0.0, 0.0)
# Get the existing text matrix.
s = CGContextGetTextMatrix(c)
# Set the text matrix to the one that flips in y.
CGContextSetTextMatrix(c, t)
# Draw the text at the point.
CGContextShowTextAtPoint(c, x, y, text, textLen)
# Get the updated text position.
p = CGContextGetTextPosition(c)
# Update the saved text matrix to reflect the updated
# text position.
s.tx = p.x ; s.ty = p.y
# Reset to the text matrix in effect when this
# routine was called but with the text position updated.
CGContextSetTextMatrix(c, s)
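# A minimal usage sketch for showFlippedTextAtPoint (not called anywhere in
# this file): inside a y-flipped coordinate system, draw an arbitrary label at
# a window-style (x, y) position while keeping the glyphs upright. The window
# height of 600 mirrors the flip used in drawQuartzTextWithTextMatrix below.
def exampleFlippedLabel(context, x, y):
    label = "Label"
    CGContextSelectFont(context, "Times-Roman", 24, kCGEncodingMacRoman)
    # Flip the CTM so +y runs down the window.
    CGContextConcatCTM(context, CGAffineTransformMake(1, 0, 0, -1, 0, 600))
    # Without the helper the glyphs would render upside down; the helper flips
    # the text matrix in y and restores it with an updated text position.
    showFlippedTextAtPoint(context, x, y, label, len(label))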
def drawQuartzTextWithTextMatrix(context):
fontSize = 60.0
extraLeading = 10.0
text = "Quartz "
textlen = len(text)
# The Cocoa framework calls the draw method with an undefined
# value of the text matrix. It's best to set it to what is needed by
# this code. Initially that is the identity transform.
CGContextSetTextMatrix(context, CGAffineTransformIdentity)
# Set the font with the PostScript name "Times-Roman", at
# fontSize points, with the MacRoman encoding.
CGContextSelectFont(context, "Times-Roman", fontSize, kCGEncodingMacRoman)
# ---- Text Line 1 ----
# Draw the text at (10, 600).
CGContextShowTextAtPoint(context, 10, 600, text, textlen)
# Get the current text position. The text pen is at the trailing
# point from the text just drawn.
textPosition = CGContextGetTextPosition(context)
# Set the text matrix to one that flips text in y and sets
# the text position to the user space coordinate (0,0).
t = CGAffineTransformMake(1, 0, 0, -1, 0, 0)
CGContextSetTextMatrix(context, t)
# Set the text position to the point where the previous text ended.
CGContextSetTextPosition(context, textPosition.x, textPosition.y)
# Draw the text at the current text position. It will be drawn
# flipped in y, relative to the text drawn previously.
CGContextShowText(context, text, textlen)
# ---- Text Line 2 ----
# Translate down for the next piece of text.
CGContextTranslateCTM(context, 0, -(3*fontSize + extraLeading))
CGContextSaveGState(context)
if 1:
# Change the text matrix to {1, 0, 0, 3, 0, 0}, which
# scales text by a factor of 1 in x and 3 in y.
# This scaling doesn't affect any drawing other than text
# drawing since only text drawing is transformed by
# the text matrix.
t = CGAffineTransformMake(1, 0, 0, 3, 0, 0)
CGContextSetTextMatrix(context, t)
# This text is scaled relative to the previous text
# because of the text matrix scaling.
CGContextShowTextAtPoint(context, 10, 600, text, textlen)
# This restores the graphics state to what it was at the time
# of the last CGContextSaveGState, but since the text matrix
# isn't part of the Quartz graphics state, it isn't affected.
CGContextRestoreGState(context)
# The text matrix isn't affected by CGContextSaveGState and
# CGContextRestoreGState. You can see this by observing that
# the next text piece appears immediately after the first piece
# and with the same text scaling as that text drawn with the
# text matrix established before we did CGContextRestoreGState.
CGContextShowText(context, text, textlen)
# ---- Text Line 3 ----
# Translate down for the next piece of text.
CGContextTranslateCTM(context, 0, -(fontSize + extraLeading))
# Reset the text matrix to the identity matrix.
CGContextSetTextMatrix(context, CGAffineTransformIdentity)
# Now draw text in a flipped coordinate system.
CGContextSaveGState(context)
if 1:
# Flip the coordinate system to mimic a coordinate system with the origin
# at the top-left corner of a window. The new origin is at 600 units in
# +y from the old origin and the y axis now increases with positive y
# going down the window.
CGContextConcatCTM(context, CGAffineTransformMake(1, 0, 0, -1, 0, 600))
# This text will be flipped along with the CTM.
CGContextShowTextAtPoint(context, 10, 10, text, textlen)
# Obtain the user space coordinates of the current text position.
textPosition = CGContextGetTextPosition(context)
# Draw text at that point but flipped in y.
showFlippedTextAtPoint(context, textPosition.x, textPosition.y, text, textlen)
CGContextRestoreGState(context)
|
|
"""
Test cases for the Comparisons class over the Chart elements
"""
from unittest import SkipTest, skipIf
import numpy as np
from holoviews.core import NdOverlay
from holoviews.core.options import Store
from holoviews.element import (
Area, BoxWhisker, Curve, Distribution, HSpan, Image, Points,
Rectangles, RGB, Scatter, Segments, Violin, VSpan, Path,
QuadMesh, Polygons
)
from holoviews.element.comparison import ComparisonTestCase
try:
import datashader as ds
except ImportError:
ds = None
try:
import spatialpandas as spd
except ImportError:
spd = None
try:
import shapely
except ImportError:
shapely = None
spd_available = skipIf(spd is None, "spatialpandas is not available")
shapelib_available = skipIf(shapely is None and spd is None,
'Neither shapely nor spatialpandas are available')
shapely_available = skipIf(shapely is None, 'shapely is not available')
ds_available = skipIf(ds is None, 'datashader not available')
class TestSelection1DExpr(ComparisonTestCase):
def setUp(self):
try:
import holoviews.plotting.bokeh # noqa
except:
raise SkipTest("Bokeh selection tests require bokeh.")
super().setUp()
self._backend = Store.current_backend
Store.set_current_backend('bokeh')
def tearDown(self):
Store.current_backend = self._backend
def test_area_selection_numeric(self):
area = Area([3, 2, 1, 3, 4])
expr, bbox, region = area._get_selection_expr_for_stream_value(bounds=(1, 0, 3, 2))
self.assertEqual(bbox, {'x': (1, 3)})
self.assertEqual(expr.apply(area), np.array([False, True, True, True, False]))
self.assertEqual(region, NdOverlay({0: VSpan(1, 3)}))
def test_area_selection_numeric_inverted(self):
area = Area([3, 2, 1, 3, 4]).opts(invert_axes=True)
expr, bbox, region = area._get_selection_expr_for_stream_value(bounds=(0, 1, 2, 3))
self.assertEqual(bbox, {'x': (1, 3)})
self.assertEqual(expr.apply(area), np.array([False, True, True, True, False]))
self.assertEqual(region, NdOverlay({0: HSpan(1, 3)}))
def test_area_selection_categorical(self):
area = Area((['B', 'A', 'C', 'D', 'E'], [3, 2, 1, 3, 4]))
expr, bbox, region = area._get_selection_expr_for_stream_value(
bounds=(0, 1, 2, 3), x_selection=['B', 'A', 'C']
)
self.assertEqual(bbox, {'x': ['B', 'A', 'C']})
self.assertEqual(expr.apply(area), np.array([True, True, True, False, False]))
self.assertEqual(region, NdOverlay({0: VSpan(0, 2)}))
def test_area_selection_numeric_index_cols(self):
area = Area([3, 2, 1, 3, 2])
expr, bbox, region = area._get_selection_expr_for_stream_value(
bounds=(1, 0, 3, 2), index_cols=['y']
)
self.assertEqual(bbox, {'x': (1, 3)})
self.assertEqual(expr.apply(area), np.array([False, True, True, False, True]))
self.assertEqual(region, None)
def test_curve_selection_numeric(self):
curve = Curve([3, 2, 1, 3, 4])
expr, bbox, region = curve._get_selection_expr_for_stream_value(bounds=(1, 0, 3, 2))
self.assertEqual(bbox, {'x': (1, 3)})
self.assertEqual(expr.apply(curve), np.array([False, True, True, True, False]))
self.assertEqual(region, NdOverlay({0: VSpan(1, 3)}))
def test_curve_selection_categorical(self):
curve = Curve((['B', 'A', 'C', 'D', 'E'], [3, 2, 1, 3, 4]))
expr, bbox, region = curve._get_selection_expr_for_stream_value(
bounds=(0, 1, 2, 3), x_selection=['B', 'A', 'C']
)
self.assertEqual(bbox, {'x': ['B', 'A', 'C']})
self.assertEqual(expr.apply(curve), np.array([True, True, True, False, False]))
self.assertEqual(region, NdOverlay({0: VSpan(0, 2)}))
def test_curve_selection_numeric_index_cols(self):
curve = Curve([3, 2, 1, 3, 2])
expr, bbox, region = curve._get_selection_expr_for_stream_value(
bounds=(1, 0, 3, 2), index_cols=['y']
)
self.assertEqual(bbox, {'x': (1, 3)})
self.assertEqual(expr.apply(curve), np.array([False, True, True, False, True]))
self.assertEqual(region, None)
def test_box_whisker_single(self):
box_whisker = BoxWhisker(list(range(10)))
expr, bbox, region = box_whisker._get_selection_expr_for_stream_value(
bounds=(0, 3, 1, 7)
)
self.assertEqual(bbox, {'y': (3, 7)})
self.assertEqual(expr.apply(box_whisker), np.array([
False, False, False, True, True, True, True, True, False, False
]))
self.assertEqual(region, NdOverlay({0: HSpan(3, 7)}))
def test_box_whisker_single_inverted(self):
box = BoxWhisker(list(range(10))).opts(invert_axes=True)
expr, bbox, region = box._get_selection_expr_for_stream_value(
bounds=(3, 0, 7, 1)
)
self.assertEqual(bbox, {'y': (3, 7)})
self.assertEqual(expr.apply(box), np.array([
False, False, False, True, True, True, True, True, False, False
]))
self.assertEqual(region, NdOverlay({0: VSpan(3, 7)}))
def test_box_whisker_cats(self):
box_whisker = BoxWhisker((['A', 'A', 'A', 'B', 'B', 'C', 'C', 'C', 'C', 'C'], list(range(10))), 'x', 'y')
expr, bbox, region = box_whisker._get_selection_expr_for_stream_value(
bounds=(0, 1, 2, 7), x_selection=['A', 'B']
)
self.assertEqual(bbox, {'y': (1, 7), 'x': ['A', 'B']})
self.assertEqual(expr.apply(box_whisker), np.array([
False, True, True, True, True, False, False, False, False, False
]))
self.assertEqual(region, NdOverlay({0: HSpan(1, 7)}))
def test_box_whisker_cats_index_cols(self):
box_whisker = BoxWhisker((['A', 'A', 'A', 'B', 'B', 'C', 'C', 'C', 'C', 'C'], list(range(10))), 'x', 'y')
expr, bbox, region = box_whisker._get_selection_expr_for_stream_value(
bounds=(0, 1, 2, 7), x_selection=['A', 'B'], index_cols=['x']
)
self.assertEqual(bbox, {'y': (1, 7), 'x': ['A', 'B']})
self.assertEqual(expr.apply(box_whisker), np.array([
True, True, True, True, True, False, False, False, False, False
]))
self.assertEqual(region, None)
def test_violin_single(self):
violin = Violin(list(range(10)))
expr, bbox, region = violin._get_selection_expr_for_stream_value(
bounds=(0, 3, 1, 7)
)
self.assertEqual(bbox, {'y': (3, 7)})
self.assertEqual(expr.apply(violin), np.array([
False, False, False, True, True, True, True, True, False, False
]))
self.assertEqual(region, NdOverlay({0: HSpan(3, 7)}))
def test_violin_single_inverted(self):
violin = Violin(list(range(10))).opts(invert_axes=True)
expr, bbox, region = violin._get_selection_expr_for_stream_value(
bounds=(3, 0, 7, 1)
)
self.assertEqual(bbox, {'y': (3, 7)})
self.assertEqual(expr.apply(violin), np.array([
False, False, False, True, True, True, True, True, False, False
]))
self.assertEqual(region, NdOverlay({0: VSpan(3, 7)}))
def test_violin_cats(self):
violin = Violin((['A', 'A', 'A', 'B', 'B', 'C', 'C', 'C', 'C', 'C'], list(range(10))), 'x', 'y')
expr, bbox, region = violin._get_selection_expr_for_stream_value(
bounds=(0, 1, 2, 7), x_selection=['A', 'B']
)
self.assertEqual(bbox, {'y': (1, 7), 'x': ['A', 'B']})
self.assertEqual(expr.apply(violin), np.array([
False, True, True, True, True, False, False, False, False, False
]))
self.assertEqual(region, NdOverlay({0: HSpan(1, 7)}))
def test_violin_cats_index_cols(self):
violin = Violin((['A', 'A', 'A', 'B', 'B', 'C', 'C', 'C', 'C', 'C'], list(range(10))), 'x', 'y')
expr, bbox, region = violin._get_selection_expr_for_stream_value(
bounds=(0, 1, 2, 7), x_selection=['A', 'B'], index_cols=['x']
)
self.assertEqual(bbox, {'y': (1, 7), 'x': ['A', 'B']})
self.assertEqual(expr.apply(violin), np.array([
True, True, True, True, True, False, False, False, False, False
]))
self.assertEqual(region, None)
def test_distribution_single(self):
dist = Distribution(list(range(10)))
expr, bbox, region = dist._get_selection_expr_for_stream_value(
bounds=(3, 0, 7, 1)
)
self.assertEqual(bbox, {'Value': (3, 7)})
self.assertEqual(expr.apply(dist), np.array([
False, False, False, True, True, True, True, True, False, False
]))
self.assertEqual(region, NdOverlay({0: VSpan(3, 7)}))
def test_distribution_single_inverted(self):
dist = Distribution(list(range(10))).opts(invert_axes=True)
expr, bbox, region = dist._get_selection_expr_for_stream_value(
bounds=(0, 3, 1, 7)
)
self.assertEqual(bbox, {'Value': (3, 7)})
self.assertEqual(expr.apply(dist), np.array([
False, False, False, True, True, True, True, True, False, False
]))
self.assertEqual(region, NdOverlay({0: HSpan(3, 7)}))
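# Illustrative sketch (an assumption, not part of the original test suite): the
# selection expressions asserted above are ordinary holoviews ``dim`` transforms,
# so a numeric bounds selection on a Curve should reduce to a pair of comparisons
# on the 'x' dimension.
def _example_numeric_selection_expr():
    from holoviews import Curve, dim
    curve = Curve([3, 2, 1, 3, 4])
    # Roughly what bounds=(1, 0, 3, 2) selects, since only 'x' is constrained.
    expr = (dim('x') >= 1) & (dim('x') <= 3)
    return expr.apply(curve)  # array([False,  True,  True,  True, False])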
class TestSelection2DExpr(ComparisonTestCase):
def setUp(self):
try:
import holoviews.plotting.bokeh # noqa
        except ImportError:
raise SkipTest("Bokeh selection tests require bokeh.")
super().setUp()
self._backend = Store.current_backend
Store.set_current_backend('bokeh')
def tearDown(self):
Store.current_backend = self._backend
def test_points_selection_numeric(self):
points = Points([3, 2, 1, 3, 4])
expr, bbox, region = points._get_selection_expr_for_stream_value(bounds=(1, 0, 3, 2))
self.assertEqual(bbox, {'x': (1, 3), 'y': (0, 2)})
self.assertEqual(expr.apply(points), np.array([False, True, True, False, False]))
self.assertEqual(region, Rectangles([(1, 0, 3, 2)]) * Path([]))
def test_points_selection_numeric_inverted(self):
points = Points([3, 2, 1, 3, 4]).opts(invert_axes=True)
expr, bbox, region = points._get_selection_expr_for_stream_value(bounds=(0, 1, 2, 3))
self.assertEqual(bbox, {'x': (1, 3), 'y': (0, 2)})
self.assertEqual(expr.apply(points), np.array([False, True, True, False, False]))
self.assertEqual(region, Rectangles([(0, 1, 2, 3)]) * Path([]))
@shapelib_available
def test_points_selection_geom(self):
points = Points([3, 2, 1, 3, 4])
geom = np.array([(-0.1, -0.1), (1.4, 0), (1.4, 2.2), (-0.1, 2.2)])
expr, bbox, region = points._get_selection_expr_for_stream_value(geometry=geom)
self.assertEqual(bbox, {'x': np.array([-0.1, 1.4, 1.4, -0.1]),
'y': np.array([-0.1, 0, 2.2, 2.2])})
self.assertEqual(expr.apply(points), np.array([False, True, False, False, False]))
self.assertEqual(region, Rectangles([]) * Path([list(geom)+[(-0.1, -0.1)]]))
@shapelib_available
def test_points_selection_geom_inverted(self):
points = Points([3, 2, 1, 3, 4]).opts(invert_axes=True)
geom = np.array([(-0.1, -0.1), (1.4, 0), (1.4, 2.2), (-0.1, 2.2)])
expr, bbox, region = points._get_selection_expr_for_stream_value(geometry=geom)
self.assertEqual(bbox, {'y': np.array([-0.1, 1.4, 1.4, -0.1]),
'x': np.array([-0.1, 0, 2.2, 2.2])})
self.assertEqual(expr.apply(points), np.array([False, False, True, False, False]))
self.assertEqual(region, Rectangles([]) * Path([list(geom)+[(-0.1, -0.1)]]))
def test_points_selection_categorical(self):
points = Points((['B', 'A', 'C', 'D', 'E'], [3, 2, 1, 3, 4]))
expr, bbox, region = points._get_selection_expr_for_stream_value(
bounds=(0, 1, 2, 3), x_selection=['B', 'A', 'C'], y_selection=None
)
self.assertEqual(bbox, {'x': ['B', 'A', 'C'], 'y': (1, 3)})
self.assertEqual(expr.apply(points), np.array([True, True, True, False, False]))
self.assertEqual(region, Rectangles([(0, 1, 2, 3)]) * Path([]))
def test_points_selection_numeric_index_cols(self):
points = Points([3, 2, 1, 3, 2])
expr, bbox, region = points._get_selection_expr_for_stream_value(
bounds=(1, 0, 3, 2), index_cols=['y']
)
self.assertEqual(bbox, {'x': (1, 3), 'y': (0, 2)})
self.assertEqual(expr.apply(points), np.array([False, False, True, False, False]))
self.assertEqual(region, None)
def test_scatter_selection_numeric(self):
scatter = Scatter([3, 2, 1, 3, 4])
expr, bbox, region = scatter._get_selection_expr_for_stream_value(bounds=(1, 0, 3, 2))
self.assertEqual(bbox, {'x': (1, 3), 'y': (0, 2)})
self.assertEqual(expr.apply(scatter), np.array([False, True, True, False, False]))
self.assertEqual(region, Rectangles([(1, 0, 3, 2)]) * Path([]))
def test_scatter_selection_numeric_inverted(self):
scatter = Scatter([3, 2, 1, 3, 4]).opts(invert_axes=True)
expr, bbox, region = scatter._get_selection_expr_for_stream_value(bounds=(0, 1, 2, 3))
self.assertEqual(bbox, {'x': (1, 3), 'y': (0, 2)})
self.assertEqual(expr.apply(scatter), np.array([False, True, True, False, False]))
self.assertEqual(region, Rectangles([(0, 1, 2, 3)]) * Path([]))
def test_scatter_selection_categorical(self):
scatter = Scatter((['B', 'A', 'C', 'D', 'E'], [3, 2, 1, 3, 4]))
expr, bbox, region = scatter._get_selection_expr_for_stream_value(
bounds=(0, 1, 2, 3), x_selection=['B', 'A', 'C'], y_selection=None
)
self.assertEqual(bbox, {'x': ['B', 'A', 'C'], 'y': (1, 3)})
self.assertEqual(expr.apply(scatter), np.array([True, True, True, False, False]))
self.assertEqual(region, Rectangles([(0, 1, 2, 3)]) * Path([]))
def test_scatter_selection_numeric_index_cols(self):
scatter = Scatter([3, 2, 1, 3, 2])
expr, bbox, region = scatter._get_selection_expr_for_stream_value(
bounds=(1, 0, 3, 2), index_cols=['y']
)
self.assertEqual(bbox, {'x': (1, 3), 'y': (0, 2)})
self.assertEqual(expr.apply(scatter), np.array([False, False, True, False, False]))
self.assertEqual(region, None)
def test_image_selection_numeric(self):
img = Image(([0, 1, 2], [0, 1, 2, 3], np.random.rand(4, 3)))
expr, bbox, region = img._get_selection_expr_for_stream_value(bounds=(0.5, 1.5, 2.1, 3.1))
self.assertEqual(bbox, {'x': (0.5, 2.1), 'y': (1.5, 3.1)})
self.assertEqual(expr.apply(img, expanded=True, flat=False), np.array([
[False, False, False],
[False, False, False],
[False, True, True],
[False, True, True]
]))
self.assertEqual(region, Rectangles([(0.5, 1.5, 2.1, 3.1)]) * Path([]))
def test_image_selection_numeric_inverted(self):
img = Image(([0, 1, 2], [0, 1, 2, 3], np.random.rand(4, 3))).opts(invert_axes=True)
expr, bbox, region = img._get_selection_expr_for_stream_value(bounds=(1.5, 0.5, 3.1, 2.1))
self.assertEqual(bbox, {'x': (0.5, 2.1), 'y': (1.5, 3.1)})
self.assertEqual(expr.apply(img, expanded=True, flat=False), np.array([
[False, False, False],
[False, False, False],
[False, True, True],
[False, True, True]
]))
self.assertEqual(region, Rectangles([(1.5, 0.5, 3.1, 2.1)]) * Path([]))
@ds_available
@spd_available
def test_img_selection_geom(self):
img = Image(([0, 1, 2], [0, 1, 2, 3], np.random.rand(4, 3)))
geom = np.array([(-0.4, -0.1), (0.6, -0.1), (0.4, 1.7), (-0.1, 1.7)])
expr, bbox, region = img._get_selection_expr_for_stream_value(geometry=geom)
self.assertEqual(bbox, {'x': np.array([-0.4, 0.6, 0.4, -0.1]),
'y': np.array([-0.1, -0.1, 1.7, 1.7])})
self.assertEqual(expr.apply(img, expanded=True, flat=False), np.array([
[ 1., np.nan, np.nan],
[ 1., np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan]
]))
self.assertEqual(region, Rectangles([]) * Path([list(geom)+[(-0.4, -0.1)]]))
@ds_available
def test_img_selection_geom_inverted(self):
img = Image(([0, 1, 2], [0, 1, 2, 3], np.random.rand(4, 3))).opts(invert_axes=True)
geom = np.array([(-0.4, -0.1), (0.6, -0.1), (0.4, 1.7), (-0.1, 1.7)])
expr, bbox, region = img._get_selection_expr_for_stream_value(geometry=geom)
self.assertEqual(bbox, {'y': np.array([-0.4, 0.6, 0.4, -0.1]),
'x': np.array([-0.1, -0.1, 1.7, 1.7])})
self.assertEqual(expr.apply(img, expanded=True, flat=False), np.array([
            [True, True, False],
            [False, False, False],
            [False, False, False],
            [False, False, False]
]))
self.assertEqual(region, Rectangles([]) * Path([list(geom)+[(-0.4, -0.1)]]))
def test_rgb_selection_numeric(self):
img = RGB(([0, 1, 2], [0, 1, 2, 3], np.random.rand(4, 3, 3)))
expr, bbox, region = img._get_selection_expr_for_stream_value(bounds=(0.5, 1.5, 2.1, 3.1))
self.assertEqual(bbox, {'x': (0.5, 2.1), 'y': (1.5, 3.1)})
self.assertEqual(expr.apply(img, expanded=True, flat=False), np.array([
[False, False, False],
[False, False, False],
[False, True, True],
[False, True, True]
]))
self.assertEqual(region, Rectangles([(0.5, 1.5, 2.1, 3.1)]) * Path([]))
def test_rgb_selection_numeric_inverted(self):
img = RGB(([0, 1, 2], [0, 1, 2, 3], np.random.rand(4, 3, 3))).opts(invert_axes=True)
expr, bbox, region = img._get_selection_expr_for_stream_value(bounds=(1.5, 0.5, 3.1, 2.1))
self.assertEqual(bbox, {'x': (0.5, 2.1), 'y': (1.5, 3.1)})
self.assertEqual(expr.apply(img, expanded=True, flat=False), np.array([
[False, False, False],
[False, False, False],
[False, True, True],
[False, True, True]
]))
self.assertEqual(region, Rectangles([(1.5, 0.5, 3.1, 2.1)]) * Path([]))
def test_quadmesh_selection(self):
n = 4
        coords = np.linspace(-1.5, 1.5, n)
        X, Y = np.meshgrid(coords, coords)
Qx = np.cos(Y) - np.cos(X)
Qy = np.sin(Y) + np.sin(X)
Z = np.sqrt(X**2 + Y**2)
qmesh = QuadMesh((Qx, Qy, Z))
expr, bbox, region = qmesh._get_selection_expr_for_stream_value(bounds=(0, -0.5, 0.7, 1.5))
self.assertEqual(bbox, {'x': (0, 0.7), 'y': (-0.5, 1.5)})
self.assertEqual(expr.apply(qmesh, expanded=True, flat=False), np.array([
[False, False, False, True],
[False, False, True, False],
[False, True, True, False],
[True, False, False, False]
]))
self.assertEqual(region, Rectangles([(0, -0.5, 0.7, 1.5)]) * Path([]))
def test_quadmesh_selection_inverted(self):
n = 4
        coords = np.linspace(-1.5, 1.5, n)
        X, Y = np.meshgrid(coords, coords)
Qx = np.cos(Y) - np.cos(X)
Qy = np.sin(Y) + np.sin(X)
Z = np.sqrt(X**2 + Y**2)
qmesh = QuadMesh((Qx, Qy, Z)).opts(invert_axes=True)
expr, bbox, region = qmesh._get_selection_expr_for_stream_value(bounds=(0, -0.5, 0.7, 1.5))
self.assertEqual(bbox, {'x': (-0.5, 1.5), 'y': (0, 0.7)})
self.assertEqual(expr.apply(qmesh, expanded=True, flat=False), np.array([
[False, False, False, True],
[False, False, True, True],
[False, True, False, False],
[True, False, False, False]
]))
self.assertEqual(region, Rectangles([(0, -0.5, 0.7, 1.5)]) * Path([]))
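# Illustrative sketch (assumption), mirroring test_points_selection_numeric above:
# for 2D elements the bounds selection should simply combine the x and y
# comparisons into a single ``dim`` expression.
def _example_2d_bounds_selection_expr():
    from holoviews import Points, dim
    points = Points([3, 2, 1, 3, 4])
    expr = ((dim('x') >= 1) & (dim('x') <= 3) &
            (dim('y') >= 0) & (dim('y') <= 2))
    return expr.apply(points)  # array([False,  True,  True, False, False])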
class TestSelectionGeomExpr(ComparisonTestCase):
def setUp(self):
try:
import holoviews.plotting.bokeh # noqa
        except ImportError:
raise SkipTest("Bokeh selection tests require bokeh.")
super().setUp()
self._backend = Store.current_backend
Store.set_current_backend('bokeh')
def tearDown(self):
Store.current_backend = self._backend
def test_rect_selection_numeric(self):
rect = Rectangles([(0, 1, 2, 3), (1, 3, 1.5, 4), (2.5, 4.2, 3.5, 4.8)])
expr, bbox, region = rect._get_selection_expr_for_stream_value(bounds=(0.5, 0.9, 3.4, 4.9))
self.assertEqual(bbox, {'x0': (0.5, 3.4), 'y0': (0.9, 4.9), 'x1': (0.5, 3.4), 'y1': (0.9, 4.9)})
self.assertEqual(expr.apply(rect), np.array([False, True, False]))
self.assertEqual(region, Rectangles([(0.5, 0.9, 3.4, 4.9)]) * Path([]))
expr, bbox, region = rect._get_selection_expr_for_stream_value(bounds=(0, 0.9, 3.5, 4.9))
self.assertEqual(bbox, {'x0': (0, 3.5), 'y0': (0.9, 4.9), 'x1': (0, 3.5), 'y1': (0.9, 4.9)})
self.assertEqual(expr.apply(rect), np.array([True, True, True]))
self.assertEqual(region, Rectangles([(0, 0.9, 3.5, 4.9)]) * Path([]))
def test_rect_selection_numeric_inverted(self):
rect = Rectangles([(0, 1, 2, 3), (1, 3, 1.5, 4), (2.5, 4.2, 3.5, 4.8)]).opts(invert_axes=True)
expr, bbox, region = rect._get_selection_expr_for_stream_value(bounds=(0.9, 0.5, 4.9, 3.4))
self.assertEqual(bbox, {'x0': (0.5, 3.4), 'y0': (0.9, 4.9), 'x1': (0.5, 3.4), 'y1': (0.9, 4.9)})
self.assertEqual(expr.apply(rect), np.array([False, True, False]))
self.assertEqual(region, Rectangles([(0.9, 0.5, 4.9, 3.4)]) * Path([]))
expr, bbox, region = rect._get_selection_expr_for_stream_value(bounds=(0.9, 0, 4.9, 3.5))
self.assertEqual(bbox, {'x0': (0, 3.5), 'y0': (0.9, 4.9), 'x1': (0, 3.5), 'y1': (0.9, 4.9)})
self.assertEqual(expr.apply(rect), np.array([True, True, True]))
self.assertEqual(region, Rectangles([(0.9, 0, 4.9, 3.5)]) * Path([]))
@shapely_available
def test_rect_geom_selection(self):
rect = Rectangles([(0, 1, 2, 3), (1, 3, 1.5, 4), (2.5, 4.2, 3.5, 4.8)])
geom = np.array([(-0.4, -0.1), (2.2, -0.1), (2.2, 4.1), (-0.1, 4.2)])
expr, bbox, region = rect._get_selection_expr_for_stream_value(geometry=geom)
self.assertEqual(bbox, {'x0': np.array([-0.4, 2.2, 2.2, -0.1]),
'y0': np.array([-0.1, -0.1, 4.1, 4.2]),
'x1': np.array([-0.4, 2.2, 2.2, -0.1]),
'y1': np.array([-0.1, -0.1, 4.1, 4.2])})
self.assertEqual(expr.apply(rect), np.array([True, True, False]))
self.assertEqual(region, Rectangles([]) * Path([list(geom)+[(-0.4, -0.1)]]))
@shapely_available
def test_rect_geom_selection_inverted(self):
rect = Rectangles([(0, 1, 2, 3), (1, 3, 1.5, 4), (2.5, 4.2, 3.5, 4.8)]).opts(invert_axes=True)
geom = np.array([(-0.4, -0.1), (3.2, -0.1), (3.2, 4.1), (-0.1, 4.2)])
expr, bbox, region = rect._get_selection_expr_for_stream_value(geometry=geom)
self.assertEqual(bbox, {'y0': np.array([-0.4, 3.2, 3.2, -0.1]),
'x0': np.array([-0.1, -0.1, 4.1, 4.2]),
'y1': np.array([-0.4, 3.2, 3.2, -0.1]),
'x1': np.array([-0.1, -0.1, 4.1, 4.2])})
self.assertEqual(expr.apply(rect), np.array([True, False, False]))
self.assertEqual(region, Rectangles([]) * Path([list(geom)+[(-0.4, -0.1)]]))
def test_segments_selection_numeric(self):
segs = Segments([(0, 1, 2, 3), (1, 3, 1.5, 4), (2.5, 4.2, 3.5, 4.8)])
expr, bbox, region = segs._get_selection_expr_for_stream_value(bounds=(0.5, 0.9, 3.4, 4.9))
self.assertEqual(bbox, {'x0': (0.5, 3.4), 'y0': (0.9, 4.9), 'x1': (0.5, 3.4), 'y1': (0.9, 4.9)})
self.assertEqual(expr.apply(segs), np.array([False, True, False]))
self.assertEqual(region, Rectangles([(0.5, 0.9, 3.4, 4.9)]) * Path([]))
expr, bbox, region = segs._get_selection_expr_for_stream_value(bounds=(0, 0.9, 3.5, 4.9))
self.assertEqual(bbox, {'x0': (0, 3.5), 'y0': (0.9, 4.9), 'x1': (0, 3.5), 'y1': (0.9, 4.9)})
self.assertEqual(expr.apply(segs), np.array([True, True, True]))
self.assertEqual(region, Rectangles([(0, 0.9, 3.5, 4.9)]) * Path([]))
def test_segs_selection_numeric_inverted(self):
segs = Segments([(0, 1, 2, 3), (1, 3, 1.5, 4), (2.5, 4.2, 3.5, 4.8)]).opts(invert_axes=True)
expr, bbox, region = segs._get_selection_expr_for_stream_value(bounds=(0.9, 0.5, 4.9, 3.4))
self.assertEqual(bbox, {'x0': (0.5, 3.4), 'y0': (0.9, 4.9), 'x1': (0.5, 3.4), 'y1': (0.9, 4.9)})
self.assertEqual(expr.apply(segs), np.array([False, True, False]))
self.assertEqual(region, Rectangles([(0.9, 0.5, 4.9, 3.4)]) * Path([]))
expr, bbox, region = segs._get_selection_expr_for_stream_value(bounds=(0.9, 0, 4.9, 3.5))
self.assertEqual(bbox, {'x0': (0, 3.5), 'y0': (0.9, 4.9), 'x1': (0, 3.5), 'y1': (0.9, 4.9)})
self.assertEqual(expr.apply(segs), np.array([True, True, True]))
self.assertEqual(region, Rectangles([(0.9, 0, 4.9, 3.5)]) * Path([]))
@shapely_available
def test_segs_geom_selection(self):
rect = Segments([(0, 1, 2, 3), (1, 3, 1.5, 4), (2.5, 4.2, 3.5, 4.8)])
geom = np.array([(-0.4, -0.1), (2.2, -0.1), (2.2, 4.1), (-0.1, 4.2)])
expr, bbox, region = rect._get_selection_expr_for_stream_value(geometry=geom)
self.assertEqual(bbox, {'x0': np.array([-0.4, 2.2, 2.2, -0.1]),
'y0': np.array([-0.1, -0.1, 4.1, 4.2]),
'x1': np.array([-0.4, 2.2, 2.2, -0.1]),
'y1': np.array([-0.1, -0.1, 4.1, 4.2])})
self.assertEqual(expr.apply(rect), np.array([True, True, False]))
self.assertEqual(region, Rectangles([]) * Path([list(geom)+[(-0.4, -0.1)]]))
@shapely_available
def test_segs_geom_selection_inverted(self):
rect = Segments([(0, 1, 2, 3), (1, 3, 1.5, 4), (2.5, 4.2, 3.5, 4.8)]).opts(invert_axes=True)
geom = np.array([(-0.4, -0.1), (3.2, -0.1), (3.2, 4.1), (-0.1, 4.2)])
expr, bbox, region = rect._get_selection_expr_for_stream_value(geometry=geom)
self.assertEqual(bbox, {'y0': np.array([-0.4, 3.2, 3.2, -0.1]),
'x0': np.array([-0.1, -0.1, 4.1, 4.2]),
'y1': np.array([-0.4, 3.2, 3.2, -0.1]),
'x1': np.array([-0.1, -0.1, 4.1, 4.2])})
self.assertEqual(expr.apply(rect), np.array([True, False, False]))
self.assertEqual(region, Rectangles([]) * Path([list(geom)+[(-0.4, -0.1)]]))
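# Illustrative sketch (assumption, not part of the original tests): the geometry
# selections exercised above effectively perform a point-in-polygon test, which can
# be reproduced directly with shapely for the Points geometry example further up.
def _example_geometry_selection_mask():
    import numpy as np
    from shapely.geometry import Point, Polygon
    poly = Polygon([(-0.1, -0.1), (1.4, 0), (1.4, 2.2), (-0.1, 2.2)])
    xs, ys = np.arange(5), np.array([3, 2, 1, 3, 4])
    # -> array([False,  True, False, False, False]), matching the Points assertion
    return np.array([poly.contains(Point(x, y)) for x, y in zip(xs, ys)])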
class TestSelectionPolyExpr(ComparisonTestCase):
def setUp(self):
try:
import holoviews.plotting.bokeh # noqa
        except ImportError:
raise SkipTest("Bokeh selection tests require bokeh.")
super().setUp()
self._backend = Store.current_backend
Store.set_current_backend('bokeh')
def tearDown(self):
Store.current_backend = self._backend
def test_poly_selection_numeric(self):
poly = Polygons([
[(0, 0), (0.2, 0.1), (0.3, 0.4), (0.1, 0.2)],
[(0.25, -.1), (0.4, 0.2), (0.6, 0.3), (0.5, 0.1)],
[(0.3, 0.3), (0.5, 0.4), (0.6, 0.5), (0.35, 0.45)]
])
expr, bbox, region = poly._get_selection_expr_for_stream_value(bounds=(0.2, -0.2, 0.6, 0.6))
self.assertEqual(bbox, {'x': (0.2, 0.6), 'y': (-0.2, 0.6)})
self.assertEqual(expr.apply(poly, expanded=False), np.array([False, True, True]))
self.assertEqual(region, Rectangles([(0.2, -0.2, 0.6, 0.6)]) * Path([]))
def test_poly_selection_numeric_inverted(self):
poly = Polygons([
[(0, 0), (0.2, 0.1), (0.3, 0.4), (0.1, 0.2)],
[(0.25, -.1), (0.4, 0.2), (0.6, 0.3), (0.5, 0.1)],
[(0.3, 0.3), (0.5, 0.4), (0.6, 0.5), (0.35, 0.45)]
]).opts(invert_axes=True)
expr, bbox, region = poly._get_selection_expr_for_stream_value(bounds=(0.2, -0.2, 0.6, 0.6))
self.assertEqual(bbox, {'y': (0.2, 0.6), 'x': (-0.2, 0.6)})
self.assertEqual(expr.apply(poly, expanded=False), np.array([False, False, True]))
self.assertEqual(region, Rectangles([(0.2, -0.2, 0.6, 0.6)]) * Path([]))
@shapely_available
def test_poly_geom_selection(self):
poly = Polygons([
[(0, 0), (0.2, 0.1), (0.3, 0.4), (0.1, 0.2)],
[(0.25, -.1), (0.4, 0.2), (0.6, 0.3), (0.5, 0.1)],
[(0.3, 0.3), (0.5, 0.4), (0.6, 0.5), (0.35, 0.45)]
])
geom = np.array([(0.2, -0.15), (0.5, 0), (0.75, 0.6), (0.1, 0.45)])
expr, bbox, region = poly._get_selection_expr_for_stream_value(geometry=geom)
self.assertEqual(bbox, {'x': np.array([0.2, 0.5, 0.75, 0.1]),
'y': np.array([-0.15, 0, 0.6, 0.45])})
self.assertEqual(expr.apply(poly, expanded=False), np.array([False, True, True]))
self.assertEqual(region, Rectangles([]) * Path([list(geom)+[(0.2, -0.15)]]))
@shapely_available
def test_poly_geom_selection_inverted(self):
poly = Polygons([
[(0, 0), (0.2, 0.1), (0.3, 0.4), (0.1, 0.2)],
[(0.25, -.1), (0.4, 0.2), (0.6, 0.3), (0.5, 0.1)],
[(0.3, 0.3), (0.5, 0.4), (0.6, 0.5), (0.35, 0.45)]
]).opts(invert_axes=True)
geom = np.array([(0.2, -0.15), (0.5, 0), (0.75, 0.6), (0.1, 0.6)])
expr, bbox, region = poly._get_selection_expr_for_stream_value(geometry=geom)
self.assertEqual(bbox, {'y': np.array([0.2, 0.5, 0.75, 0.1]),
'x': np.array([-0.15, 0, 0.6, 0.6])})
self.assertEqual(expr.apply(poly, expanded=False), np.array([False, False, True]))
self.assertEqual(region, Rectangles([]) * Path([list(geom)+[(0.2, -0.15)]]))
|
|
from rest_framework import generics, permissions as drf_permissions
from rest_framework.exceptions import ValidationError, NotFound, PermissionDenied
from framework.auth.oauth_scopes import CoreScopes
from osf.models import AbstractNode, Registration
from api.base import permissions as base_permissions
from api.base import generic_bulk_views as bulk_views
from api.base.filters import ListFilterMixin
from api.base.views import JSONAPIBaseView, BaseContributorDetail, BaseContributorList, BaseNodeLinksDetail, BaseNodeLinksList, WaterButlerMixin
from api.base.serializers import HideIfWithdrawal, LinkedRegistrationsRelationshipSerializer
from api.base.serializers import LinkedNodesRelationshipSerializer
from api.base.pagination import NodeContributorPagination
from api.base.parsers import JSONAPIRelationshipParser
from api.base.parsers import JSONAPIRelationshipParserForRegularJSON
from api.base.utils import get_user_auth, default_registration_list_queryset, default_registration_permission_queryset, is_bulk_request, is_truthy
from api.comments.serializers import RegistrationCommentSerializer, CommentCreateSerializer
from api.identifiers.serializers import RegistrationIdentifierSerializer
from api.nodes.views import NodeIdentifierList
from api.users.views import UserMixin
from api.nodes.permissions import (
ReadOnlyIfRegistration,
ContributorDetailPermissions,
ContributorOrPublic,
ContributorOrPublicForRelationshipPointers,
AdminOrPublic,
ExcludeWithdrawals,
NodeLinksShowIfVersion,
)
from api.registrations.serializers import (
RegistrationSerializer,
RegistrationDetailSerializer,
RegistrationContributorsSerializer,
RegistrationProviderSerializer
)
from api.nodes.filters import NodesFilterMixin
from api.nodes.views import (
NodeMixin, NodeRegistrationsList, NodeLogList,
NodeCommentsList, NodeProvidersList, NodeFilesList, NodeFileDetail,
NodeInstitutionsList, NodeForksList, NodeWikiList, LinkedNodesList,
NodeViewOnlyLinksList, NodeViewOnlyLinkDetail, NodeCitationDetail, NodeCitationStyleDetail,
NodeLinkedRegistrationsList,
)
from api.registrations.serializers import RegistrationNodeLinksSerializer, RegistrationFileSerializer
from api.wikis.serializers import RegistrationWikiSerializer
from api.base.utils import get_object_or_error
class RegistrationMixin(NodeMixin):
"""Mixin with convenience methods for retrieving the current registration based on the
current URL. By default, fetches the current registration based on the node_id kwarg.
"""
serializer_class = RegistrationSerializer
node_lookup_url_kwarg = 'node_id'
def get_node(self, check_object_permissions=True):
node = get_object_or_error(
AbstractNode,
self.kwargs[self.node_lookup_url_kwarg],
self.request,
display_name='node'
)
# Nodes that are folders/collections are treated as a separate resource, so if the client
# requests a collection through a node endpoint, we return a 404
if node.is_collection or not node.is_registration:
raise NotFound
# May raise a permission denied
if check_object_permissions:
self.check_object_permissions(self.request, node)
return node
class RegistrationList(JSONAPIBaseView, generics.ListAPIView, bulk_views.BulkUpdateJSONAPIView, NodesFilterMixin):
"""Node Registrations.
    Registrations are read-only snapshots of a project. This view is a list of all current registrations to which a user
    has access. A withdrawn registration will display a limited subset of information, namely, title, description,
date_created, registration, withdrawn, date_registered, withdrawal_justification, and registration supplement. All
other fields will be displayed as null. Additionally, the only relationships permitted to be accessed for a withdrawn
registration are the contributors - other relationships will return a 403.
Each resource contains the full representation of the registration, meaning additional requests to an individual
    registration's detail view are not necessary. Unregistered nodes cannot be accessed through this endpoint.
##Registration Attributes
Registrations have the "registrations" `type`.
name type description
=======================================================================================================
title string title of the registered project or component
description string description of the registered node
        category                        string             node category, must be one of the allowed values
date_created iso8601 timestamp timestamp that the node was created
date_modified iso8601 timestamp timestamp when the node was last updated
tags array of strings list of tags that describe the registered node
current_user_can_comment boolean Whether the current user is allowed to post comments
current_user_permissions array of strings list of strings representing the permissions for the current user on this node
fork boolean is this project a fork?
registration boolean has this project been registered? (always true - may be deprecated in future versions)
collection boolean is this registered node a collection? (always false - may be deprecated in future versions)
node_license object details of the license applied to the node
year string date range of the license
copyright_holders array of strings holders of the applied license
public boolean has this registration been made publicly-visible?
withdrawn boolean has this registration been withdrawn?
date_registered iso8601 timestamp timestamp that the registration was created
embargo_end_date iso8601 timestamp when the embargo on this registration will be lifted (if applicable)
withdrawal_justification string reasons for withdrawing the registration
pending_withdrawal boolean is this registration pending withdrawal?
pending_withdrawal_approval boolean is this registration pending approval?
pending_embargo_approval boolean is the associated Embargo awaiting approval by project admins?
registered_meta dictionary registration supplementary information
registration_supplement string registration template
##Relationships
###Registered from
The registration is branched from this node.
###Registered by
The registration was initiated by this user.
###Other Relationships
See documentation on registered_from detail view. A registration has many of the same properties as a node.
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
#This Request/Response
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.NODE_REGISTRATIONS_READ]
required_write_scopes = [CoreScopes.NODE_REGISTRATIONS_WRITE]
serializer_class = RegistrationSerializer
view_category = 'registrations'
view_name = 'registration-list'
ordering = ('-date_modified',)
model_class = Registration
# overrides BulkUpdateJSONAPIView
def get_serializer_class(self):
"""
Use RegistrationDetailSerializer which requires 'id'
"""
if self.request.method in ('PUT', 'PATCH'):
return RegistrationDetailSerializer
else:
return RegistrationSerializer
# overrides NodesFilterMixin
def get_default_queryset(self):
return default_registration_list_queryset() & default_registration_permission_queryset(self.request.user)
def is_blacklisted(self):
query_params = self.parse_query_params(self.request.query_params)
        for key, field_names in query_params.items():
            for field_name, data in field_names.items():
field = self.serializer_class._declared_fields.get(field_name)
if isinstance(field, HideIfWithdrawal):
return True
return False
# overrides ListAPIView, ListBulkCreateJSONAPIView
def get_queryset(self):
# For bulk requests, queryset is formed from request body.
if is_bulk_request(self.request):
auth = get_user_auth(self.request)
registrations = Registration.objects.filter(guids___id__in=[registration['id'] for registration in self.request.data])
# If skip_uneditable=True in query_params, skip nodes for which the user
# does not have EDIT permissions.
if is_truthy(self.request.query_params.get('skip_uneditable', False)):
has_permission = registrations.filter(contributor__user_id=auth.user.id, contributor__write=True).values_list('guids___id', flat=True)
return Registration.objects.filter(guids___id__in=has_permission)
for registration in registrations:
if not registration.can_edit(auth):
raise PermissionDenied
return registrations
blacklisted = self.is_blacklisted()
registrations = self.get_queryset_from_request().distinct('id', 'date_modified')
# If attempting to filter on a blacklisted field, exclude withdrawals.
if blacklisted:
return registrations.exclude(retraction__isnull=False)
return registrations
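# Illustrative client-side sketch (an assumption, not used by this module): listing
# registrations through this view and filtering on an attribute, following the
# filter[<fieldname>] convention documented in the docstrings in this file. The
# host URL and the filterable 'title' field are assumptions here.
def _example_list_registrations(title_substring):
    import requests
    resp = requests.get(
        'https://api.osf.io/v2/registrations/',
        params={'filter[title]': title_substring, 'page': 1},
    )
    resp.raise_for_status()
    return [(reg['id'], reg['attributes']['title']) for reg in resp.json()['data']]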
class RegistrationDetail(JSONAPIBaseView, generics.RetrieveUpdateAPIView, RegistrationMixin, WaterButlerMixin):
"""Node Registrations.
Registrations are read-only snapshots of a project. This view shows details about the given registration.
Each resource contains the full representation of the registration, meaning additional requests to an individual
registration's detail view are not necessary. A withdrawn registration will display a limited subset of information,
namely, title, description, date_created, registration, withdrawn, date_registered, withdrawal_justification, and
registration supplement. All other fields will be displayed as null. Additionally, the only relationships permitted
to be accessed for a withdrawn registration are the contributors - other relationships will return a 403.
##Registration Attributes
Registrations have the "registrations" `type`.
name type description
=======================================================================================================
title string title of the registered project or component
description string description of the registered node
        category                        string             node category, must be one of the allowed values
date_created iso8601 timestamp timestamp that the node was created
date_modified iso8601 timestamp timestamp when the node was last updated
tags array of strings list of tags that describe the registered node
current_user_can_comment boolean Whether the current user is allowed to post comments
current_user_permissions array of strings list of strings representing the permissions for the current user on this node
fork boolean is this project a fork?
registration boolean has this project been registered? (always true - may be deprecated in future versions)
collection boolean is this registered node a collection? (always false - may be deprecated in future versions)
node_license object details of the license applied to the node
year string date range of the license
copyright_holders array of strings holders of the applied license
public boolean has this registration been made publicly-visible?
withdrawn boolean has this registration been withdrawn?
date_registered iso8601 timestamp timestamp that the registration was created
embargo_end_date iso8601 timestamp when the embargo on this registration will be lifted (if applicable)
withdrawal_justification string reasons for withdrawing the registration
pending_withdrawal boolean is this registration pending withdrawal?
pending_withdrawal_approval boolean is this registration pending approval?
pending_embargo_approval boolean is the associated Embargo awaiting approval by project admins?
registered_meta dictionary registration supplementary information
registration_supplement string registration template
##Actions
###Update
Method: PUT / PATCH
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": {
"type": "registrations", # required
"id": {registration_id}, # required
"attributes": {
"public": true # required
}
}
}
Success: 200 OK + node representation
To turn a registration from private to public, issue either a PUT or a PATCH request against the `/links/self` URL.
Registrations can only be turned from private to public, not vice versa. The "public" field is the only field that can
be modified on a registration and you must have admin permission to do so.
##Relationships
###Registered from
The registration is branched from this node.
###Registered by
The registration was initiated by this user.
###Other Relationships
See documentation on registered_from detail view. A registration has many of the same properties as a node.
##Links
self: the canonical api endpoint of this registration
html: this registration's page on the OSF website
#This Request/Response
"""
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
AdminOrPublic,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.NODE_REGISTRATIONS_READ]
required_write_scopes = [CoreScopes.NODE_REGISTRATIONS_WRITE]
serializer_class = RegistrationDetailSerializer
view_category = 'registrations'
view_name = 'registration-detail'
# overrides RetrieveAPIView
def get_object(self):
registration = self.get_node()
if not registration.is_registration:
raise ValidationError('This is not a registration.')
return registration
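# Illustrative client-side sketch (an assumption, not used by this module) of the
# update action documented above: turning a registration public with a PATCH to the
# registration's self link. The host URL and bearer-token auth are assumptions;
# the payload shape follows the docstring.
def _example_make_registration_public(registration_id, token):
    import requests
    payload = {
        'data': {
            'type': 'registrations',
            'id': registration_id,
            'attributes': {'public': True},
        }
    }
    return requests.patch(
        'https://api.osf.io/v2/registrations/{}/'.format(registration_id),
        json=payload,
        headers={'Authorization': 'Bearer {}'.format(token)},
    )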
class RegistrationContributorsList(BaseContributorList, RegistrationMixin, UserMixin):
"""Contributors (users) for a registration.
Contributors are users who can make changes to the node or, in the case of private nodes,
have read access to the node. Contributors are divided between 'bibliographic' and 'non-bibliographic'
contributors. From a permissions standpoint, both are the same, but bibliographic contributors
are included in citations, while non-bibliographic contributors are not included in citations.
Note that if an anonymous view_only key is being used, the user relationship will not be exposed and the id for
the contributor will be an empty string.
##Node Contributor Attributes
<!--- Copied Attributes from NodeContributorDetail -->
`type` is "contributors"
name type description
======================================================================================================
bibliographic boolean Whether the user will be included in citations for this node. Default is true.
permission string User permission level. Must be "read", "write", or "admin". Default is "write".
unregistered_contributor string Contributor's assigned name if contributor hasn't yet claimed account
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
##Relationships
###Users
This endpoint shows the contributor user detail and is automatically embedded.
##Actions
###Adding Contributors
Method: POST
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": {
"type": "contributors", # required
"attributes": {
"bibliographic": true|false, # optional
"permission": "read"|"write"|"admin" # optional
},
"relationships": {
"users": {
"data": {
"type": "users", # required
"id": "{user_id}" # required
}
}
}
}
}
Success: 201 CREATED + node contributor representation
Add a contributor to a node by issuing a POST request to this endpoint. This effectively creates a relationship
between the node and the user. Besides the top-level type, there are optional "attributes" which describe the
relationship between the node and the user. `bibliographic` is a boolean and defaults to `true`. `permission` must
be a [valid OSF permission key](/v2/#osf-node-permission-keys) and defaults to `"write"`. A relationship object
with a "data" member, containing the user `type` and user `id` must be included. The id must be a valid user id.
All other fields not listed above will be ignored. If the request is successful the API will return
a 201 response with the representation of the new node contributor in the body. For the new node contributor's
canonical URL, see the `/links/self` field of the response.
##Query Params
+ `page=<Int>` -- page number of results to view, default 1
+ `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.
NodeContributors may be filtered by `bibliographic`, or `permission` attributes. `bibliographic` is a boolean, and
can be filtered using truthy values, such as `true`, `false`, `0`, or `1`. Note that quoting `true` or `false` in
the query will cause the match to fail regardless.
+ `profile_image_size=<Int>` -- Modifies `/links/profile_image_url` of the user entities so that it points to
the user's profile image scaled to the given size in pixels. If left blank, the size depends on the image provider.
#This Request/Response
"""
view_category = 'registrations'
view_name = 'registration-contributors'
pagination_class = NodeContributorPagination
serializer_class = RegistrationContributorsSerializer
required_read_scopes = [CoreScopes.NODE_REGISTRATIONS_READ]
required_write_scopes = [CoreScopes.NODE_REGISTRATIONS_WRITE]
permission_classes = (
ContributorDetailPermissions,
drf_permissions.IsAuthenticatedOrReadOnly,
ReadOnlyIfRegistration,
base_permissions.TokenHasScope,
)
def get_default_queryset(self):
node = self.get_node(check_object_permissions=False)
return node.contributor_set.all()
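# Illustrative client-side sketch (an assumption, not used by this module): listing a
# registration's contributors with the query params documented above. The host URL
# and contributors URL pattern are assumptions; filter[bibliographic] and
# profile_image_size follow the docstring.
def _example_list_contributors(registration_id):
    import requests
    resp = requests.get(
        'https://api.osf.io/v2/registrations/{}/contributors/'.format(registration_id),
        params={'filter[bibliographic]': 'true', 'profile_image_size': 100},
    )
    resp.raise_for_status()
    return [contrib['id'] for contrib in resp.json()['data']]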
class RegistrationContributorDetail(BaseContributorDetail, RegistrationMixin, UserMixin):
"""Detail of a contributor for a registration.
Contributors are users who can make changes to the node or, in the case of private nodes,
have read access to the node. Contributors are divided between 'bibliographic' and 'non-bibliographic'
contributors. From a permissions standpoint, both are the same, but bibliographic contributors
are included in citations, while non-bibliographic contributors are not included in citations.
Note that if an anonymous view_only key is being used, the user relationship will not be exposed and the id for
the contributor will be an empty string.
Contributors can be viewed, removed, and have their permissions and bibliographic status changed via this
endpoint.
##Attributes
`type` is "contributors"
name type description
======================================================================================================
bibliographic boolean Whether the user will be included in citations for this node. Default is true.
permission string User permission level. Must be "read", "write", or "admin". Default is "write".
unregistered_contributor string Contributor's assigned name if contributor hasn't yet claimed account
###Users
This endpoint shows the contributor user detail.
##Links
self: the canonical api endpoint of this contributor
html: the contributing user's page on the OSF website
profile_image: a url to the contributing user's profile image
##Query Params
+ `profile_image_size=<Int>` -- Modifies `/links/profile_image_url` so that it points the image scaled to the given
size in pixels. If left blank, the size depends on the image provider.
#This Request/Response
"""
view_category = 'registrations'
view_name = 'registration-contributor-detail'
serializer_class = RegistrationContributorsSerializer
required_read_scopes = [CoreScopes.NODE_REGISTRATIONS_READ]
required_write_scopes = [CoreScopes.NODE_REGISTRATIONS_WRITE]
permission_classes = (
ContributorDetailPermissions,
drf_permissions.IsAuthenticatedOrReadOnly,
ReadOnlyIfRegistration,
base_permissions.TokenHasScope,
)
class RegistrationChildrenList(JSONAPIBaseView, generics.ListAPIView, ListFilterMixin, RegistrationMixin):
"""Children of the current registration.
This will get the next level of child nodes for the selected node if the current user has read access for those
nodes. Creating a node via this endpoint will behave the same as the [node list endpoint](/v2/nodes/), but the new
node will have the selected node set as its parent.
##Node Attributes
<!--- Copied Attributes from NodeDetail -->
OSF Node entities have the "nodes" `type`.
name type description
=================================================================================
title string title of project or component
description string description of the node
category string node category, must be one of the allowed values
date_created iso8601 timestamp timestamp that the node was created
date_modified iso8601 timestamp timestamp when the node was last updated
tags array of strings list of tags that describe the node
current_user_can_comment boolean Whether the current user is allowed to post comments
current_user_permissions array of strings list of strings representing the permissions for the current user on this node
registration boolean is this a registration? (always false - may be deprecated in future versions)
fork boolean is this node a fork of another node?
public boolean has this node been made publicly-visible?
collection boolean is this a collection? (always false - may be deprecated in future versions)
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
##Query Params
+ `page=<Int>` -- page number of results to view, default 1
+ `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.
<!--- Copied Query Params from NodeList -->
Nodes may be filtered by their `id`, `title`, `category`, `description`, `public`, `tags`, `date_created`, `date_modified`,
`root`, `parent`, and `contributors`. Most are string fields and will be filtered using simple substring matching. `public`
is a boolean, and can be filtered using truthy values, such as `true`, `false`, `0`, or `1`. Note that quoting `true`
or `false` in the query will cause the match to fail regardless. `tags` is an array of simple strings.
#This Request/Response
"""
view_category = 'registrations'
view_name = 'registration-children'
serializer_class = RegistrationSerializer
permission_classes = (
ContributorOrPublic,
drf_permissions.IsAuthenticatedOrReadOnly,
ReadOnlyIfRegistration,
base_permissions.TokenHasScope,
ExcludeWithdrawals
)
required_read_scopes = [CoreScopes.NODE_REGISTRATIONS_READ]
required_write_scopes = [CoreScopes.NULL]
ordering = ('-date_modified',)
def get_default_queryset(self):
return default_registration_list_queryset() & default_registration_permission_queryset(self.request.user)
def get_queryset(self):
registration = self.get_node()
registration_pks = registration.node_relations.filter(is_node_link=False).select_related('child').values_list('child__pk', flat=True)
return self.get_queryset_from_request().filter(pk__in=registration_pks).can_view(self.request.user).order_by('-date_modified')
class RegistrationCitationDetail(NodeCitationDetail, RegistrationMixin):
""" The registration citation for a registration in CSL format *read only*
##Note
**This API endpoint is under active development, and is subject to change in the future**
    ##RegistrationCitationDetail Attributes
name type description
=================================================================================
id string unique ID for the citation
title string title of project or component
author list list of authors for the work
        publisher                string                   publisher - almost always 'Open Science Framework'
type string type of citation - web
doi string doi of the resource
"""
required_read_scopes = [CoreScopes.NODE_REGISTRATIONS_READ]
view_category = 'registrations'
view_name = 'registration-citation'
class RegistrationCitationStyleDetail(NodeCitationStyleDetail, RegistrationMixin):
""" The registration citation for a registration in a specific style's format t *read only*
##Note
**This API endpoint is under active development, and is subject to change in the future**
##RegistrationCitationStyleDetail Attributes
name type description
=================================================================================
citation string complete citation for a registration in the given style
"""
required_read_scopes = [CoreScopes.NODE_REGISTRATIONS_READ]
view_category = 'registrations'
view_name = 'registration-style-citation'
class RegistrationForksList(NodeForksList, RegistrationMixin):
"""Forks of the current registration. *Writeable*.
Paginated list of the current node's forks ordered by their `forked_date`. Forks are copies of projects that you can
    change without affecting the original project. When creating a fork, your fork will only contain public components or those
for which you are a contributor. Private components that you do not have access to will not be forked.
##Node Fork Attributes
<!--- Copied Attributes from NodeDetail with exception of forked_date-->
OSF Node Fork entities have the "nodes" `type`.
name type description
===============================================================================================================================
title string title of project or component
description string description of the node
category string node category, must be one of the allowed values
date_created iso8601 timestamp timestamp that the node was created
date_modified iso8601 timestamp timestamp when the node was last updated
tags array of strings list of tags that describe the node
registration boolean has this project been registered? (always False)
collection boolean is this node a collection (always False)
fork boolean is this node a fork of another node? (always True)
public boolean has this node been made publicly-visible?
forked_date iso8601 timestamp timestamp when the node was forked
current_user_can_comment boolean Whether the current user is allowed to post comments
current_user_permissions array of strings List of strings representing the permissions for the current user on this node
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
##Actions
###Create Node Fork
Method: POST
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": {
"type": "nodes", # required
"attributes": {
"title": {title} # optional
}
}
}
Success: 201 CREATED + node representation
To create a fork of the current node, issue a POST request to this endpoint. The `title` field is optional, with the
default title being 'Fork of ' + the current node's title. If the fork's creation is successful the API will return a
201 response with the representation of the forked node in the body. For the new fork's canonical URL, see the `/links/self`
field of the response.
##Query Params
+ `page=<Int>` -- page number of results to view, default 1
+ `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.
<!--- Copied Query Params from NodeList -->
Nodes may be filtered by their `title`, `category`, `description`, `public`, `registration`, `tags`, `date_created`,
`date_modified`, `root`, `parent`, and `contributors`. Most are string fields and will be filtered using simple
substring matching. Others are booleans, and can be filtered using truthy values, such as `true`, `false`, `0`, or `1`.
Note that quoting `true` or `false` in the query will cause the match to fail regardless. `tags` is an array of simple strings.
#This Request/Response
"""
view_category = 'registrations'
view_name = 'registration-forks'
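# Illustrative client-side sketch (an assumption, not used by this module) of the
# "Create Node Fork" action documented above. The forks URL pattern, host and
# bearer-token auth are assumptions; the payload shape follows the docstring.
def _example_fork_registration(registration_id, token, title=None):
    import requests
    payload = {'data': {'type': 'nodes', 'attributes': {}}}
    if title is not None:
        payload['data']['attributes']['title'] = title
    return requests.post(
        'https://api.osf.io/v2/registrations/{}/forks/'.format(registration_id),
        json=payload,
        headers={'Authorization': 'Bearer {}'.format(token)},
    )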
class RegistrationCommentsList(NodeCommentsList, RegistrationMixin):
"""List of comments for a registration."""
serializer_class = RegistrationCommentSerializer
view_category = 'registrations'
view_name = 'registration-comments'
def get_serializer_class(self):
if self.request.method == 'POST':
return CommentCreateSerializer
else:
return RegistrationCommentSerializer
class RegistrationLogList(NodeLogList, RegistrationMixin):
"""List of logs for a registration."""
view_category = 'registrations'
view_name = 'registration-logs'
class RegistrationProvidersList(NodeProvidersList, RegistrationMixin):
"""List of providers for a registration."""
serializer_class = RegistrationProviderSerializer
view_category = 'registrations'
view_name = 'registration-providers'
class RegistrationNodeLinksList(BaseNodeLinksList, RegistrationMixin):
"""Node Links to other nodes. *Writeable*.
Node Links act as pointers to other nodes. Unlike Forks, they are not copies of nodes;
Node Links are a direct reference to the node that they point to.
##Node Link Attributes
`type` is "node_links"
None
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
##Relationships
### Target Node
This endpoint shows the target node detail and is automatically embedded.
##Actions
###Adding Node Links
Method: POST
URL: /links/self
Query Params: <none>
Body (JSON): {
"data": {
"type": "node_links", # required
"relationships": {
"nodes": {
"data": {
"type": "nodes", # required
"id": "{target_node_id}", # required
}
}
}
}
}
Success: 201 CREATED + node link representation
To add a node link (a pointer to another node), issue a POST request to this endpoint. This effectively creates a
relationship between the node and the target node. The target node must be described as a relationship object with
a "data" member, containing the nodes `type` and the target node `id`.
##Query Params
+ `page=<Int>` -- page number of results to view, default 1
+ `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.
#This Request/Response
"""
view_category = 'registrations'
view_name = 'registration-pointers'
serializer_class = RegistrationNodeLinksSerializer
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
ContributorOrPublic,
ReadOnlyIfRegistration,
base_permissions.TokenHasScope,
ExcludeWithdrawals,
NodeLinksShowIfVersion,
)
required_read_scopes = [CoreScopes.NODE_REGISTRATIONS_READ]
required_write_scopes = [CoreScopes.NULL]
# TODO: This class doesn't exist
# model_class = Pointer
class RegistrationNodeLinksDetail(BaseNodeLinksDetail, RegistrationMixin):
"""Node Link details. *Writeable*.
Node Links act as pointers to other nodes. Unlike Forks, they are not copies of nodes;
Node Links are a direct reference to the node that they point to.
##Attributes
`type` is "node_links"
None
##Links
*None*
##Relationships
###Target node
This endpoint shows the target node detail and is automatically embedded.
##Actions
###Remove Node Link
Method: DELETE
URL: /links/self
Query Params: <none>
Success: 204 No Content
To remove a node link from a node, issue a DELETE request to the `self` link. This request will remove the
relationship between the node and the target node, not the nodes themselves.
##Query Params
*None*.
#This Request/Response
"""
view_category = 'registrations'
view_name = 'registration-pointer-detail'
serializer_class = RegistrationNodeLinksSerializer
permission_classes = (
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
ExcludeWithdrawals,
NodeLinksShowIfVersion,
)
required_read_scopes = [CoreScopes.NODE_REGISTRATIONS_READ]
required_write_scopes = [CoreScopes.NULL]
# TODO: this class doesn't exist
# model_class = Pointer
# overrides RetrieveAPIView
def get_object(self):
registration = self.get_node()
if not registration.is_registration:
raise ValidationError('This is not a registration.')
return registration
class RegistrationRegistrationsList(NodeRegistrationsList, RegistrationMixin):
"""List of registrations of a registration."""
view_category = 'registrations'
view_name = 'registration-registrations'
class RegistrationFilesList(NodeFilesList, RegistrationMixin):
"""List of files for a registration."""
view_category = 'registrations'
view_name = 'registration-files'
serializer_class = RegistrationFileSerializer
class RegistrationFileDetail(NodeFileDetail, RegistrationMixin):
"""Detail of a file for a registration."""
view_category = 'registrations'
view_name = 'registration-file-detail'
serializer_class = RegistrationFileSerializer
class RegistrationInstitutionsList(NodeInstitutionsList, RegistrationMixin):
"""List of the Institutions for a registration."""
view_category = 'registrations'
view_name = 'registration-institutions'
class RegistrationWikiList(NodeWikiList, RegistrationMixin):
"""List of wikis for a registration."""
view_category = 'registrations'
view_name = 'registration-wikis'
serializer_class = RegistrationWikiSerializer
class RegistrationLinkedNodesList(LinkedNodesList, RegistrationMixin):
"""List of linked nodes for a registration."""
view_category = 'registrations'
view_name = 'linked-nodes'
class RegistrationLinkedNodesRelationship(JSONAPIBaseView, generics.RetrieveAPIView, RegistrationMixin):
""" Relationship Endpoint for Nodes -> Linked Node relationships
Used to retrieve the ids of the linked nodes attached to this collection. For each id, there
exists a node link that contains that node.
##Actions
"""
view_category = 'registrations'
view_name = 'node-pointer-relationship'
permission_classes = (
ContributorOrPublicForRelationshipPointers,
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
ReadOnlyIfRegistration,
)
required_read_scopes = [CoreScopes.NODE_LINKS_READ]
required_write_scopes = [CoreScopes.NULL]
serializer_class = LinkedNodesRelationshipSerializer
parser_classes = (JSONAPIRelationshipParser, JSONAPIRelationshipParserForRegularJSON, )
def get_object(self):
node = self.get_node(check_object_permissions=False)
auth = get_user_auth(self.request)
obj = {'data': [
linked_node for linked_node in
node.linked_nodes.filter(is_deleted=False).exclude(type='osf.collection').exclude(type='osf.registration')
if linked_node.can_view(auth)
], 'self': node}
self.check_object_permissions(self.request, obj)
return obj
class RegistrationLinkedRegistrationsRelationship(JSONAPIBaseView, generics.RetrieveAPIView, RegistrationMixin):
"""Relationship Endpoint for Registration -> Linked Registration relationships. *Read-only*
Used to retrieve the ids of the linked registrations attached to this collection. For each id, there
exists a node link that contains that registration.
"""
view_category = 'registrations'
view_name = 'node-registration-pointer-relationship'
permission_classes = (
ContributorOrPublicForRelationshipPointers,
drf_permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
ReadOnlyIfRegistration,
)
required_read_scopes = [CoreScopes.NODE_LINKS_READ]
required_write_scopes = [CoreScopes.NULL]
serializer_class = LinkedRegistrationsRelationshipSerializer
parser_classes = (JSONAPIRelationshipParser, JSONAPIRelationshipParserForRegularJSON,)
def get_object(self):
node = self.get_node(check_object_permissions=False)
auth = get_user_auth(self.request)
obj = {
'data': [
linked_registration for linked_registration in
node.linked_nodes.filter(is_deleted=False, type='osf.registration').exclude(type='osf.collection')
if linked_registration.can_view(auth)
],
'self': node
}
self.check_object_permissions(self.request, obj)
return obj
class RegistrationLinkedRegistrationsList(NodeLinkedRegistrationsList, RegistrationMixin):
"""List of registrations linked to this registration. *Read-only*.
Linked registrations are the registration nodes pointed to by node links.
<!--- Copied Spiel from RegistrationDetail -->
Registrations are read-only snapshots of a project. This view shows details about the given registration.
Each resource contains the full representation of the registration, meaning additional requests to an individual
registration's detail view are not necessary. A withdrawn registration will display a limited subset of information,
namely, title, description, date_created, registration, withdrawn, date_registered, withdrawal_justification, and
registration supplement. All other fields will be displayed as null. Additionally, the only relationships permitted
to be accessed for a withdrawn registration are the contributors - other relationships will return a 403.
##Linked Registration Attributes
<!--- Copied Attributes from RegistrationDetail -->
Registrations have the "registrations" `type`.
name type description
=======================================================================================================
title string title of the registered project or component
description string description of the registered node
        category                        string             node category, must be one of the allowed values
date_created iso8601 timestamp timestamp that the node was created
date_modified iso8601 timestamp timestamp when the node was last updated
tags array of strings list of tags that describe the registered node
current_user_can_comment boolean Whether the current user is allowed to post comments
current_user_permissions array of strings list of strings representing the permissions for the current user on this node
fork boolean is this project a fork?
registration boolean has this project been registered? (always true - may be deprecated in future versions)
collection boolean is this registered node a collection? (always false - may be deprecated in future versions)
node_license object details of the license applied to the node
year string date range of the license
copyright_holders array of strings holders of the applied license
public boolean has this registration been made publicly-visible?
withdrawn boolean has this registration been withdrawn?
date_registered iso8601 timestamp timestamp that the registration was created
embargo_end_date iso8601 timestamp when the embargo on this registration will be lifted (if applicable)
withdrawal_justification string reasons for withdrawing the registration
pending_withdrawal boolean is this registration pending withdrawal?
pending_withdrawal_approval boolean is this registration pending approval?
pending_embargo_approval boolean is the associated Embargo awaiting approval by project admins?
registered_meta dictionary registration supplementary information
registration_supplement string registration template
##Links
See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).
##Query Params
+ `page=<Int>` -- page number of results to view, default 1
+ `filter[<fieldname>]=<Str>` -- fields and values to filter the search results on.
Nodes may be filtered by their `title`, `category`, `description`, `public`, `registration`, or `tags`. `title`,
`description`, and `category` are string fields and will be filtered using simple substring matching. `public` and
`registration` are booleans, and can be filtered using truthy values, such as `true`, `false`, `0`, or `1`. Note
that quoting `true` or `false` in the query will cause the match to fail regardless. `tags` is an array of simple strings.
#This Request/Response
"""
serializer_class = RegistrationSerializer
view_category = 'registrations'
view_name = 'linked-registrations'
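# Illustrative sketch (not part of the OSF codebase): assembling a query string for this
# endpoint using the `page` and `filter[<fieldname>]` parameters documented in the docstring
# above. The base URL, registration id, endpoint path, and filter values are hypothetical.
def _example_linked_registrations_query(base_url, registration_id):
    params = [('filter[title]', 'climate'), ('filter[public]', 'true'), ('page', '1')]
    query = '&'.join('%s=%s' % (k, v) for k, v in params)
    return '%s/registrations/%s/linked_registrations/?%s' % (base_url, registration_id, query)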
class RegistrationViewOnlyLinksList(NodeViewOnlyLinksList, RegistrationMixin):
required_read_scopes = [CoreScopes.REGISTRATION_VIEW_ONLY_LINKS_READ]
required_write_scopes = [CoreScopes.REGISTRATION_VIEW_ONLY_LINKS_WRITE]
view_category = 'registrations'
view_name = 'registration-view-only-links'
class RegistrationViewOnlyLinkDetail(NodeViewOnlyLinkDetail, RegistrationMixin):
required_read_scopes = [CoreScopes.REGISTRATION_VIEW_ONLY_LINKS_READ]
required_write_scopes = [CoreScopes.REGISTRATION_VIEW_ONLY_LINKS_WRITE]
view_category = 'registrations'
view_name = 'registration-view-only-link-detail'
class RegistrationIdentifierList(RegistrationMixin, NodeIdentifierList):
"""List of identifiers for a specified node. *Read-only*.
##Identifier Attributes
OSF Identifier entities have the "identifiers" `type`.
    name           type       description
    ----------------------------------------------------------------------------
    category       string     e.g. 'ark', 'doi'
    value          string     the identifier value itself
##Links
self: this identifier's detail page
##Relationships
###Referent
    The identifier refers to this node.
##Actions
*None*.
##Query Params
Identifiers may be filtered by their category.
#This Request/Response
"""
serializer_class = RegistrationIdentifierSerializer
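# Illustrative sketch (not part of the OSF codebase): the docstring above notes identifiers
# can be filtered by their category; a client query might look like this. The URL pieces and
# the chosen category value are hypothetical.
def _example_identifier_filter_query(base_url, registration_id):
    return '%s/registrations/%s/identifiers/?filter[category]=doi' % (base_url, registration_id)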
|
|
"""Classes to (de)serialize various struct messages"""
import struct
import six
import vdf
from six.moves import range
from steam.enums import EResult, EUniverse
from steam.enums.emsg import EMsg
from steam.utils.binary import StructReader
_emsg_map = {}
def get_struct(emsg):
return _emsg_map.get(emsg, None)
class StructMessageMeta(type):
"""Automatically adds subclasses of :class:`StructMessage` to the ``EMsg`` map"""
def __new__(metacls, name, bases, classdict):
cls = type.__new__(metacls, name, bases, classdict)
if name != 'StructMessage':
try:
_emsg_map[EMsg[name]] = cls
except KeyError:
pass
return cls
@six.add_metaclass(StructMessageMeta)
class StructMessage:
def __init__(self, data=None):
if data: self.load(data)
def serialize(self):
raise NotImplementedError
def load(self, data):
raise NotImplementedError
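# Illustrative sketch (not part of the original module): StructMessageMeta keys every
# StructMessage subclass by its EMsg name, so a raw payload can be decoded generically.
# `emsg` and `payload` are caller-supplied values; e.g. EMsg.ChannelEncryptResult with
# struct.pack("<I", EResult.OK) would produce a ChannelEncryptResult instance.
def _example_decode(emsg, payload):
    cls = get_struct(emsg)
    return cls(payload) if cls is not None else None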
class ChannelEncryptRequest(StructMessage):
protocolVersion = 1
universe = EUniverse.Invalid
challenge = b''
def serialize(self):
return struct.pack("<II", self.protocolVersion, self.universe) + self.challenge
def load(self, data):
(self.protocolVersion,
universe,
) = struct.unpack_from("<II", data)
self.universe = EUniverse(universe)
if len(data) > 8:
self.challenge = data[8:]
def __str__(self):
return '\n'.join(["protocolVersion: %s" % self.protocolVersion,
"universe: %s" % repr(self.universe),
"challenge: %s" % repr(self.challenge),
])
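# Illustrative sketch (not part of the original module): ChannelEncryptRequest round-trips
# through serialize()/load(). The universe and challenge bytes below are made up.
def _example_channel_encrypt_request_roundtrip():
    req = ChannelEncryptRequest()
    req.universe = EUniverse.Public
    req.challenge = b'\x11' * 16
    parsed = ChannelEncryptRequest(req.serialize())
    assert parsed.universe == EUniverse.Public
    assert parsed.challenge == req.challenge
    return parsed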
class ChannelEncryptResponse(StructMessage):
protocolVersion = 1
keySize = 128
    key = b''
crc = 0
def serialize(self):
return struct.pack("<II128sII",
self.protocolVersion,
self.keySize,
self.key,
self.crc,
0
)
def load(self, data):
(self.protocolVersion,
self.keySize,
self.key,
self.crc,
_,
) = struct.unpack_from("<II128sII", data)
def __str__(self):
return '\n'.join(["protocolVersion: %s" % self.protocolVersion,
"keySize: %s" % self.keySize,
"key: %s" % repr(self.key),
"crc: %s" % self.crc,
])
class ChannelEncryptResult(StructMessage):
eresult = EResult.Invalid
def serialize(self):
return struct.pack("<I", self.eresult)
def load(self, data):
(result,) = struct.unpack_from("<I", data)
self.eresult = EResult(result)
def __str__(self):
return "eresult: %s" % repr(self.eresult)
class ClientLogOnResponse(StructMessage):
eresult = EResult.Invalid
def serialize(self):
return struct.pack("<I", self.eresult)
def load(self, data):
(result,) = struct.unpack_from("<I", data)
self.eresult = EResult(result)
def __str__(self):
return "eresult: %s" % repr(self.eresult)
class ClientVACBanStatus(StructMessage):
class VACBanRange(object):
start = 0
end = 0
def __str__(self):
return '\n'.join(["{",
"start: %s" % self.start,
"end: %d" % self.end,
"}",
])
@property
def numBans(self):
return len(self.ranges)
def __init__(self, data):
self.ranges = list()
StructMessage.__init__(self, data)
def load(self, data):
buf = StructReader(data)
numBans, = buf.unpack("<I")
for _ in range(numBans):
m = self.VACBanRange()
self.ranges.append(m)
m.start, m.end, _ = buf.unpack("<III")
if m.start > m.end:
m.start, m.end = m.end, m.start
def __str__(self):
text = ["numBans: %d" % self.numBans]
for m in self.ranges: # emulate Protobuf text format
text.append("ranges " + str(m).replace("\n", "\n ", 2))
return '\n'.join(text)
class ClientChatMsg(StructMessage):
steamIdChatter = 0
steamIdChatRoom = 0
ChatMsgType = 0
text = ""
def serialize(self):
rbytes = struct.pack("<QQI",
self.steamIdChatter,
self.steamIdChatRoom,
self.ChatMsgType,
)
# utf-8 encode only when unicode in py2 and str in py3
rbytes += (self.text.encode('utf-8')
if (not isinstance(self.text, str) and bytes is str)
or isinstance(self.text, str)
else self.text
) + b'\x00'
return rbytes
def load(self, data):
buf = StructReader(data)
self.steamIdChatter, self.steamIdChatRoom, self.ChatMsgType = buf.unpack("<QQI")
self.text = buf.read_cstring().decode('utf-8')
def __str__(self):
return '\n'.join(["steamIdChatter: %d" % self.steamIdChatter,
"steamIdChatRoom: %d" % self.steamIdChatRoom,
"ChatMsgType: %d" % self.ChatMsgType,
"text: %s" % repr(self.text),
])
class ClientJoinChat(StructMessage):
steamIdChat = 0
isVoiceSpeaker = False
def serialize(self):
return struct.pack("<Q?",
self.steamIdChat,
self.isVoiceSpeaker
)
def load(self, data):
(self.steamIdChat,
self.isVoiceSpeaker
) = struct.unpack_from("<Q?", data)
def __str__(self):
return '\n'.join(["steamIdChat: %d" % self.steamIdChat,
"isVoiceSpeaker: %r" % self.isVoiceSpeaker,
])
class ClientChatMemberInfo(StructMessage):
steamIdChat = 0
type = 0
steamIdUserActedOn = 0
chatAction = 0
steamIdUserActedBy = 0
def serialize(self):
return struct.pack("<QIQIQ",
self.steamIdChat,
self.type,
self.steamIdUserActedOn,
self.chatAction,
self.steamIdUserActedBy
)
def load(self, data):
(self.steamIdChat,
self.type,
self.steamIdUserActedOn,
self.chatAction,
self.steamIdUserActedBy
) = struct.unpack_from("<QIQIQ", data)
def __str__(self):
return '\n'.join(["steamIdChat: %d" % self.steamIdChat,
"type: %r" % self.type,
"steamIdUserActedOn: %d" % self.steamIdUserActedOn,
"chatAction: %d" % self.chatAction,
"steamIdUserActedBy: %d" % self.steamIdUserActedBy
])
class ClientMarketingMessageUpdate2(StructMessage):
class MarketingMessage(object):
id = 0
url = ''
flags = 0
def __str__(self):
return '\n'.join(["{",
"id: %s" % self.id,
"url: %s" % repr(self.url),
"flags: %d" % self.flags,
"}",
])
time = 0
@property
def count(self):
return len(self.messages)
def __init__(self, data):
self.messages = list()
StructMessage.__init__(self, data)
def load(self, data):
buf = StructReader(data)
self.time, count = buf.unpack("<II")
for _ in range(count):
m = self.MarketingMessage()
self.messages.append(m)
length, m.id = buf.unpack("<IQ")
m.url = buf.read_cstring().decode('utf-8')
            m.flags, = buf.unpack("<I")
def __str__(self):
text = ["time: %s" % self.time,
"count: %d" % self.count,
]
for m in self.messages: # emulate Protobuf text format
text.append("messages " + str(m).replace("\n", "\n ", 3))
return '\n'.join(text)
class ClientUpdateGuestPassesList(StructMessage):
eresult = EResult.Invalid
countGuestPassesToGive = 0
countGuestPassesToRedeem = 0
    # there is more to parse, but I don't have a sample to figure it out
    # fairly sure this is deprecated anyway since the introduction of the inventory system
def load(self, data):
(eresult,
self.countGuestPassesToGive,
self.countGuestPassesToRedeem,
) = struct.unpack_from("<III", data)
self.eresult = EResult(eresult)
def __str__(self):
return '\n'.join(["eresult: %s" % repr(self.eresult),
"countGuestPassesToGive: %d" % self.countGuestPassesToGive,
"countGuestPassesToRedeem: %d" % self.countGuestPassesToRedeem,
])
class ClientChatEnter(StructMessage):
steamIdChat = 0
steamIdFriend = 0
chatRoomType = 0
steamIdOwner = 0
steamIdClan = 0
chatFlags = 0
enterResponse = 0
numMembers = 0
chatRoomName = ""
memberList = []
def __init__(self, data=None):
if data: self.load(data)
def load(self, data):
buf, self.memberList = StructReader(data), list()
(self.steamIdChat, self.steamIdFriend, self.chatRoomType, self.steamIdOwner,
self.steamIdClan, self.chatFlags, self.enterResponse, self.numMembers
) = buf.unpack("<QQIQQ?II")
self.chatRoomName = buf.read_cstring().decode('utf-8')
for _ in range(self.numMembers):
self.memberList.append(vdf.binary_loads(buf.read(64))['MessageObject'])
self.UNKNOWN1, = buf.unpack("<I")
def __str__(self):
return '\n'.join(["steamIdChat: %d" % self.steamIdChat,
"steamIdFriend: %d" % self.steamIdFriend,
"chatRoomType: %r" % self.chatRoomType,
"steamIdOwner: %d" % self.steamIdOwner,
"steamIdClan: %d" % self.steamIdClan,
"chatFlags: %r" % self.chatFlags,
"enterResponse: %r" % self.enterResponse,
"numMembers: %r" % self.numMembers,
"chatRoomName: %s" % repr(self.chatRoomName),
                          ] + ["memberList: %s" % x for x in self.memberList])
##################################################################################################
class _ResultStruct(StructMessage):
eresult = EResult.Invalid
def serialize(self):
return struct.pack("<I", self.eresult)
def load(self, data):
eresult, = struct.unpack_from("<I", data)
self.eresult = EResult(eresult)
def __str__(self):
return "eresult: %s" % repr(self.eresult)
##################################################################################################
class ClientRequestValidationMail(StructMessage):
UNKNOWN1 = b'\x00'
def serialize(self):
return self.UNKNOWN1
def load(self, data):
self.UNKNOWN1 = data
def __str__(self):
return "UNKNOWN1: %s" % repr(self.UNKNOWN1)
class ClientRequestValidationMailResponse(_ResultStruct):
pass
##################################################################################################
class ClientRequestChangeMail(StructMessage):
password = ''
UNKNOWN1 = 0
def serialize(self):
return struct.pack("<81sI", self.password[:80].encode('ascii'), self.UNKNOWN1)
def __str__(self):
return '\n'.join(["password: %s" % repr(self.password),
"UNKNOWN1: %d" % self.UNKNOWN1,
])
class ClientRequestChangeMailResponse(_ResultStruct):
pass
##################################################################################################
class ClientPasswordChange3(StructMessage):
password = ''
new_password = ''
code = ''
def serialize(self):
return (b'\x00'
+ self.password.encode('ascii') + b'\x00'
+ self.new_password.encode('ascii') + b'\x00'
+ self.code.encode('ascii') + b'\x00'
)
def __str__(self):
return '\n'.join(["password: %s" % repr(self.password),
"new_password: %s" % repr(self.new_password),
"code: %s" % repr(self.code),
])
class ClientPasswordChangeResponse(_ResultStruct):
pass
|
|
# coding: utf-8
"""
A custom FileBrowseField.
"""
from django.db import models
from django import forms
from django.forms.widgets import Input
from django.db.models.fields import Field, CharField
from django.utils.safestring import mark_safe
from django.forms.util import flatatt
from django.utils.encoding import StrAndUnicode, force_unicode, smart_unicode, smart_str
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from django.forms.fields import EMPTY_VALUES
import os
import re
from filebrowser.functions import _get_file_type, _url_join
from filebrowser.fb_settings import *
class FileBrowseFormField(forms.Field):
default_error_messages = {
'max_length': _(u'Ensure this value has at most %(max)d characters (it has %(length)d).'),
'min_length': _(u'Ensure this value has at least %(min)d characters (it has %(length)d).'),
'extension': _(u'Extension %(ext)s is not allowed. Only %(allowed)s is allowed.'),
}
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
self.max_length, self.min_length = max_length, min_length
self.initial_directory = kwargs['initial_directory']
self.extensions_allowed = kwargs['extensions_allowed']
del kwargs['initial_directory']
del kwargs['extensions_allowed']
super(FileBrowseFormField, self).__init__(*args, **kwargs)
def clean(self, value):
"Validates max_length and min_length. Returns a Unicode object. Validates extension ..."
super(FileBrowseFormField, self).clean(value)
if value in EMPTY_VALUES:
return u''
value = smart_unicode(value)
value_length = len(value)
if self.max_length is not None and value_length > self.max_length:
raise forms.ValidationError(self.error_messages['max_length'] % {'max': self.max_length, 'length': value_length})
if self.min_length is not None and value_length < self.min_length:
raise forms.ValidationError(self.error_messages['min_length'] % {'min': self.min_length, 'length': value_length})
file_extension = os.path.splitext(value)[1].lower()
        if self.extensions_allowed and file_extension not in self.extensions_allowed:
raise forms.ValidationError(self.error_messages['extension'] % {'ext': file_extension, 'allowed': ", ".join(self.extensions_allowed)})
return value
class FileBrowseWidget(Input):
input_type = 'text'
def __init__(self, attrs=None):
self.initial_directory = attrs['initial_directory']
self.extensions_allowed = attrs['extensions_allowed']
if attrs is not None:
self.attrs = attrs.copy()
else:
self.attrs = {}
def render(self, name, value, attrs=None):
if value is None:
value = ''
elif not isinstance(value, (str, unicode)):
value = value.original
final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
if value == "":
final_attrs['initial_directory'] = _url_join(URL_ADMIN, final_attrs['initial_directory'])
else:
final_attrs['initial_directory'] = _url_join(URL_ADMIN, os.path.split(value)[0].replace(URL_WWW, ""))
if value != '':
# Only add the 'value' attribute if a value is non-empty.
final_attrs['value'] = force_unicode(value)
file = os.path.split(value)[1]
if len(URL_WWW) < len(os.path.split(value)[0]):
path = os.path.split(value)[0].replace(URL_WWW, "")
else:
path = ""
file_type = _get_file_type(file)
path_thumb = ""
if file_type == 'Image':
# check if thumbnail exists
if os.path.isfile(os.path.join(PATH_SERVER, path, THUMB_PREFIX + file)):
path_thumb = os.path.join(os.path.split(value)[0], THUMB_PREFIX + file)
else:
path_thumb = URL_FILEBROWSER_MEDIA + 'img/filebrowser_type_image.gif'
elif file_type == "Folder":
path_thumb = URL_FILEBROWSER_MEDIA + 'img/filebrowser_type_folder.gif'
else:
# if file is not an image, display file-icon (which is linked to the file) instead
path_thumb = URL_FILEBROWSER_MEDIA + 'img/filebrowser_type_' + file_type + '.gif'
final_attrs['thumbnail'] = path_thumb
path_search_icon = URL_FILEBROWSER_MEDIA + 'img/filebrowser_icon_show.gif'
final_attrs['search_icon'] = path_search_icon
return render_to_string("filebrowser/custom_field.html", locals())
class FileBrowserImageSize(object):
def __init__(self, image_type, original):
self.image_type = image_type
self.original = original
def __unicode__(self):
return u'%s' % (self._get_image())
def _get_image(self):
if not hasattr(self, '_image_cache'):
self._image_cache = self._get_image_name()
return self._image_cache
def _get_image_name(self):
arg = self.image_type
value = self.original
value_re = re.compile(r'^(%s)' % (URL_WWW))
value_path = value_re.sub('', value)
filename = os.path.split(value_path)[1]
if CHECK_EXISTS:
path = os.path.split(value_path)[0]
if os.path.isfile(os.path.join(PATH_SERVER, path, filename.replace(".",
"_").lower() + IMAGE_GENERATOR_DIRECTORY, arg + filename)):
img_value = os.path.join(os.path.split(value)[0], filename.replace(".",
"_").lower() + IMAGE_GENERATOR_DIRECTORY, arg + filename)
return u'%s' % (img_value)
else:
return u''
else:
img_value = os.path.join(os.path.split(value)[0], filename.replace(".",
"_").lower() + IMAGE_GENERATOR_DIRECTORY, arg + filename)
return u'%s' % (img_value)
class FileBrowserImageType(object):
def __init__(self, original, image_list):
for image_type in image_list:
setattr(self, image_type[0].rstrip('_'), FileBrowserImageSize(image_type[0], original))
class FileBrowserFile(object):
def __init__(self, value):
self.original = value
self._add_image_types()
def _add_image_types(self):
all_prefixes = []
for imgtype in IMAGE_GENERATOR_LANDSCAPE:
if imgtype[0] not in all_prefixes:
all_prefixes.append(imgtype[0])
setattr(self, imgtype[0].rstrip('_'), FileBrowserImageSize(imgtype[0], self.original))
for imgtype in IMAGE_GENERATOR_PORTRAIT:
if imgtype[0] not in all_prefixes:
all_prefixes.append(imgtype[0])
setattr(self, imgtype[0].rstrip('_'), FileBrowserImageSize(imgtype[0], self.original))
def __unicode__(self):
return self.original
def crop(self):
if not hasattr(self, '_crop_cache'):
self._crop_cache = FileBrowserImageType(self.original, IMAGE_CROP_GENERATOR)
return self._crop_cache
class FileBrowseField(Field):
__metaclass__ = models.SubfieldBase
def to_python(self, value):
if isinstance(value, FileBrowserFile):
return value
return FileBrowserFile(value)
def get_db_prep_value(self, value):
return value.original
def get_manipulator_field_objs(self):
return [oldforms.TextField]
def get_internal_type(self):
return "CharField"
def formfield(self, **kwargs):
attrs = {}
attrs["initial_directory"] = self.initial_directory
attrs["extensions_allowed"] = self.extensions_allowed
defaults = {'max_length': self.max_length}
defaults['form_class'] = FileBrowseFormField
defaults['widget'] = FileBrowseWidget(attrs=attrs)
kwargs['initial_directory'] = self.initial_directory
kwargs['extensions_allowed'] = self.extensions_allowed
defaults.update(kwargs)
return super(FileBrowseField, self).formfield(**defaults)
    def __init__(self, *args, **kwargs):
        self.initial_directory = kwargs.pop('initial_directory', "/")
        self.extensions_allowed = kwargs.pop('extensions_allowed', "")
        return super(FileBrowseField, self).__init__(*args, **kwargs)
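# Illustrative sketch (not part of the original module): constructing the field with the
# custom kwargs consumed in __init__ above and building its form field. The directory and
# extension list are hypothetical values.
def _example_formfield():
    field = FileBrowseField(max_length=200, initial_directory="/uploads/",
                            extensions_allowed=['.jpg', '.gif', '.png'], blank=True)
    return field.formfield()  # a FileBrowseFormField rendered with FileBrowseWidget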
|
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Control Theory based self-augmentation."""
import random
from collections import namedtuple
import numpy as np
from PIL import Image, ImageOps, ImageEnhance, ImageFilter
OPS = {}
OP = namedtuple('OP', ('f', 'bins'))
Sample = namedtuple('Sample', ('train', 'probe'))
def register(*bins):
def wrap(f):
OPS[f.__name__] = OP(f, bins)
return f
return wrap
def apply(x, ops):
if ops is None:
return x
y = Image.fromarray(np.round(127.5 * (1 + x)).clip(0, 255).astype('uint8'))
for op, args in ops:
y = OPS[op].f(y, *args)
return np.asarray(y).astype('f') / 127.5 - 1
class CTAugment:
def __init__(self, depth: int = 2, th: float = 0.85, decay: float = 0.99):
self.decay = decay
self.depth = depth
self.th = th
self.rates = {}
for k, op in OPS.items():
self.rates[k] = tuple([np.ones(x, 'f') for x in op.bins])
def rate_to_p(self, rate):
        p = rate + (1 - self.decay)  # Avoid having all bins at zero.
p = p / p.max()
p[p < self.th] = 0
return p
def policy(self, probe):
kl = list(OPS.keys())
v = []
if probe:
for _ in range(self.depth):
k = random.choice(kl)
bins = self.rates[k]
rnd = np.random.uniform(0, 1, len(bins))
v.append(OP(k, rnd.tolist()))
return v
for _ in range(self.depth):
vt = []
k = random.choice(kl)
bins = self.rates[k]
rnd = np.random.uniform(0, 1, len(bins))
for r, bin in zip(rnd, bins):
p = self.rate_to_p(bin)
value = np.random.choice(p.shape[0], p=p / p.sum())
vt.append((value + r) / p.shape[0])
v.append(OP(k, vt))
return v
def update_rates(self, policy, proximity):
for k, bins in policy:
for p, rate in zip(bins, self.rates[k]):
p = int(p * len(rate) * 0.999)
rate[p] = rate[p] * self.decay + proximity * (1 - self.decay)
def stats(self):
return '\n'.join('%-16s %s' % (k, ' / '.join(' '.join('%.2f' % x for x in self.rate_to_p(rate))
for rate in self.rates[k]))
for k in sorted(OPS.keys()))
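# Illustrative sketch (not part of the original module): one probe round with CTAugment.
# The random input image and the 0.9 proximity (match quality) value are placeholders.
def _example_cta_round():
    cta = CTAugment(depth=2)
    x = np.random.uniform(-1, 1, size=(32, 32, 3)).astype('f')  # image scaled to [-1, 1]
    ops = cta.policy(probe=True)           # `depth` random (op name, bin arguments) pairs
    x_aug = apply(x, ops)                  # run the sampled ops through the OPS registry
    cta.update_rates(ops, proximity=0.9)   # reward bins whose output still matched the label
    return x_aug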
def _enhance(x, op, level):
return op(x).enhance(0.1 + 1.9 * level)
def _imageop(x, op, level):
return Image.blend(x, op(x), level)
def _filter(x, op, level):
return Image.blend(x, x.filter(op), level)
@register(17)
def autocontrast(x, level):
return _imageop(x, ImageOps.autocontrast, level)
@register(17)
def blur(x, level):
return _filter(x, ImageFilter.BLUR, level)
@register(17)
def brightness(x, brightness):
return _enhance(x, ImageEnhance.Brightness, brightness)
@register(17)
def color(x, color):
return _enhance(x, ImageEnhance.Color, color)
@register(17)
def contrast(x, contrast):
return _enhance(x, ImageEnhance.Contrast, contrast)
@register(17)
def cutout(x, level):
"""Apply cutout to pil_img at the specified level."""
size = 1 + int(level * min(x.size) * 0.499)
img_height, img_width = x.size
height_loc = np.random.randint(low=0, high=img_height)
width_loc = np.random.randint(low=0, high=img_width)
upper_coord = (max(0, height_loc - size // 2), max(0, width_loc - size // 2))
lower_coord = (min(img_height, height_loc + size // 2), min(img_width, width_loc + size // 2))
pixels = x.load() # create the pixel map
for i in range(upper_coord[0], lower_coord[0]): # for every col:
for j in range(upper_coord[1], lower_coord[1]): # For every row
pixels[i, j] = (127, 127, 127) # set the color accordingly
return x
@register(17)
def equalize(x, level):
return _imageop(x, ImageOps.equalize, level)
@register(17)
def invert(x, level):
return _imageop(x, ImageOps.invert, level)
@register()
def identity(x):
return x
@register(8)
def posterize(x, level):
level = 1 + int(level * 7.999)
return ImageOps.posterize(x, level)
@register(17, 6)
def rescale(x, scale, method):
s = x.size
scale *= 0.25
crop = (scale * s[0], scale * s[1], s[0] * (1 - scale), s[1] * (1 - scale))
methods = (Image.ANTIALIAS, Image.BICUBIC, Image.BILINEAR, Image.BOX, Image.HAMMING, Image.NEAREST)
method = methods[int(method * 5.99)]
return x.crop(crop).resize(x.size, method)
@register(17)
def rotate(x, angle):
angle = int(np.round((2 * angle - 1) * 45))
return x.rotate(angle)
@register(17)
def sharpness(x, sharpness):
return _enhance(x, ImageEnhance.Sharpness, sharpness)
@register(17)
def shear_x(x, shear):
shear = (2 * shear - 1) * 0.3
return x.transform(x.size, Image.AFFINE, (1, shear, 0, 0, 1, 0))
@register(17)
def shear_y(x, shear):
shear = (2 * shear - 1) * 0.3
return x.transform(x.size, Image.AFFINE, (1, 0, 0, shear, 1, 0))
@register(17)
def smooth(x, level):
return _filter(x, ImageFilter.SMOOTH, level)
@register(17)
def solarize(x, th):
th = int(th * 255.999)
return ImageOps.solarize(x, th)
@register(17)
def translate_x(x, delta):
delta = (2 * delta - 1) * 0.3
return x.transform(x.size, Image.AFFINE, (1, 0, delta, 0, 1, 0))
@register(17)
def translate_y(x, delta):
delta = (2 * delta - 1) * 0.3
return x.transform(x.size, Image.AFFINE, (1, 0, 0, 0, 1, delta))
|
|
# Copyright (c) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numbers
import unittest
import os
import tarfile
import urllib
import zlib
import mock
from shutil import rmtree
from tempfile import mkdtemp
from StringIO import StringIO
from eventlet import sleep
from mock import patch, call
from swift.common import utils, constraints
from swift.common.middleware import bulk
from swift.common.swob import Request, Response, HTTPException
from swift.common.http import HTTP_NOT_FOUND, HTTP_UNAUTHORIZED
class FakeApp(object):
def __init__(self):
self.calls = 0
self.delete_paths = []
self.max_pathlen = 100
self.del_cont_total_calls = 2
self.del_cont_cur_call = 0
def __call__(self, env, start_response):
self.calls += 1
if env['PATH_INFO'].startswith('/unauth/'):
if env['PATH_INFO'].endswith('/c/f_ok'):
return Response(status='204 No Content')(env, start_response)
return Response(status=401)(env, start_response)
if env['PATH_INFO'].startswith('/create_cont/'):
if env['REQUEST_METHOD'] == 'HEAD':
return Response(status='404 Not Found')(env, start_response)
return Response(status='201 Created')(env, start_response)
if env['PATH_INFO'].startswith('/create_cont_fail/'):
if env['REQUEST_METHOD'] == 'HEAD':
return Response(status='403 Forbidden')(env, start_response)
return Response(status='404 Not Found')(env, start_response)
if env['PATH_INFO'].startswith('/create_obj_unauth/'):
if env['PATH_INFO'].endswith('/cont'):
return Response(status='201 Created')(env, start_response)
return Response(status=401)(env, start_response)
if env['PATH_INFO'].startswith('/tar_works/'):
if len(env['PATH_INFO']) > self.max_pathlen:
return Response(status='400 Bad Request')(env, start_response)
return Response(status='201 Created')(env, start_response)
if env['PATH_INFO'].startswith('/tar_works_cont_head_fail/'):
if env['REQUEST_METHOD'] == 'HEAD':
return Response(status='404 Not Found')(env, start_response)
if len(env['PATH_INFO']) > 100:
return Response(status='400 Bad Request')(env, start_response)
return Response(status='201 Created')(env, start_response)
if (env['PATH_INFO'].startswith('/delete_works/')
and env['REQUEST_METHOD'] == 'DELETE'):
self.delete_paths.append(env['PATH_INFO'])
if len(env['PATH_INFO']) > self.max_pathlen:
return Response(status='400 Bad Request')(env, start_response)
if env['PATH_INFO'].endswith('404'):
return Response(status='404 Not Found')(env, start_response)
if env['PATH_INFO'].endswith('badutf8'):
return Response(
status='412 Precondition Failed')(env, start_response)
return Response(status='204 No Content')(env, start_response)
if env['PATH_INFO'].startswith('/delete_cont_fail/'):
return Response(status='409 Conflict')(env, start_response)
if env['PATH_INFO'].startswith('/broke/'):
return Response(status='500 Internal Error')(env, start_response)
if env['PATH_INFO'].startswith('/delete_cont_success_after_attempts/'):
if self.del_cont_cur_call < self.del_cont_total_calls:
self.del_cont_cur_call += 1
return Response(status='409 Conflict')(env, start_response)
else:
return Response(status='204 No Content')(env, start_response)
def build_dir_tree(start_path, tree_obj):
if isinstance(tree_obj, list):
for obj in tree_obj:
build_dir_tree(start_path, obj)
if isinstance(tree_obj, dict):
for dir_name, obj in tree_obj.iteritems():
dir_path = os.path.join(start_path, dir_name)
os.mkdir(dir_path)
build_dir_tree(dir_path, obj)
if isinstance(tree_obj, unicode):
tree_obj = tree_obj.encode('utf8')
if isinstance(tree_obj, str):
obj_path = os.path.join(start_path, tree_obj)
with open(obj_path, 'w+') as tree_file:
tree_file.write('testing')
def build_tar_tree(tar, start_path, tree_obj, base_path=''):
if isinstance(tree_obj, list):
for obj in tree_obj:
build_tar_tree(tar, start_path, obj, base_path=base_path)
if isinstance(tree_obj, dict):
for dir_name, obj in tree_obj.iteritems():
dir_path = os.path.join(start_path, dir_name)
tar_info = tarfile.TarInfo(dir_path[len(base_path):])
tar_info.type = tarfile.DIRTYPE
tar.addfile(tar_info)
build_tar_tree(tar, dir_path, obj, base_path=base_path)
if isinstance(tree_obj, unicode):
tree_obj = tree_obj.encode('utf8')
if isinstance(tree_obj, str):
obj_path = os.path.join(start_path, tree_obj)
tar_info = tarfile.TarInfo('./' + obj_path[len(base_path):])
tar.addfile(tar_info)
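# Illustrative sketch (not part of the original tests): build a small archive from the
# nested dict/list tree format the helpers above accept. `tmpdir` is caller-supplied and
# the tree contents are made up.
def _example_build_tar(tmpdir):
    tree = [{'cont': [{'sub': ['obj1', 'obj2']}, 'obj3']}]
    tar_path = os.path.join(tmpdir, 'example.tar')
    tar = tarfile.open(name=tar_path, mode='w')
    build_tar_tree(tar, tmpdir, tree, base_path=tmpdir + '/')
    tar.close()
    return tar_path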
class TestUntar(unittest.TestCase):
def setUp(self):
self.app = FakeApp()
self.bulk = bulk.filter_factory({})(self.app)
self.testdir = mkdtemp(suffix='tmp_test_bulk')
def tearDown(self):
self.app.calls = 0
rmtree(self.testdir, ignore_errors=1)
def handle_extract_and_iter(self, req, compress_format,
out_content_type='application/json'):
resp_body = ''.join(
self.bulk.handle_extract_iter(req, compress_format,
out_content_type=out_content_type))
return resp_body
def test_create_container_for_path(self):
req = Request.blank('/')
self.assertEquals(
self.bulk.create_container(req, '/create_cont/acc/cont'),
True)
self.assertEquals(self.app.calls, 2)
self.assertRaises(
bulk.CreateContainerError,
self.bulk.create_container,
req, '/create_cont_fail/acc/cont')
self.assertEquals(self.app.calls, 3)
def test_extract_tar_works(self):
# On systems where $TMPDIR is long (like OS X), we need to do this
# or else every upload will fail due to the path being too long.
self.app.max_pathlen += len(self.testdir)
for compress_format in ['', 'gz', 'bz2']:
base_name = 'base_works_%s' % compress_format
dir_tree = [
{base_name: [{'sub_dir1': ['sub1_file1', 'sub1_file2']},
{'sub_dir2': ['sub2_file1', u'test obj \u2661']},
'sub_file1',
{'sub_dir3': [{'sub4_dir1': '../sub4 file1'}]},
{'sub_dir4': None},
]}]
build_dir_tree(self.testdir, dir_tree)
mode = 'w'
extension = ''
if compress_format:
mode += ':' + compress_format
extension += '.' + compress_format
tar = tarfile.open(name=os.path.join(self.testdir,
'tar_works.tar' + extension),
mode=mode)
tar.add(os.path.join(self.testdir, base_name))
tar.close()
req = Request.blank('/tar_works/acc/cont/')
req.environ['wsgi.input'] = open(
os.path.join(self.testdir, 'tar_works.tar' + extension))
req.headers['transfer-encoding'] = 'chunked'
resp_body = self.handle_extract_and_iter(req, compress_format)
resp_data = utils.json.loads(resp_body)
self.assertEquals(resp_data['Number Files Created'], 6)
# test out xml
req = Request.blank('/tar_works/acc/cont/')
req.environ['wsgi.input'] = open(
os.path.join(self.testdir, 'tar_works.tar' + extension))
req.headers['transfer-encoding'] = 'chunked'
resp_body = self.handle_extract_and_iter(
req, compress_format, 'application/xml')
self.assert_('<response_status>201 Created</response_status>' in
resp_body)
self.assert_('<number_files_created>6</number_files_created>' in
resp_body)
# test out nonexistent format
req = Request.blank('/tar_works/acc/cont/?extract-archive=tar',
headers={'Accept': 'good_xml'})
req.environ['REQUEST_METHOD'] = 'PUT'
req.environ['wsgi.input'] = open(
os.path.join(self.testdir, 'tar_works.tar' + extension))
req.headers['transfer-encoding'] = 'chunked'
def fake_start_response(*args, **kwargs):
pass
app_iter = self.bulk(req.environ, fake_start_response)
resp_body = ''.join([i for i in app_iter])
self.assert_('Response Status: 406' in resp_body)
def test_extract_call(self):
base_name = 'base_works_gz'
dir_tree = [
{base_name: [{'sub_dir1': ['sub1_file1', 'sub1_file2']},
{'sub_dir2': ['sub2_file1', 'sub2_file2']},
'sub_file1',
{'sub_dir3': [{'sub4_dir1': 'sub4_file1'}]}]}]
build_dir_tree(self.testdir, dir_tree)
tar = tarfile.open(name=os.path.join(self.testdir,
'tar_works.tar.gz'),
mode='w:gz')
tar.add(os.path.join(self.testdir, base_name))
tar.close()
def fake_start_response(*args, **kwargs):
pass
req = Request.blank('/tar_works/acc/cont/?extract-archive=tar.gz')
req.environ['wsgi.input'] = open(
os.path.join(self.testdir, 'tar_works.tar.gz'))
self.bulk(req.environ, fake_start_response)
self.assertEquals(self.app.calls, 1)
self.app.calls = 0
req.environ['wsgi.input'] = open(
os.path.join(self.testdir, 'tar_works.tar.gz'))
req.headers['transfer-encoding'] = 'Chunked'
req.method = 'PUT'
app_iter = self.bulk(req.environ, fake_start_response)
list(app_iter) # iter over resp
self.assertEquals(self.app.calls, 7)
self.app.calls = 0
req = Request.blank('/tar_works/acc/cont/?extract-archive=bad')
req.method = 'PUT'
req.headers['transfer-encoding'] = 'Chunked'
req.environ['wsgi.input'] = open(
os.path.join(self.testdir, 'tar_works.tar.gz'))
t = self.bulk(req.environ, fake_start_response)
self.assertEquals(t[0], "Unsupported archive format")
tar = tarfile.open(name=os.path.join(self.testdir,
'tar_works.tar'),
mode='w')
tar.add(os.path.join(self.testdir, base_name))
tar.close()
self.app.calls = 0
req = Request.blank('/tar_works/acc/cont/?extract-archive=tar')
req.method = 'PUT'
req.headers['transfer-encoding'] = 'Chunked'
req.environ['wsgi.input'] = open(
os.path.join(self.testdir, 'tar_works.tar'))
app_iter = self.bulk(req.environ, fake_start_response)
list(app_iter) # iter over resp
self.assertEquals(self.app.calls, 7)
def test_bad_container(self):
req = Request.blank('/invalid/', body='')
resp_body = self.handle_extract_and_iter(req, '')
self.assertTrue('404 Not Found' in resp_body)
def test_content_length_required(self):
req = Request.blank('/create_cont_fail/acc/cont')
resp_body = self.handle_extract_and_iter(req, '')
self.assertTrue('411 Length Required' in resp_body)
def test_bad_tar(self):
req = Request.blank('/create_cont_fail/acc/cont', body='')
def bad_open(*args, **kwargs):
raise zlib.error('bad tar')
with patch.object(tarfile, 'open', bad_open):
resp_body = self.handle_extract_and_iter(req, '')
self.assertTrue('400 Bad Request' in resp_body)
def build_tar(self, dir_tree=None):
if not dir_tree:
dir_tree = [
{'base_fails1': [{'sub_dir1': ['sub1_file1']},
{'sub_dir2': ['sub2_file1', 'sub2_file2']},
'f' * 101,
{'sub_dir3': [{'sub4_dir1': 'sub4_file1'}]}]}]
tar = tarfile.open(name=os.path.join(self.testdir, 'tar_fails.tar'),
mode='w')
build_tar_tree(tar, self.testdir, dir_tree,
base_path=self.testdir + '/')
tar.close()
return tar
def test_extract_tar_with_basefile(self):
dir_tree = [
'base_lvl_file', 'another_base_file',
{'base_fails1': [{'sub_dir1': ['sub1_file1']},
{'sub_dir2': ['sub2_file1', 'sub2_file2']},
{'sub_dir3': [{'sub4_dir1': 'sub4_file1'}]}]}]
self.build_tar(dir_tree)
req = Request.blank('/tar_works/acc/')
req.environ['wsgi.input'] = open(os.path.join(self.testdir,
'tar_fails.tar'))
req.headers['transfer-encoding'] = 'chunked'
resp_body = self.handle_extract_and_iter(req, '')
resp_data = utils.json.loads(resp_body)
self.assertEquals(resp_data['Number Files Created'], 4)
def test_extract_tar_fail_cont_401(self):
self.build_tar()
req = Request.blank('/unauth/acc/',
headers={'Accept': 'application/json'})
req.environ['wsgi.input'] = open(os.path.join(self.testdir,
'tar_fails.tar'))
req.headers['transfer-encoding'] = 'chunked'
resp_body = self.handle_extract_and_iter(req, '')
self.assertEquals(self.app.calls, 1)
resp_data = utils.json.loads(resp_body)
self.assertEquals(resp_data['Response Status'], '401 Unauthorized')
self.assertEquals(resp_data['Errors'], [])
def test_extract_tar_fail_obj_401(self):
self.build_tar()
req = Request.blank('/create_obj_unauth/acc/cont/',
headers={'Accept': 'application/json'})
req.environ['wsgi.input'] = open(os.path.join(self.testdir,
'tar_fails.tar'))
req.headers['transfer-encoding'] = 'chunked'
resp_body = self.handle_extract_and_iter(req, '')
self.assertEquals(self.app.calls, 2)
resp_data = utils.json.loads(resp_body)
self.assertEquals(resp_data['Response Status'], '401 Unauthorized')
self.assertEquals(
resp_data['Errors'],
[['cont/base_fails1/sub_dir1/sub1_file1', '401 Unauthorized']])
def test_extract_tar_fail_obj_name_len(self):
self.build_tar()
req = Request.blank('/tar_works/acc/cont/',
headers={'Accept': 'application/json'})
req.environ['wsgi.input'] = open(os.path.join(self.testdir,
'tar_fails.tar'))
req.headers['transfer-encoding'] = 'chunked'
resp_body = self.handle_extract_and_iter(req, '')
self.assertEquals(self.app.calls, 6)
resp_data = utils.json.loads(resp_body)
self.assertEquals(resp_data['Number Files Created'], 4)
self.assertEquals(
resp_data['Errors'],
[['cont/base_fails1/' + ('f' * 101), '400 Bad Request']])
def test_extract_tar_fail_compress_type(self):
self.build_tar()
req = Request.blank('/tar_works/acc/cont/',
headers={'Accept': 'application/json'})
req.environ['wsgi.input'] = open(os.path.join(self.testdir,
'tar_fails.tar'))
req.headers['transfer-encoding'] = 'chunked'
resp_body = self.handle_extract_and_iter(req, 'gz')
self.assertEquals(self.app.calls, 0)
resp_data = utils.json.loads(resp_body)
self.assertEquals(resp_data['Response Status'], '400 Bad Request')
self.assertEquals(
resp_data['Response Body'].lower(),
'invalid tar file: not a gzip file')
def test_extract_tar_fail_max_failed_extractions(self):
self.build_tar()
with patch.object(self.bulk, 'max_failed_extractions', 1):
self.app.calls = 0
req = Request.blank('/tar_works/acc/cont/',
headers={'Accept': 'application/json'})
req.environ['wsgi.input'] = open(os.path.join(self.testdir,
'tar_fails.tar'))
req.headers['transfer-encoding'] = 'chunked'
resp_body = self.handle_extract_and_iter(req, '')
self.assertEquals(self.app.calls, 5)
resp_data = utils.json.loads(resp_body)
self.assertEquals(resp_data['Number Files Created'], 3)
self.assertEquals(
resp_data['Errors'],
[['cont/base_fails1/' + ('f' * 101), '400 Bad Request']])
@patch.object(constraints, 'MAX_FILE_SIZE', 4)
def test_extract_tar_fail_max_file_size(self):
tar = self.build_tar()
dir_tree = [{'test': [{'sub_dir1': ['sub1_file1']}]}]
build_dir_tree(self.testdir, dir_tree)
tar = tarfile.open(name=os.path.join(self.testdir,
'tar_works.tar'),
mode='w')
tar.add(os.path.join(self.testdir, 'test'))
tar.close()
self.app.calls = 0
req = Request.blank('/tar_works/acc/cont/',
headers={'Accept': 'application/json'})
req.environ['wsgi.input'] = open(
os.path.join(self.testdir, 'tar_works.tar'))
req.headers['transfer-encoding'] = 'chunked'
resp_body = self.handle_extract_and_iter(req, '')
resp_data = utils.json.loads(resp_body)
self.assertEquals(
resp_data['Errors'],
[['cont' + self.testdir + '/test/sub_dir1/sub1_file1',
'413 Request Entity Too Large']])
def test_extract_tar_fail_max_cont(self):
dir_tree = [{'sub_dir1': ['sub1_file1']},
{'sub_dir2': ['sub2_file1', 'sub2_file2']},
'f' * 101,
{'sub_dir3': [{'sub4_dir1': 'sub4_file1'}]}]
self.build_tar(dir_tree)
with patch.object(self.bulk, 'max_containers', 1):
self.app.calls = 0
body = open(os.path.join(self.testdir, 'tar_fails.tar')).read()
req = Request.blank('/tar_works_cont_head_fail/acc/', body=body,
headers={'Accept': 'application/json'})
req.headers['transfer-encoding'] = 'chunked'
resp_body = self.handle_extract_and_iter(req, '')
self.assertEquals(self.app.calls, 5)
resp_data = utils.json.loads(resp_body)
self.assertEquals(resp_data['Response Status'], '400 Bad Request')
self.assertEquals(
resp_data['Response Body'],
'More than 1 containers to create from tar.')
def test_extract_tar_fail_create_cont(self):
dir_tree = [{'base_fails1': [
{'sub_dir1': ['sub1_file1']},
{'sub_dir2': ['sub2_file1', 'sub2_file2']},
{'./sub_dir3': [{'sub4_dir1': 'sub4_file1'}]}]}]
self.build_tar(dir_tree)
req = Request.blank('/create_cont_fail/acc/cont/',
headers={'Accept': 'application/json'})
req.environ['wsgi.input'] = open(os.path.join(self.testdir,
'tar_fails.tar'))
req.headers['transfer-encoding'] = 'chunked'
resp_body = self.handle_extract_and_iter(req, '')
resp_data = utils.json.loads(resp_body)
self.assertEquals(self.app.calls, 5)
self.assertEquals(len(resp_data['Errors']), 5)
def test_extract_tar_fail_create_cont_value_err(self):
self.build_tar()
req = Request.blank('/create_cont_fail/acc/cont/',
headers={'Accept': 'application/json'})
req.environ['wsgi.input'] = open(os.path.join(self.testdir,
'tar_fails.tar'))
req.headers['transfer-encoding'] = 'chunked'
def bad_create(req, path):
raise ValueError('Test')
with patch.object(self.bulk, 'create_container', bad_create):
resp_body = self.handle_extract_and_iter(req, '')
resp_data = utils.json.loads(resp_body)
self.assertEquals(self.app.calls, 0)
self.assertEquals(len(resp_data['Errors']), 5)
self.assertEquals(
resp_data['Errors'][0],
['cont/base_fails1/sub_dir1/sub1_file1', '400 Bad Request'])
def test_extract_tar_fail_unicode(self):
dir_tree = [{'sub_dir1': ['sub1_file1']},
{'sub_dir2': ['sub2\xdefile1', 'sub2_file2']},
{'sub_\xdedir3': [{'sub4_dir1': 'sub4_file1'}]}]
self.build_tar(dir_tree)
req = Request.blank('/tar_works/acc/',
headers={'Accept': 'application/json'})
req.environ['wsgi.input'] = open(os.path.join(self.testdir,
'tar_fails.tar'))
req.headers['transfer-encoding'] = 'chunked'
resp_body = self.handle_extract_and_iter(req, '')
resp_data = utils.json.loads(resp_body)
self.assertEquals(self.app.calls, 4)
self.assertEquals(resp_data['Number Files Created'], 2)
self.assertEquals(resp_data['Response Status'], '400 Bad Request')
self.assertEquals(
resp_data['Errors'],
[['sub_dir2/sub2%DEfile1', '412 Precondition Failed'],
['sub_%DEdir3/sub4_dir1/sub4_file1', '412 Precondition Failed']])
def test_get_response_body(self):
txt_body = bulk.get_response_body(
'bad_formay', {'hey': 'there'}, [['json > xml', '202 Accepted']])
self.assert_('hey: there' in txt_body)
xml_body = bulk.get_response_body(
'text/xml', {'hey': 'there'}, [['json > xml', '202 Accepted']])
self.assert_('>' in xml_body)
class TestDelete(unittest.TestCase):
def setUp(self):
self.app = FakeApp()
self.bulk = bulk.filter_factory({})(self.app)
def tearDown(self):
self.app.calls = 0
self.app.delete_paths = []
def handle_delete_and_iter(self, req, out_content_type='application/json'):
resp_body = ''.join(self.bulk.handle_delete_iter(
req, out_content_type=out_content_type))
return resp_body
def test_bulk_delete_uses_predefined_object_errors(self):
req = Request.blank('/delete_works/AUTH_Acc')
objs_to_delete = [
{'name': '/c/file_a'},
{'name': '/c/file_b', 'error': {'code': HTTP_NOT_FOUND,
'message': 'not found'}},
{'name': '/c/file_c', 'error': {'code': HTTP_UNAUTHORIZED,
'message': 'unauthorized'}},
{'name': '/c/file_d'}]
resp_body = ''.join(self.bulk.handle_delete_iter(
req, objs_to_delete=objs_to_delete,
out_content_type='application/json'))
self.assertEquals(
self.app.delete_paths, ['/delete_works/AUTH_Acc/c/file_a',
'/delete_works/AUTH_Acc/c/file_d'])
self.assertEquals(self.app.calls, 2)
resp_data = utils.json.loads(resp_body)
self.assertEquals(resp_data['Response Status'], '400 Bad Request')
self.assertEquals(resp_data['Number Deleted'], 2)
self.assertEquals(resp_data['Number Not Found'], 1)
self.assertEquals(resp_data['Errors'],
[['/c/file_c', 'unauthorized']])
def test_bulk_delete_works_with_POST_verb(self):
req = Request.blank('/delete_works/AUTH_Acc', body='/c/f\n/c/f404',
headers={'Accept': 'application/json'})
req.method = 'POST'
resp_body = self.handle_delete_and_iter(req)
self.assertEquals(
self.app.delete_paths,
['/delete_works/AUTH_Acc/c/f', '/delete_works/AUTH_Acc/c/f404'])
self.assertEquals(self.app.calls, 2)
resp_data = utils.json.loads(resp_body)
self.assertEquals(resp_data['Number Deleted'], 1)
self.assertEquals(resp_data['Number Not Found'], 1)
def test_bulk_delete_works_with_DELETE_verb(self):
req = Request.blank('/delete_works/AUTH_Acc', body='/c/f\n/c/f404',
headers={'Accept': 'application/json'})
req.method = 'DELETE'
resp_body = self.handle_delete_and_iter(req)
self.assertEquals(
self.app.delete_paths,
['/delete_works/AUTH_Acc/c/f', '/delete_works/AUTH_Acc/c/f404'])
self.assertEquals(self.app.calls, 2)
resp_data = utils.json.loads(resp_body)
self.assertEquals(resp_data['Number Deleted'], 1)
self.assertEquals(resp_data['Number Not Found'], 1)
def test_bulk_delete_bad_content_type(self):
req = Request.blank('/delete_works/AUTH_Acc',
headers={'Accept': 'badformat'})
req = Request.blank('/delete_works/AUTH_Acc',
headers={'Accept': 'application/json',
'Content-Type': 'text/xml'})
req.method = 'POST'
req.environ['wsgi.input'] = StringIO('/c/f\n/c/f404')
resp_body = self.handle_delete_and_iter(req)
resp_data = utils.json.loads(resp_body)
self.assertEquals(resp_data['Response Status'], '406 Not Acceptable')
def test_bulk_delete_call_and_content_type(self):
def fake_start_response(*args, **kwargs):
self.assertEquals(args[1][0], ('Content-Type', 'application/json'))
req = Request.blank('/delete_works/AUTH_Acc?bulk-delete')
req.method = 'POST'
req.headers['Transfer-Encoding'] = 'chunked'
req.headers['Accept'] = 'application/json'
req.environ['wsgi.input'] = StringIO('/c/f%20')
list(self.bulk(req.environ, fake_start_response)) # iterate over resp
self.assertEquals(
self.app.delete_paths, ['/delete_works/AUTH_Acc/c/f '])
self.assertEquals(self.app.calls, 1)
def test_bulk_delete_get_objs(self):
req = Request.blank('/delete_works/AUTH_Acc', body='1%20\r\n2\r\n')
req.method = 'POST'
with patch.object(self.bulk, 'max_deletes_per_request', 2):
results = self.bulk.get_objs_to_delete(req)
self.assertEquals(results, [{'name': '1 '}, {'name': '2'}])
with patch.object(self.bulk, 'max_path_length', 2):
results = []
req.environ['wsgi.input'] = StringIO('1\n2\n3')
results = self.bulk.get_objs_to_delete(req)
self.assertEquals(results,
[{'name': '1'}, {'name': '2'}, {'name': '3'}])
with patch.object(self.bulk, 'max_deletes_per_request', 9):
with patch.object(self.bulk, 'max_path_length', 1):
req_body = '\n'.join([str(i) for i in xrange(10)])
req = Request.blank('/delete_works/AUTH_Acc', body=req_body)
self.assertRaises(
HTTPException, self.bulk.get_objs_to_delete, req)
def test_bulk_delete_works_extra_newlines_extra_quoting(self):
req = Request.blank('/delete_works/AUTH_Acc',
body='/c/f\n\n\n/c/f404\n\n\n/c/%2525',
headers={'Accept': 'application/json'})
req.method = 'POST'
resp_body = self.handle_delete_and_iter(req)
self.assertEquals(
self.app.delete_paths,
['/delete_works/AUTH_Acc/c/f',
'/delete_works/AUTH_Acc/c/f404',
'/delete_works/AUTH_Acc/c/%25'])
self.assertEquals(self.app.calls, 3)
resp_data = utils.json.loads(resp_body)
self.assertEquals(resp_data['Number Deleted'], 2)
self.assertEquals(resp_data['Number Not Found'], 1)
def test_bulk_delete_too_many_newlines(self):
req = Request.blank('/delete_works/AUTH_Acc')
req.method = 'POST'
data = '\n\n' * self.bulk.max_deletes_per_request
req.environ['wsgi.input'] = StringIO(data)
req.content_length = len(data)
resp_body = self.handle_delete_and_iter(req)
self.assertTrue('413 Request Entity Too Large' in resp_body)
def test_bulk_delete_works_unicode(self):
body = (u'/c/ obj \u2661\r\n'.encode('utf8') +
'c/ objbadutf8\r\n' +
'/c/f\xdebadutf8\n')
req = Request.blank('/delete_works/AUTH_Acc', body=body,
headers={'Accept': 'application/json'})
req.method = 'POST'
resp_body = self.handle_delete_and_iter(req)
self.assertEquals(
self.app.delete_paths,
['/delete_works/AUTH_Acc/c/ obj \xe2\x99\xa1',
'/delete_works/AUTH_Acc/c/ objbadutf8'])
self.assertEquals(self.app.calls, 2)
resp_data = utils.json.loads(resp_body)
self.assertEquals(resp_data['Number Deleted'], 1)
self.assertEquals(len(resp_data['Errors']), 2)
self.assertEquals(
resp_data['Errors'],
[[urllib.quote('c/ objbadutf8'), '412 Precondition Failed'],
[urllib.quote('/c/f\xdebadutf8'), '412 Precondition Failed']])
def test_bulk_delete_no_body(self):
req = Request.blank('/unauth/AUTH_acc/')
resp_body = self.handle_delete_and_iter(req)
self.assertTrue('411 Length Required' in resp_body)
def test_bulk_delete_no_files_in_body(self):
req = Request.blank('/unauth/AUTH_acc/', body=' ')
resp_body = self.handle_delete_and_iter(req)
self.assertTrue('400 Bad Request' in resp_body)
def test_bulk_delete_unauth(self):
req = Request.blank('/unauth/AUTH_acc/', body='/c/f\n/c/f_ok\n',
headers={'Accept': 'application/json'})
req.method = 'POST'
resp_body = self.handle_delete_and_iter(req)
self.assertEquals(self.app.calls, 2)
resp_data = utils.json.loads(resp_body)
self.assertEquals(resp_data['Errors'], [['/c/f', '401 Unauthorized']])
self.assertEquals(resp_data['Response Status'], '400 Bad Request')
self.assertEquals(resp_data['Number Deleted'], 1)
def test_bulk_delete_500_resp(self):
req = Request.blank('/broke/AUTH_acc/', body='/c/f\nc/f2\n',
headers={'Accept': 'application/json'})
req.method = 'POST'
resp_body = self.handle_delete_and_iter(req)
resp_data = utils.json.loads(resp_body)
self.assertEquals(
resp_data['Errors'],
[['/c/f', '500 Internal Error'], ['c/f2', '500 Internal Error']])
self.assertEquals(resp_data['Response Status'], '502 Bad Gateway')
def test_bulk_delete_bad_path(self):
req = Request.blank('/delete_cont_fail/')
resp_body = self.handle_delete_and_iter(req)
self.assertTrue('404 Not Found' in resp_body)
def test_bulk_delete_container_delete(self):
req = Request.blank('/delete_cont_fail/AUTH_Acc', body='c\n',
headers={'Accept': 'application/json'})
req.method = 'POST'
with patch('swift.common.middleware.bulk.sleep',
new=mock.MagicMock(wraps=sleep,
return_value=None)) as mock_sleep:
resp_body = self.handle_delete_and_iter(req)
resp_data = utils.json.loads(resp_body)
self.assertEquals(resp_data['Number Deleted'], 0)
self.assertEquals(resp_data['Errors'], [['c', '409 Conflict']])
self.assertEquals(resp_data['Response Status'], '400 Bad Request')
self.assertEquals([], mock_sleep.call_args_list)
def test_bulk_delete_container_delete_retry_and_fails(self):
self.bulk.retry_count = 3
req = Request.blank('/delete_cont_fail/AUTH_Acc', body='c\n',
headers={'Accept': 'application/json'})
req.method = 'POST'
with patch('swift.common.middleware.bulk.sleep',
new=mock.MagicMock(wraps=sleep,
return_value=None)) as mock_sleep:
resp_body = self.handle_delete_and_iter(req)
resp_data = utils.json.loads(resp_body)
self.assertEquals(resp_data['Number Deleted'], 0)
self.assertEquals(resp_data['Errors'], [['c', '409 Conflict']])
self.assertEquals(resp_data['Response Status'], '400 Bad Request')
self.assertEquals([call(self.bulk.retry_interval),
call(self.bulk.retry_interval ** 2),
call(self.bulk.retry_interval ** 3)],
mock_sleep.call_args_list)
def test_bulk_delete_container_delete_retry_and_success(self):
self.bulk.retry_count = 3
self.app.del_container_total = 2
req = Request.blank('/delete_cont_success_after_attempts/AUTH_Acc',
body='c\n', headers={'Accept': 'application/json'})
req.method = 'DELETE'
with patch('swift.common.middleware.bulk.sleep',
new=mock.MagicMock(wraps=sleep,
return_value=None)) as mock_sleep:
resp_body = self.handle_delete_and_iter(req)
resp_data = utils.json.loads(resp_body)
self.assertEquals(resp_data['Number Deleted'], 1)
self.assertEquals(resp_data['Errors'], [])
self.assertEquals(resp_data['Response Status'], '200 OK')
self.assertEquals([call(self.bulk.retry_interval),
call(self.bulk.retry_interval ** 2)],
mock_sleep.call_args_list)
def test_bulk_delete_bad_file_too_long(self):
req = Request.blank('/delete_works/AUTH_Acc',
headers={'Accept': 'application/json'})
req.method = 'POST'
bad_file = 'c/' + ('1' * self.bulk.max_path_length)
data = '/c/f\n' + bad_file + '\n/c/f'
req.environ['wsgi.input'] = StringIO(data)
req.headers['Transfer-Encoding'] = 'chunked'
resp_body = self.handle_delete_and_iter(req)
resp_data = utils.json.loads(resp_body)
self.assertEquals(resp_data['Number Deleted'], 2)
self.assertEquals(resp_data['Errors'], [[bad_file, '400 Bad Request']])
self.assertEquals(resp_data['Response Status'], '400 Bad Request')
def test_bulk_delete_bad_file_over_twice_max_length(self):
body = '/c/f\nc/' + ('123456' * self.bulk.max_path_length) + '\n'
req = Request.blank('/delete_works/AUTH_Acc', body=body)
req.method = 'POST'
resp_body = self.handle_delete_and_iter(req)
self.assertTrue('400 Bad Request' in resp_body)
def test_bulk_delete_max_failures(self):
req = Request.blank('/unauth/AUTH_Acc', body='/c/f1\n/c/f2\n/c/f3',
headers={'Accept': 'application/json'})
req.method = 'POST'
with patch.object(self.bulk, 'max_failed_deletes', 2):
resp_body = self.handle_delete_and_iter(req)
self.assertEquals(self.app.calls, 2)
resp_data = utils.json.loads(resp_body)
self.assertEquals(resp_data['Response Status'], '400 Bad Request')
self.assertEquals(resp_data['Response Body'],
'Max delete failures exceeded')
self.assertEquals(resp_data['Errors'],
[['/c/f1', '401 Unauthorized'],
['/c/f2', '401 Unauthorized']])
class TestSwiftInfo(unittest.TestCase):
def setUp(self):
utils._swift_info = {}
utils._swift_admin_info = {}
def test_registered_defaults(self):
bulk.filter_factory({})
swift_info = utils.get_swift_info()
self.assertTrue('bulk_upload' in swift_info)
self.assertTrue(isinstance(
swift_info['bulk_upload'].get('max_containers_per_extraction'),
numbers.Integral))
self.assertTrue(isinstance(
swift_info['bulk_upload'].get('max_failed_extractions'),
numbers.Integral))
self.assertTrue('bulk_delete' in swift_info)
self.assertTrue(isinstance(
swift_info['bulk_delete'].get('max_deletes_per_request'),
numbers.Integral))
self.assertTrue(isinstance(
swift_info['bulk_delete'].get('max_failed_deletes'),
numbers.Integral))
if __name__ == '__main__':
unittest.main()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import paste.urlmap
import re
import urllib2
from nova.api.openstack import wsgi
from nova.openstack.common import log as logging
_quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"'
_option_header_piece_re = re.compile(r';\s*([^\s;=]+|%s)\s*'
r'(?:=\s*([^;]+|%s))?\s*' %
(_quoted_string_re, _quoted_string_re))
LOG = logging.getLogger(__name__)
def unquote_header_value(value):
"""Unquotes a header value.
This does not use the real unquoting but what browsers are actually
using for quoting.
:param value: the header value to unquote.
"""
if value and value[0] == value[-1] == '"':
# this is not the real unquoting, but fixing this so that the
# RFC is met will result in bugs with internet explorer and
# probably some other browsers as well. IE for example is
# uploading files with "C:\foo\bar.txt" as filename
value = value[1:-1]
return value
def parse_list_header(value):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Quotes are removed automatically after parsing.
The return value is a standard :class:`list`:
>>> parse_list_header('token, "quoted value"')
['token', 'quoted value']
:param value: a string with a list header.
:return: :class:`list`
"""
result = []
for item in urllib2.parse_http_list(value):
if item[:1] == item[-1:] == '"':
item = unquote_header_value(item[1:-1])
result.append(item)
return result
def parse_options_header(value):
"""Parse a ``Content-Type`` like header into a tuple with the content
type and the options:
>>> parse_options_header('Content-Type: text/html; mimetype=text/html')
('Content-Type:', {'mimetype': 'text/html'})
:param value: the header to parse.
:return: (str, options)
"""
def _tokenize(string):
for match in _option_header_piece_re.finditer(string):
key, value = match.groups()
key = unquote_header_value(key)
if value is not None:
value = unquote_header_value(value)
yield key, value
if not value:
return '', {}
parts = _tokenize(';' + value)
name = parts.next()[0]
extra = dict(parts)
return name, extra
class Accept(object):
def __init__(self, value):
self._content_types = [parse_options_header(v) for v in
parse_list_header(value)]
def best_match(self, supported_content_types):
# FIXME: Should we have a more sophisticated matching algorithm that
# takes into account the version as well?
best_quality = -1
best_content_type = None
best_params = {}
best_match = '*/*'
for content_type in supported_content_types:
for content_mask, params in self._content_types:
try:
quality = float(params.get('q', 1))
except ValueError:
continue
if quality < best_quality:
continue
elif best_quality == quality:
if best_match.count('*') <= content_mask.count('*'):
continue
if self._match_mask(content_mask, content_type):
best_quality = quality
best_content_type = content_type
best_params = params
best_match = content_mask
return best_content_type, best_params
def content_type_params(self, best_content_type):
"""Find parameters in Accept header for given content type."""
for content_type, params in self._content_types:
if best_content_type == content_type:
return params
return {}
def _match_mask(self, mask, content_type):
if '*' not in mask:
return content_type == mask
if mask == '*/*':
return True
mask_major = mask[:-2]
content_type_major = content_type.split('/', 1)[0]
return content_type_major == mask_major
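# Illustrative sketch (not part of the original module): picking the best supported type for
# a typical Accept header. The header string and the supported list are made-up examples.
def _example_accept_match():
    accept = Accept('application/json;q=0.8, application/xml;q=0.2')
    content_type, params = accept.best_match(['application/json', 'application/xml'])
    return content_type, params.get('q')  # ('application/json', '0.8')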
def urlmap_factory(loader, global_conf, **local_conf):
if 'not_found_app' in local_conf:
not_found_app = local_conf.pop('not_found_app')
else:
not_found_app = global_conf.get('not_found_app')
if not_found_app:
not_found_app = loader.get_app(not_found_app, global_conf=global_conf)
urlmap = URLMap(not_found_app=not_found_app)
for path, app_name in local_conf.items():
path = paste.urlmap.parse_path_expression(path)
app = loader.get_app(app_name, global_conf=global_conf)
urlmap[path] = app
return urlmap
class URLMap(paste.urlmap.URLMap):
def _match(self, host, port, path_info):
"""Find longest match for a given URL path."""
for (domain, app_url), app in self.applications:
if domain and domain != host and domain != host + ':' + port:
continue
if (path_info == app_url
or path_info.startswith(app_url + '/')):
return app, app_url
return None, None
def _set_script_name(self, app, app_url):
def wrap(environ, start_response):
environ['SCRIPT_NAME'] += app_url
return app(environ, start_response)
return wrap
def _munge_path(self, app, path_info, app_url):
def wrap(environ, start_response):
environ['SCRIPT_NAME'] += app_url
environ['PATH_INFO'] = path_info[len(app_url):]
return app(environ, start_response)
return wrap
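    # Illustrative note (not from the original module): with app_url '/v1.1'
    # and path_info '/v1.1/servers/detail', the wrapper returned by
    # _munge_path extends SCRIPT_NAME with '/v1.1' and rewrites PATH_INFO to
    # '/servers/detail' before invoking the wrapped app.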
def _path_strategy(self, host, port, path_info):
"""Check path suffix for MIME type and path prefix for API version."""
mime_type = app = app_url = None
parts = path_info.rsplit('.', 1)
if len(parts) > 1:
possible_type = 'application/' + parts[1]
if possible_type in wsgi.SUPPORTED_CONTENT_TYPES:
mime_type = possible_type
parts = path_info.split('/')
if len(parts) > 1:
possible_app, possible_app_url = self._match(host, port, path_info)
# Don't use prefix if it ends up matching default
if possible_app and possible_app_url:
app_url = possible_app_url
app = self._munge_path(possible_app, path_info, app_url)
return mime_type, app, app_url
def _content_type_strategy(self, host, port, environ):
"""Check Content-Type header for API version."""
app = None
params = parse_options_header(environ.get('CONTENT_TYPE', ''))[1]
if 'version' in params:
app, app_url = self._match(host, port, '/v' + params['version'])
if app:
app = self._set_script_name(app, app_url)
return app
def _accept_strategy(self, host, port, environ, supported_content_types):
"""Check Accept header for best matching MIME type and API version."""
accept = Accept(environ.get('HTTP_ACCEPT', ''))
app = None
# Find the best match in the Accept header
mime_type, params = accept.best_match(supported_content_types)
if 'version' in params:
app, app_url = self._match(host, port, '/v' + params['version'])
if app:
app = self._set_script_name(app, app_url)
return mime_type, app
def __call__(self, environ, start_response):
host = environ.get('HTTP_HOST', environ.get('SERVER_NAME')).lower()
if ':' in host:
host, port = host.split(':', 1)
else:
if environ['wsgi.url_scheme'] == 'http':
port = '80'
else:
port = '443'
path_info = environ['PATH_INFO']
path_info = self.normalize_url(path_info, False)[1]
# The MIME type for the response is determined in one of two ways:
# 1) URL path suffix (eg /servers/detail.json)
# 2) Accept header (eg application/json;q=0.8, application/xml;q=0.2)
# The API version is determined in one of three ways:
# 1) URL path prefix (eg /v1.1/tenant/servers/detail)
# 2) Content-Type header (eg application/json;version=1.1)
# 3) Accept header (eg application/json;q=0.8;version=1.1)
supported_content_types = list(wsgi.SUPPORTED_CONTENT_TYPES)
mime_type, app, app_url = self._path_strategy(host, port, path_info)
# Accept application/atom+xml for the index query of each API
# version mount point as well as the root index
if (app_url and app_url + '/' == path_info) or path_info == '/':
supported_content_types.append('application/atom+xml')
if not app:
app = self._content_type_strategy(host, port, environ)
if not mime_type or not app:
possible_mime_type, possible_app = self._accept_strategy(
host, port, environ, supported_content_types)
if possible_mime_type and not mime_type:
mime_type = possible_mime_type
if possible_app and not app:
app = possible_app
if not mime_type:
mime_type = 'application/json'
if not app:
# Didn't match a particular version, probably matches default
app, app_url = self._match(host, port, path_info)
if app:
app = self._munge_path(app, path_info, app_url)
if app:
environ['nova.best_content_type'] = mime_type
return app(environ, start_response)
environ['paste.urlmap_object'] = self
return self.not_found_application(environ, start_response)
|
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# External imports
from flaky import flaky
from selenium.webdriver.common.keys import Keys
# Bokeh imports
from bokeh._testing.util.selenium import RECORD, enter_text_in_element, hover_element
from bokeh.layouts import column
from bokeh.models import (
AutocompleteInput,
Circle,
ColumnDataSource,
CustomAction,
CustomJS,
Plot,
Range1d,
)
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
pytest_plugins = (
"bokeh._testing.plugins.project",
)
def modify_doc(doc):
source = ColumnDataSource(dict(x=[1, 2], y=[1, 1], val=["a", "b"]))
plot = Plot(plot_height=400, plot_width=400, x_range=Range1d(0, 1), y_range=Range1d(0, 1), min_border=0)
plot.add_glyph(source, Circle(x='x', y='y', size=20))
plot.add_tools(CustomAction(callback=CustomJS(args=dict(s=source), code=RECORD("data", "s.data"))))
input_box = AutocompleteInput(css_classes=["foo"])
input_box.title = "title"
input_box.value = "400"
input_box.completions = ["100001", "12344556", "12344557", "3194567289", "209374209374"]
def cb(attr, old, new):
source.data['val'] = [old, new]
input_box.on_change('value', cb)
doc.add_root(column(input_box, plot))
@pytest.mark.selenium
class Test_AutocompleteInput(object):
def test_displays_text_input(self, bokeh_model_page) -> None:
text_input = AutocompleteInput(css_classes=["foo"], completions = ["100001", "12344556", "12344557", "3194567289", "209374209374"])
page = bokeh_model_page(text_input)
el = page.driver.find_element_by_css_selector('.foo input')
assert el.get_attribute('type') == "text"
assert page.has_no_console_errors()
def test_displays_title(self, bokeh_model_page) -> None:
text_input = AutocompleteInput(title="title", css_classes=["foo"], completions = ["100001", "12344556", "12344557", "3194567289", "209374209374"])
page = bokeh_model_page(text_input)
el = page.driver.find_element_by_css_selector('.foo label')
assert el.text == "title"
el = page.driver.find_element_by_css_selector('.foo input')
assert el.get_attribute('placeholder') == ""
assert el.get_attribute('type') == "text"
assert page.has_no_console_errors()
def test_displays_menu(self, bokeh_model_page) -> None:
text_input = AutocompleteInput(title="title", css_classes=["foo"], completions = ["100001", "12344556", "12344557", "3194567289", "209374209374"])
page = bokeh_model_page(text_input)
el = page.driver.find_element_by_css_selector('.foo .bk-menu')
assert 'display: none;' in el.get_attribute('style')
# double click to highlight and overwrite old text
el = page.driver.find_element_by_css_selector('.foo input')
enter_text_in_element(page.driver, el, "100", click=2, enter=False)
el = page.driver.find_element_by_css_selector('.foo .bk-menu')
assert 'display: none;' not in el.get_attribute('style')
items = el.find_elements_by_tag_name("div")
assert len(items) == 1
assert items[0].text == "100001"
assert "bk-active" in items[0].get_attribute('class')
el = page.driver.find_element_by_css_selector('.foo input')
enter_text_in_element(page.driver, el, "123", click=2, enter=False)
el = page.driver.find_element_by_css_selector('.foo .bk-menu')
assert 'display: none;' not in el.get_attribute('style')
items = el.find_elements_by_tag_name("div")
assert len(items) == 2
assert items[0].text == "12344556"
assert items[1].text == "12344557"
assert "bk-active" in items[0].get_attribute('class')
assert "bk-active" not in items[1].get_attribute('class')
enter_text_in_element(page.driver, el, Keys.DOWN, click=0, enter=False)
items = el.find_elements_by_tag_name("div")
assert len(items) == 2
assert items[0].text == "12344556"
assert items[1].text == "12344557"
assert "bk-active" not in items[0].get_attribute('class')
assert "bk-active" in items[1].get_attribute('class')
assert page.has_no_console_errors()
def test_min_characters(self, bokeh_model_page) -> None:
text_input = AutocompleteInput(title="title", css_classes=["foo"],
completions = ["100001", "12344556", "12344557", "3194567289", "209374209374"],
min_characters=1)
page = bokeh_model_page(text_input)
el = page.driver.find_element_by_css_selector('.foo .bk-menu')
assert 'display: none;' in el.get_attribute('style')
# double click to highlight and overwrite old text
el = page.driver.find_element_by_css_selector('.foo input')
enter_text_in_element(page.driver, el, "1", click=2, enter=False)
el = page.driver.find_element_by_css_selector('.foo .bk-menu')
assert 'display: none;' not in el.get_attribute('style')
items = el.find_elements_by_tag_name("div")
assert len(items) == 3
assert items[0].text == "100001"
assert items[1].text == "12344556"
assert items[2].text == "12344557"
assert "bk-active" in items[0].get_attribute('class')
assert "bk-active" not in items[1].get_attribute('class')
assert "bk-active" not in items[2].get_attribute('class')
def test_arrow_cannot_escape_menu(self, bokeh_model_page) -> None:
text_input = AutocompleteInput(title="title", css_classes=["foo"], completions = ["100001", "12344556", "12344557", "3194567289", "209374209374"])
page = bokeh_model_page(text_input)
el = page.driver.find_element_by_css_selector('.foo .bk-menu')
assert 'display: none;' in el.get_attribute('style')
el = page.driver.find_element_by_css_selector('.foo input')
enter_text_in_element(page.driver, el, "123", click=2, enter=False)
el = page.driver.find_element_by_css_selector('.foo .bk-menu')
assert 'display: none;' not in el.get_attribute('style')
items = el.find_elements_by_tag_name("div")
assert len(items) == 2
assert items[0].text == "12344556"
assert items[1].text == "12344557"
assert "bk-active" in items[0].get_attribute('class')
assert "bk-active" not in items[1].get_attribute('class')
# arrow down moves to second item
enter_text_in_element(page.driver, el, Keys.DOWN, click=0, enter=False)
items = el.find_elements_by_tag_name("div")
assert len(items) == 2
assert items[0].text == "12344556"
assert items[1].text == "12344557"
assert "bk-active" not in items[0].get_attribute('class')
assert "bk-active" in items[1].get_attribute('class')
# arrow down again has no effect
enter_text_in_element(page.driver, el, Keys.DOWN, click=0, enter=False)
items = el.find_elements_by_tag_name("div")
assert len(items) == 2
assert items[0].text == "12344556"
assert items[1].text == "12344557"
assert "bk-active" not in items[0].get_attribute('class')
assert "bk-active" in items[1].get_attribute('class')
# arrow up moves to first item
enter_text_in_element(page.driver, el, Keys.UP, click=0, enter=False)
assert len(items) == 2
assert items[0].text == "12344556"
assert items[1].text == "12344557"
assert "bk-active" in items[0].get_attribute('class')
assert "bk-active" not in items[1].get_attribute('class')
# arrow up again has no effect
enter_text_in_element(page.driver, el, Keys.UP, click=0, enter=False)
assert len(items) == 2
assert items[0].text == "12344556"
assert items[1].text == "12344557"
assert "bk-active" in items[0].get_attribute('class')
assert "bk-active" not in items[1].get_attribute('class')
assert page.has_no_console_errors()
def test_mouse_hover(self, bokeh_model_page) -> None:
text_input = AutocompleteInput(title="title", css_classes=["foo"], completions = ["100001", "12344556", "12344557", "3194567289", "209374209374"])
page = bokeh_model_page(text_input)
el = page.driver.find_element_by_css_selector('.foo .bk-menu')
assert 'display: none;' in el.get_attribute('style')
el = page.driver.find_element_by_css_selector('.foo input')
enter_text_in_element(page.driver, el, "123", click=2, enter=False)
el = page.driver.find_element_by_css_selector('.foo .bk-menu')
assert 'display: none;' not in el.get_attribute('style')
items = el.find_elements_by_tag_name("div")
assert len(items) == 2
assert items[0].text == "12344556"
assert items[1].text == "12344557"
assert "bk-active" in items[0].get_attribute('class')
assert "bk-active" not in items[1].get_attribute('class')
# hover over second element
items = el.find_elements_by_tag_name("div")
hover_element(page.driver, items[1])
assert len(items) == 2
assert items[0].text == "12344556"
assert items[1].text == "12344557"
assert "bk-active" not in items[0].get_attribute('class')
assert "bk-active" in items[1].get_attribute('class')
@flaky(max_runs=10)
def test_server_on_change_no_round_trip_without_enter_or_click(self, bokeh_server_page) -> None:
page = bokeh_server_page(modify_doc)
el = page.driver.find_element_by_css_selector('.foo input')
enter_text_in_element(page.driver, el, "pre", enter=False) # not change event if enter is not pressed
page.click_custom_action()
results = page.results
assert results['data']['val'] == ["a", "b"]
# XXX (bev) disabled until https://github.com/bokeh/bokeh/issues/7970 is resolved
#assert page.has_no_console_errors()
#@flaky(max_runs=10)
# TODO (bev) Fix up after GH CI switch
@pytest.mark.skip
@flaky(max_runs=10)
def test_server_on_change_round_trip_full_entry(self, bokeh_server_page) -> None:
page = bokeh_server_page(modify_doc)
# double click to highlight and overwrite old text
el = page.driver.find_element_by_css_selector('.foo input')
enter_text_in_element(page.driver, el, "100001", click=2)
page.click_custom_action()
results = page.results
assert results['data']['val'] == ["400", "100001"]
enter_text_in_element(page.driver, el, "12344556", click=2)
page.click_custom_action()
results = page.results
assert results['data']['val'] == ["100001", "12344556"]
# Check clicking outside input also triggers
enter_text_in_element(page.driver, el, "3194567289", click=2)
page.click_canvas_at_position(10, 10)
page.click_custom_action()
results = page.results
assert results['data']['val'] == ["12344556", "3194567289"]
#@flaky(max_runs=10)
# TODO (bev) Fix up after GH CI switch
@pytest.mark.skip
@flaky(max_runs=10)
def test_server_on_change_round_trip_partial_entry(self, bokeh_server_page) -> None:
page = bokeh_server_page(modify_doc)
# double click to highlight and overwrite old text
el = page.driver.find_element_by_css_selector('.foo input')
enter_text_in_element(page.driver, el, "100", click=2)
page.click_custom_action()
results = page.results
assert results['data']['val'] == ["400", "100001"]
enter_text_in_element(page.driver, el, "123", click=2)
page.click_custom_action()
results = page.results
assert results['data']['val'] == ["100001", "12344556"]
# Check clicking outside input also triggers
enter_text_in_element(page.driver, el, "319", click=2, enter=False)
page.click_canvas_at_position(10, 10)
page.click_custom_action()
results = page.results
assert results['data']['val'] == ["12344556", "3194567289"]
# XXX (bev) disabled until https://github.com/bokeh/bokeh/issues/7970 is resolved
#assert page.has_no_console_errors()
@flaky(max_runs=10)
def test_server_on_change_round_trip_menu_entry(self, bokeh_server_page) -> None:
page = bokeh_server_page(modify_doc)
# double click to highlight and overwrite old text
el = page.driver.find_element_by_css_selector('.foo input')
enter_text_in_element(page.driver, el, "123", click=2, enter=False)
enter_text_in_element(page.driver, el, Keys.DOWN, click=0)
page.click_custom_action()
results = page.results
assert results['data']['val'] == ["400", "12344557"]
enter_text_in_element(page.driver, el, "123", click=2, enter=False)
el = page.driver.find_element_by_css_selector('.foo .bk-menu')
items = el.find_elements_by_tag_name("div")
hover_element(page.driver, items[1])
items[1].click()
page.click_custom_action()
results = page.results
assert results['data']['val'] == ["400", "12344557"]
# XXX (bev) disabled until https://github.com/bokeh/bokeh/issues/7970 is resolved
#assert page.has_no_console_errors()
|
|
"""Copyright 2021 Google LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
version 1.1.8
"""
import argparse
import csv
import datetime
import logging
import os
import sys
import zipfile
import boto3
import stratozonedict
# global variables
vm_list = []
vm_tag_list = []
vm_disk_list = []
vm_perf_list = []
# Initiate the parser
parser = argparse.ArgumentParser()
parser.add_argument('-n', '--no_perf', help='Do Not collect performance data.',
action='store_true')
def create_directory(dir_name):
"""Create output directory.
Args:
dir_name: Destination directory
"""
try:
if not os.path.exists(dir_name):
os.makedirs(dir_name)
except Exception as e:
logging.error('error in create_directory')
logging.error(e)
def get_image_info(image_id, l_vm_instance):
"""Get source image info.
Args:
image_id: ID of the source image
l_vm_instance: instance dictionary object
Returns:
Dictionary object.
"""
try:
disk_image = client.describe_images(ImageIds=[image_id,]).get('Images')
if len(disk_image) > 0:
l_vm_instance['OsType'] = disk_image[0].get('PlatformDetails')
l_vm_instance['OsPublisher'] = disk_image[0].get('Description')
else:
l_vm_instance['OsType'] = 'unknown'
l_vm_instance['OsPublisher'] = 'unknown'
return l_vm_instance
except Exception as e:
logging.error('error in get_image_info')
logging.error(e)
l_vm_instance['OsType'] = 'unknown'
l_vm_instance['OsPublisher'] = 'unknown'
return l_vm_instance
def get_image_size_details(instance_type, l_vm_instance):
"""Get image size details.
Args:
instance_type: instance type
l_vm_instance: instance dictionary object
Returns:
Dictionary object.
"""
instance_type_info = (
client.describe_instance_types(
InstanceTypes=[instance_type,]).get('InstanceTypes'))
l_vm_instance['MemoryGiB'] = '{:.1f}'.format(
instance_type_info[0]['MemoryInfo']['SizeInMiB']/1024)
l_vm_instance['AllocatedProcessorCoreCount'] = (
instance_type_info[0]['VCpuInfo']['DefaultCores'])
return l_vm_instance
def report_writer(dictionary_data, field_name_list, file_name):
"""write data contained in dictionary list into csv file.
Args:
dictionary_data: dictionary object
field_name_list: column names
file_name: file name to be created
Returns:
Dictionary object.
"""
try:
logging.info('Writing %s to the disk', file_name)
with open('./output/'+file_name, 'w', newline='') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=field_name_list)
writer.writeheader()
for dictionary_value in dictionary_data:
writer.writerow(dictionary_value)
except Exception as e:
logging.error('error in report_writer')
logging.error(e)
def generate_disk_data(vm_id):
"""If no disk is found generate disk data to prevend import errors.
Args:
vm_id: Instance ID
"""
disk = stratozonedict.vm_disk.copy()
disk['MachineId'] = vm_id
disk['DiskLabel'] = '/dev/xvda'
disk['SizeInGib'] = '52.5'
disk['StorageTypeLabel'] = 'gp2'
vm_disk_list.append(disk)
def get_disk_info(vm_id, block_device_list, root_device_name):
"""Get attached disk data.
Args:
vm_id: Instance ID
block_device_list: list of attached disks
root_device_name: name of the primary (OS) disk
Returns:
Disk create date.
"""
disk_count = 0
try:
disk_create_date = datetime.datetime.now()
for block_device in block_device_list:
disk = stratozonedict.vm_disk.copy()
volume = client.describe_volumes(
VolumeIds=[block_device['Ebs']['VolumeId'],]).get('Volumes')
disk['MachineId'] = vm_id
disk['DiskLabel'] = block_device['DeviceName']
disk['SizeInGib'] = volume[0]['Size']
disk['StorageTypeLabel'] = volume[0]['VolumeType']
vm_disk_list.append(disk)
disk_count = disk_count + 1
if root_device_name == block_device['DeviceName']:
disk_create_date = block_device['Ebs']['AttachTime']
if disk_count == 0:
generate_disk_data(vm_id)
return disk_create_date
except Exception as e:
if disk_count == 0:
generate_disk_data(vm_id)
logging.error('error in get_disk_info')
logging.error(e)
return disk_create_date
def get_network_interface_info(interface_list, l_vm_instance):
"""Get network interface data.
Args:
interface_list: List of network interfaces
l_vm_instance: instance dictionary object
"""
try:
ip_list = []
for nic_count, interface in enumerate(interface_list):
if nic_count == 0:
l_vm_instance['PrimaryIPAddress'] = interface['PrivateIpAddress']
ip_list.append(interface['PrivateIpAddress'])
if 'Association' in interface:
if len(interface['Association']['PublicIp']) > 0:
l_vm_instance['PublicIPAddress'] = (
interface['Association']['PublicIp'])
ip_list.append(interface['Association']['PublicIp'])
l_vm_instance['IpAddressListSemiColonDelimited'] = (';'.join(ip_list))
except Exception as e:
logging.error('error in get_network_interface_info')
logging.error(e)
def get_instance_tags(vm_id, tag_dictionary, l_vm_instance):
"""Get tags assigned to instance.
Args:
vm_id: Instance ID
tag_dictionary: list of assigned tags
l_vm_instance: instance dictionary object
Returns:
Dictionary object.
"""
try:
# if there is no name tag assigned use instance id as name
l_vm_instance['MachineName'] = vm_id
for tag in tag_dictionary:
tmp_tag = stratozonedict.vm_tag.copy()
tmp_tag['MachineId'] = vm_id
tmp_tag['Key'] = tag['Key']
tmp_tag['Value'] = tag['Value']
if tag['Key'] == 'Name':
l_vm_instance['MachineName'] = tag['Value']
vm_tag_list.append(tmp_tag)
return l_vm_instance
except Exception as e:
logging.error('error in get_instance_tags')
logging.error(e)
return l_vm_instance
def get_metric_data_query(namespace, metric_name,
dimension_name, dimension_value, unit, query_id=''):
"""Get performance metrics JSON query for the VM.
Args:
namespace: Query Namespace
metric_name: Metric name
dimension_name: Dimension name
dimension_value: Dimension value
unit: Unit of measure
query_id: Optional unique ID for the query
Returns:
    A MetricDataQuery dictionary for the CloudWatch get_metric_data call.
"""
if not query_id:
query_id = metric_name.lower()
data_query = {
'Id': query_id,
'MetricStat': {
'Metric': {
'Namespace': namespace,
'MetricName': metric_name,
'Dimensions': [
{
'Name': dimension_name,
'Value': dimension_value
},]
},
'Period': 1800,
'Stat': 'Average',
'Unit': unit
},
'ReturnData': True,
}
return data_query
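# Illustrative sketch (not part of the original script): the dictionary built
# by get_metric_data_query for instance CPU utilization. The instance id is a
# made-up example.
#
#   get_metric_data_query('AWS/EC2', 'CPUUtilization', 'InstanceId',
#                         'i-0123456789abcdef0', 'Percent')
#   # -> {'Id': 'cpuutilization',
#   #     'MetricStat': {'Metric': {'Namespace': 'AWS/EC2',
#   #                               'MetricName': 'CPUUtilization',
#   #                               'Dimensions': [{'Name': 'InstanceId',
#   #                                               'Value': 'i-0123456789abcdef0'}]},
#   #                    'Period': 1800, 'Stat': 'Average', 'Unit': 'Percent'},
#   #     'ReturnData': True}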
def get_performance_info(vm_id, region_name, block_device_list):
"""Query system for VM performance data.
Args:
vm_id: instance id.
region_name: name of the AWS region
block_device_list: list of devices (disks) attached to the vm
"""
try:
perf_client = boto3.client('cloudwatch', region_name)
perf_queries = []
disk_count = 0
perf_queries.append(get_metric_data_query('AWS/EC2', 'CPUUtilization',
'InstanceId', vm_id, 'Percent'))
perf_queries.append(get_metric_data_query('AWS/EC2', 'NetworkOut',
'InstanceId', vm_id,
'Bytes'))
perf_queries.append(get_metric_data_query('AWS/EC2', 'NetworkIn',
'InstanceId', vm_id, 'Bytes'))
for block_device in block_device_list:
perf_queries.append(get_metric_data_query('AWS/EBS', 'VolumeReadOps',
'VolumeId',
block_device['Ebs']['VolumeId'],
'Count',
'volumereadops'
+ str(disk_count)))
perf_queries.append(get_metric_data_query('AWS/EBS', 'VolumeWriteOps',
'VolumeId',
block_device['Ebs']['VolumeId'],
'Count',
'volumewriteops'
+ str(disk_count)))
disk_count = disk_count + 1
response = perf_client.get_metric_data(
MetricDataQueries=perf_queries,
StartTime=datetime.datetime.utcnow() - datetime.timedelta(days=30),
EndTime=datetime.datetime.utcnow(),
ScanBy='TimestampAscending'
)
first_arr_size = len(response['MetricDataResults'][0]['Values'])
if (len(response['MetricDataResults'][1]['Values']) >= first_arr_size and
len(response['MetricDataResults'][2]['Values']) >= first_arr_size and
len(response['MetricDataResults'][3]['Values']) >= first_arr_size):
for i in range(0, first_arr_size):
vm_perf_info = stratozonedict.vm_perf.copy()
vm_perf_info['MachineId'] = vm_id
vm_perf_info['TimeStamp'] = (
response['MetricDataResults'][0]['Timestamps'][i].strftime(
'%m/%d/%Y, %H:%M:%S'))
vm_perf_info['CpuUtilizationPercentage'] = '{:.2f}'.format(
response['MetricDataResults'][0]['Values'][i])
vm_perf_info['NetworkBytesPerSecSent'] = '{:.4f}'.format(
response['MetricDataResults'][1]['Values'][i])
vm_perf_info['NetworkBytesPerSecReceived'] = '{:.4f}'.format(
response['MetricDataResults'][2]['Values'][i])
tmp_read_io = 0
tmp_write_io = 0
for j in range(0, disk_count):
tmp_read_io = tmp_read_io + (
response['MetricDataResults'][3 + j]['Values'][i])
tmp_write_io = tmp_write_io + (
response['MetricDataResults'][4 + j]['Values'][i])
vm_perf_info['DiskReadOperationsPerSec'] = '{:.4f}'.format(
(tmp_read_io /1800))
vm_perf_info['DiskWriteOperationsPerSec'] = '{:.4f}'.format(
(tmp_write_io /1800))
vm_perf_info['AvailableMemoryBytes'] = 0
vm_perf_list.append(vm_perf_info)
except Exception as e:
logging.error('error in get_performance_info')
logging.error(e)
def display_script_progress():
"""Display collection progress."""
try:
sys.stdout.write('\r')
sys.stdout.write('%s[%s%s] %i/%i\r' % ('Regions: ', '#'*region_counter,
'.'*(total_regions-region_counter),
region_counter, total_regions))
sys.stdout.flush()
except Exception as e:
logging.error('error in display_script_progress')
logging.error(e)
def region_is_available(l_region):
"""Check if region is enabled.
Args:
l_region: name of the region
Returns:
true/false
"""
regional_sts = boto3.client('sts', l_region)
try:
regional_sts.get_caller_identity()
return True
except Exception as e:
logging.error('error in region_is_available')
logging.error(e)
return False
def zip_files(dir_name, zip_file_name):
"""Compress generated files into zip file for import into stratozone.
Args:
dir_name: source directory
zip_file_name: name of the file to be created
"""
csv_filter = lambda name: 'csv' in name
if os.path.exists(zip_file_name):
os.remove(zip_file_name)
with zipfile.ZipFile(zip_file_name, 'w') as zip_obj:
# Iterate over all the files in directory
for folder_name, subfolders, file_names in os.walk(dir_name):
for file_name in file_names:
if csv_filter(file_name):
file_path = os.path.join(folder_name, file_name)
zip_obj.write(file_path, os.path.basename(file_path))
###########################################################################
# Collect information about deployed instances
###########################################################################
# Read arguments from the command line
args = parser.parse_args()
# create output and log directory
create_directory('./output')
log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(filename='./output/stratozone-aws-export.log',
format=log_format,
level=logging.ERROR)
logging.debug('Starting collection at: %s', datetime.datetime.now())
ec2_client = boto3.client('ec2')
logging.info('Get all regions')
regions = ec2_client.describe_regions(AllRegions=True)
logging.info('Get Organization ID')
region_counter = 0
total_regions = len(regions['Regions'])
# loop through all the regions and for each region get a list of deployed VMs
# process each VM retrieving all basic data as well as performance metrics.
for region in regions['Regions']:
region_counter += 1
if not region_is_available(region['RegionName']):
continue
client = boto3.client('ec2', region['RegionName'])
display_script_progress()
specific_instance = client.describe_instances()
for reservation in specific_instance['Reservations']:
for instance in reservation['Instances']:
if instance.get('State').get('Name') == 'terminated':
continue
vm_instance = stratozonedict.vm_basic_info.copy()
vm_instance['MachineId'] = instance.get('InstanceId')
vm_instance['HostingLocation'] = region.get('RegionName')
vm_instance['MachineTypeLabel'] = instance.get('InstanceType')
vm_instance['MachineStatus'] = instance.get('State').get('Name')
vm_instance = get_image_info(instance.get('ImageId'), vm_instance)
if vm_instance['OsType'] == 'unknown':
tmp_os_value = 'Linux'
if ('windows' in instance.get('PlatformDetails').lower() or
'sql' in instance.get('PlatformDetails').lower()):
tmp_os_value = 'Windows'
vm_instance['OsType'] = tmp_os_value
vm_instance['OsPublisher'] = tmp_os_value
vm_instance = get_image_size_details(instance.get('InstanceType'),
vm_instance)
if 'Tags' in instance:
vm_instance = get_instance_tags(instance.get('InstanceId'),
instance['Tags'],
vm_instance)
else:
vm_instance['MachineName'] = vm_instance['MachineId']
if 'NetworkInterfaces' in instance:
get_network_interface_info(instance['NetworkInterfaces'],
vm_instance)
if not args.no_perf:
get_performance_info(instance['InstanceId'],
region['RegionName'],
instance['BlockDeviceMappings'])
vm_create_timestamp = get_disk_info(instance['InstanceId'],
instance['BlockDeviceMappings'],
instance['RootDeviceName'])
vm_instance['CreateDate'] = vm_create_timestamp
vm_list.append(vm_instance)
# write collected data to files
created_files = 4
field_names = ['MachineId', 'MachineName', 'PrimaryIPAddress',
'PublicIPAddress', 'IpAddressListSemiColonDelimited',
'TotalDiskAllocatedGiB', 'TotalDiskUsedGiB', 'MachineTypeLabel',
'AllocatedProcessorCoreCount', 'MemoryGiB', 'HostingLocation',
'OsType', 'OsPublisher', 'OsName', 'OsVersion',
'MachineStatus', 'ProvisioningState', 'CreateDate',
'IsPhysical', 'Source']
report_writer(vm_list, field_names, 'vmInfo.csv')
if vm_tag_list:
field_names = ['MachineId', 'Key', 'Value']
report_writer(vm_tag_list, field_names, 'tagInfo.csv')
field_names = ['MachineId', 'DiskLabel', 'SizeInGib', 'UsedInGib',
'StorageTypeLabel']
report_writer(vm_disk_list, field_names, 'diskInfo.csv')
field_names = ['MachineId', 'TimeStamp', 'CpuUtilizationPercentage',
'AvailableMemoryBytes', 'DiskReadOperationsPerSec',
'DiskWriteOperationsPerSec', 'NetworkBytesPerSecSent',
'NetworkBytesPerSecReceived']
if not args.no_perf:
report_writer(vm_perf_list, field_names, 'perfInfo.csv')
else:
created_files = 3
zip_files('./output/', 'aws-import-files.zip')
logging.debug('Collection completed at: %s', datetime.datetime.now())
print('\n\nExport Completed. \n')
print('Aws-import-files.zip generated successfully containing {} files.'
.format(created_files))
if args.no_perf:
print('Performance data was not collected.')
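# Usage sketch (assumed invocation; the script name is whatever this file is
# saved as, e.g. stratozone-aws-export.py):
#   python stratozone-aws-export.py            # collect inventory plus 30 days of perf data
#   python stratozone-aws-export.py --no_perf  # skip CloudWatch performance collection
# CSV files are written to ./output and zipped into aws-import-files.zip.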
|
|
'''
Copyright (c) 2014
Harvard FAS Research Computing
All rights reserved.
Created on May 6, 2014
@author: John Brunelle
@author: Aaron Kitzmiller
'''
# from util import runsh_i
import logging
import re
from datetime import datetime
from slyme.util import runsh_i
logger = logging.getLogger('slyme')
class JobStep(object):
'''
Individual job step row from sacct output
'''
def __init__(self):
'''
Constructor
'''
def __getitem__(self, index):
logger.debug("Looking for %s in JobStep" % index)
if index in self.__dict__:
return self.__dict__[index]
else:
return None
class JobReport(object):
'''
Object that represents sacct output for a given jobid. Information is
stored as one or more JobSteps. Attribute accessors retrieve the overall
information for the JobSteps
The fetch() class method retrieves JobReport objects using a call to sacct
The following field transformations are done to the sacct output:
JobID
JobIDs that contain an additional job step qualifier are split into the
JobID number and the JobStepName. As a result JobID always returns the
numerical value
ReqMem
The ReqMem field is converted to 3 separate fields:
ReqMem_bytes is the number of bytes requested
ReqMem_bytes_per_node is populated with ReqMem_bytes if the ReqMem output ended with 'Mn'
ReqMem_bytes_per_core is populated with ReqMem_bytes if the ReqMem output ended with 'Mc'
Start
The Start is converted to a python datetime
End
The End is converted to a python datetime
State
Mostly this is left as is with the exception of "CANCELLED by <uid>" entries
The uid is parsed out and moved to the CancelledBy field
MaxRSS
This is converted to a kilobyte value and stored in MaxRSS_kB. The JobReport
iterates through the JobSteps and returns the largest value.
CPUTime,UserCPU,SystemCPU,TotalCPU
These values are converted to seconds from the text representation. Values
may include decimals
'''
keys = [
#=== data from sacct
#--- info from main job account line
'JobID',
#str, but currently always representing an integer (no ".STEP_ID")
'User',
#str
'JobName',
#str
'State',
#str
'Partition',
#str
'NCPUS',
#int
'NNodes',
#int
'CPUTime',
#seconds, float
'TotalCPU',
#seconds, float
'UserCPU',
#seconds, float
'SystemCPU',
#seconds, float
#--- max of any main job account line or any steps
'MaxRSS_kB',
#bytes in kB, int
'ReqMem_bytes_per_node',
#bytes, int, or None if not used
'ReqMem_bytes_per_core',
#bytes, int, or None if not used
'ReqMem_bytes',
#bytes, int, or None if not used
'Start',
#Start time
'End',
#End time
'NodeList',
#List of nodes used
'Elapsed',
#Time of the job
#=== derived data
'ReqMem_bytes_total',
#bytes, int, or None if not used
#a computation combining the appropriate request per resource and number of resources
'ReqMem_MB_total',
#Total requested memory in MB.
'CPU_Efficiency',
#float, should be in the range [0.0,1.0), but nothing guarantees that
'CPU_Wasted',
#seconds, float
'CancelledBy',
#If State is CANCELLED by (uid), this is the uid
'MaxRSS_MB',
#Max RSS in MB, int
'Mem_Wasted',
#ReqMem / MaxRSS * 100. An integer percentage.
'MaxVMSize_MB',
        #Maximum amount of virtual memory used in MB
'AveVMSize_MB',
#Average amount of virtual memory used in MB.
]
def __init__(self,jobsteps=[]):
'''
Constructor. Takes an array of job steps
'''
self.jobsteps = jobsteps
def __getattr__(self, name):
"""
__getattr__ just calls __getitem__
"""
if name in JobReport.keys:
return self.get_value_for_index(name)
else:
# Gotta do this for getattr calls that are looking
# for methods, etc.
return super(JobReport,self).__getattribute__(name)
def __getitem__(self, index):
'''
Get the value from the correct JobStep
'''
return self.get_value_for_index(index)
def get_value_for_index(self,index):
# If no jobsteps return None
if self.jobsteps is None or len(self.jobsteps) == 0:
logger.debug("No job steps")
return None
# We can cache values for large JobStep arrays
if index in self.__dict__:
            return self.__dict__[index]
# If there is a getter function, use it
funcname = "get_%s" % index
try:
f = getattr(self,funcname)
if callable(f):
return f()
except AttributeError:
pass
# Return the first non-null thing you find
for js in self.jobsteps:
logger.debug("Checking jobstep against index %s" % index)
if js[index] is not None and js[index] != '':
logger.debug("Got %s" % index)
return js[index]
def get_JobName(self):
for js in self.jobsteps:
if js.JobName and js.JobName != 'batch':
return js.JobName
def get_MaxRSS_kB(self):
"""
Gets the max value from the jobsteps
"""
max = -1
for js in self.jobsteps:
if js.MaxRSS_kB > max:
max = js.MaxRSS_kB
return max
def get_MaxRSS_MB(self):
"""
Gets the max value from the jobsteps
"""
max = -1
for js in self.jobsteps:
if js.MaxRSS_MB > max:
max = js.MaxRSS_MB
return max
def get_MaxVMSize_MB(self):
"""
Gets the max value from the jobsteps
"""
max = -1
for js in self.jobsteps:
if js.MaxVMSize_MB > max:
max = js.MaxVMSize_MB
return max
def get_AveVMSize_MB(self):
"""
Gets the max value from the jobsteps
"""
max = -1
for js in self.jobsteps:
if js.AveVMSize_MB > max:
max = js.AveVMSize_MB
return max
def get_NCPUS(self):
"""
Gets the max value from the jobsteps
"""
max = 0
for js in self.jobsteps:
if js.NCPUS > max:
max = js.NCPUS
return max
def get_CPUTime(self):
"""
Largest CPUTime of the individual job steps
"""
cputime = 0
for js in self.jobsteps:
if js.CPUTime > cputime:
cputime = js.CPUTime
return cputime
def get_CPU_Efficiency(self):
if self.CPUTime != 0:
return self.TotalCPU / self.CPUTime
else:
return 0
def get_Mem_Wasted(self):
"""
ReqMem / MaxRSS * 100. An integer percentage.
"""
if self.MaxRSS_MB is not None and self.MaxRSS_MB != 0:
            return int(round(float(self.ReqMem_MB_total) / self.MaxRSS_MB * 100))
else:
return 0
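# Illustrative sketch (not part of the original module): the ReqMem
# transformation described in the JobReport docstring. A value such as
# '4000Mn' is a per-node request and '500Mc' a per-core request; the suffix
# determines which derived field is populated. This helper is an assumed
# example, not the parser slyme actually uses.
def _example_reqmem_split(reqmem):
    bytes_value = int(float(reqmem[:-2])) * 1024 ** 2  # 'M' prefix -> MB -> bytes
    if reqmem.endswith('Mn'):
        return {'ReqMem_bytes': bytes_value,
                'ReqMem_bytes_per_node': bytes_value,
                'ReqMem_bytes_per_core': None}
    if reqmem.endswith('Mc'):
        return {'ReqMem_bytes': bytes_value,
                'ReqMem_bytes_per_node': None,
                'ReqMem_bytes_per_core': bytes_value}
    return {'ReqMem_bytes': None,
            'ReqMem_bytes_per_node': None,
            'ReqMem_bytes_per_core': None}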
|
|
# Copyright 2011 Rackspace
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from nova.compute import api as compute_api
from nova.compute import manager as compute_manager
import nova.context
from nova import db
from nova import exception
from nova.network import api as network_api
from nova.network import manager as network_manager
from nova.network import model as network_model
from nova.network import nova_ipam_lib
from nova.network import rpcapi as network_rpcapi
from nova import objects
from nova.objects import base as obj_base
from nova.objects import virtual_interface as vif_obj
from nova.openstack.common import jsonutils
from nova.pci import pci_device
from nova.tests.objects import test_fixed_ip
from nova.tests.objects import test_instance_info_cache
from nova.tests.objects import test_pci_device
HOST = "testhost"
CONF = cfg.CONF
CONF.import_opt('use_ipv6', 'nova.netconf')
class FakeModel(dict):
"""Represent a model from the db."""
def __init__(self, *args, **kwargs):
self.update(kwargs)
class FakeNetworkManager(network_manager.NetworkManager):
"""This NetworkManager doesn't call the base class so we can bypass all
inherited service cruft and just perform unit tests.
"""
class FakeDB:
vifs = [{'id': 0,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'instance_uuid': '00000000-0000-0000-0000-000000000010',
'network_id': 1,
'uuid': 'fake-uuid',
'address': 'DC:AD:BE:FF:EF:01'},
{'id': 1,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'instance_uuid': '00000000-0000-0000-0000-000000000020',
'network_id': 21,
'uuid': 'fake-uuid2',
'address': 'DC:AD:BE:FF:EF:02'},
{'id': 2,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'instance_uuid': '00000000-0000-0000-0000-000000000030',
'network_id': 31,
'uuid': 'fake-uuid3',
'address': 'DC:AD:BE:FF:EF:03'}]
floating_ips = [dict(address='172.16.1.1',
fixed_ip_id=100),
dict(address='172.16.1.2',
fixed_ip_id=200),
dict(address='173.16.1.2',
fixed_ip_id=210)]
fixed_ips = [dict(test_fixed_ip.fake_fixed_ip,
id=100,
address='172.16.0.1',
virtual_interface_id=0),
dict(test_fixed_ip.fake_fixed_ip,
id=200,
address='172.16.0.2',
virtual_interface_id=1),
dict(test_fixed_ip.fake_fixed_ip,
id=210,
address='173.16.0.2',
virtual_interface_id=2)]
def fixed_ip_get_by_instance(self, context, instance_uuid):
return [dict(address='10.0.0.0'), dict(address='10.0.0.1'),
dict(address='10.0.0.2')]
def network_get_by_cidr(self, context, cidr):
raise exception.NetworkNotFoundForCidr(cidr=cidr)
def network_create_safe(self, context, net):
fakenet = dict(net)
fakenet['id'] = 999
return fakenet
def network_get(self, context, network_id, project_only="allow_none"):
return {'cidr_v6': '2001:db8:69:%x::/64' % network_id}
def network_get_by_uuid(self, context, network_uuid):
raise exception.NetworkNotFoundForUUID(uuid=network_uuid)
def network_get_all(self, context):
raise exception.NoNetworksFound()
def network_get_all_by_uuids(self, context, project_only="allow_none"):
raise exception.NoNetworksFound()
def network_disassociate(self, context, network_id):
return True
def virtual_interface_get_all(self, context):
return self.vifs
def fixed_ips_by_virtual_interface(self, context, vif_id):
return [ip for ip in self.fixed_ips
if ip['virtual_interface_id'] == vif_id]
def fixed_ip_disassociate(self, context, address):
return True
def __init__(self, stubs=None):
self.db = self.FakeDB()
if stubs:
stubs.Set(vif_obj, 'db', self.db)
self.deallocate_called = None
self.deallocate_fixed_ip_calls = []
self.network_rpcapi = network_rpcapi.NetworkAPI()
# TODO(matelakat) method signature should align with the faked one's
def deallocate_fixed_ip(self, context, address=None, host=None,
instance=None):
self.deallocate_fixed_ip_calls.append((context, address, host))
# TODO(matelakat) use the deallocate_fixed_ip_calls instead
self.deallocate_called = address
def _create_fixed_ips(self, context, network_id, fixed_cidr=None,
extra_reserved=None, bottom_reserved=0,
top_reserved=0):
pass
def get_instance_nw_info(context, instance_id, rxtx_factor,
host, instance_uuid=None, **kwargs):
pass
def fake_network(network_id, ipv6=None):
if ipv6 is None:
ipv6 = CONF.use_ipv6
fake_network = {'id': network_id,
'uuid': '00000000-0000-0000-0000-00000000000000%02d' % network_id,
'label': 'test%d' % network_id,
'injected': False,
'multi_host': False,
'cidr': '192.168.%d.0/24' % network_id,
'cidr_v6': None,
'netmask': '255.255.255.0',
'netmask_v6': None,
'bridge': 'fake_br%d' % network_id,
'bridge_interface': 'fake_eth%d' % network_id,
'gateway': '192.168.%d.1' % network_id,
'gateway_v6': None,
'broadcast': '192.168.%d.255' % network_id,
'dns1': '192.168.%d.3' % network_id,
'dns2': '192.168.%d.4' % network_id,
'dns3': '192.168.%d.3' % network_id,
'vlan': None,
'host': None,
'project_id': 'fake_project',
'vpn_public_address': '192.168.%d.2' % network_id,
'vpn_public_port': None,
'vpn_private_address': None,
'dhcp_start': None,
'rxtx_base': network_id * 10,
'priority': None,
'deleted': False,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'mtu': None,
'dhcp_server': '192.168.%d.1' % network_id,
'enable_dhcp': True,
'share_address': False}
if ipv6:
fake_network['cidr_v6'] = '2001:db8:0:%x::/64' % network_id
fake_network['gateway_v6'] = '2001:db8:0:%x::1' % network_id
fake_network['netmask_v6'] = '64'
if CONF.flat_injected:
fake_network['injected'] = True
return fake_network
def vifs(n):
for x in xrange(1, n + 1):
yield {'id': x,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'address': 'DE:AD:BE:EF:00:%02x' % x,
'uuid': '00000000-0000-0000-0000-00000000000000%02d' % x,
'network_id': x,
'instance_uuid': 'fake-uuid'}
def floating_ip_ids():
for i in xrange(1, 100):
yield i
def fixed_ip_ids():
for i in xrange(1, 100):
yield i
floating_ip_id = floating_ip_ids()
fixed_ip_id = fixed_ip_ids()
def next_fixed_ip(network_id, num_floating_ips=0):
next_id = fixed_ip_id.next()
f_ips = [FakeModel(**next_floating_ip(next_id))
for i in xrange(num_floating_ips)]
return {'id': next_id,
'network_id': network_id,
'address': '192.168.%d.%03d' % (network_id, (next_id + 99)),
'instance_uuid': 1,
'allocated': False,
# and since network_id and vif_id happen to be equivalent
'virtual_interface_id': network_id,
'floating_ips': f_ips}
def next_floating_ip(fixed_ip_id):
next_id = floating_ip_id.next()
return {'id': next_id,
'address': '10.10.10.%03d' % (next_id + 99),
'fixed_ip_id': fixed_ip_id,
'project_id': None,
'auto_assigned': False}
def ipv4_like(ip, match_string):
ip = ip.split('.')
match_octets = match_string.split('.')
for i, octet in enumerate(match_octets):
if octet == '*':
continue
if octet != ip[i]:
return False
return True
def fake_get_instance_nw_info(stubs, num_networks=1, ips_per_vif=2,
floating_ips_per_fixed_ip=0):
# stubs is the self.stubs from the test
# ips_per_vif is the number of ips each vif will have
# num_floating_ips is number of float ips for each fixed ip
network = network_manager.FlatManager(host=HOST)
network.db = db
# reset the fixed and floating ip generators
global floating_ip_id, fixed_ip_id, fixed_ips
floating_ip_id = floating_ip_ids()
fixed_ip_id = fixed_ip_ids()
fixed_ips = []
networks = [fake_network(x) for x in xrange(1, num_networks + 1)]
def fixed_ips_fake(*args, **kwargs):
global fixed_ips
ips = [next_fixed_ip(i, floating_ips_per_fixed_ip)
for i in xrange(1, num_networks + 1)
for j in xrange(ips_per_vif)]
fixed_ips = ips
return ips
def floating_ips_fake(context, address):
for ip in fixed_ips:
if address == ip['address']:
return ip['floating_ips']
return []
def fixed_ips_v6_fake():
return ['2001:db8:0:%x::1' % i
for i in xrange(1, num_networks + 1)]
def virtual_interfaces_fake(*args, **kwargs):
return [vif for vif in vifs(num_networks)]
def vif_by_uuid_fake(context, uuid):
return {'id': 1,
'address': 'DE:AD:BE:EF:00:01',
'uuid': uuid,
'network_id': 1,
'network': None,
'instance_uuid': 'fake-uuid'}
def network_get_fake(context, network_id, project_only='allow_none'):
nets = [n for n in networks if n['id'] == network_id]
if not nets:
raise exception.NetworkNotFound(network_id=network_id)
return nets[0]
def update_cache_fake(*args, **kwargs):
pass
def get_subnets_by_net_id(self, context, project_id, network_uuid,
vif_uuid):
i = int(network_uuid[-2:])
subnet_v4 = dict(
cidr='192.168.%d.0/24' % i,
dns1='192.168.%d.3' % i,
dns2='192.168.%d.4' % i,
gateway='192.168.%d.1' % i)
subnet_v6 = dict(
cidr='2001:db8:0:%x::/64' % i,
gateway='2001:db8:0:%x::1' % i)
return [subnet_v4, subnet_v6]
def get_network_by_uuid(context, uuid):
return dict(id=1,
cidr_v6='fe80::/64',
bridge='br0',
label='public')
def get_v4_fake(*args, **kwargs):
ips = fixed_ips_fake(*args, **kwargs)
return [ip['address'] for ip in ips]
def get_v6_fake(*args, **kwargs):
return fixed_ips_v6_fake()
stubs.Set(db, 'fixed_ip_get_by_instance', fixed_ips_fake)
stubs.Set(db, 'floating_ip_get_by_fixed_address', floating_ips_fake)
stubs.Set(db, 'virtual_interface_get_by_uuid', vif_by_uuid_fake)
stubs.Set(db, 'network_get_by_uuid', get_network_by_uuid)
stubs.Set(db, 'virtual_interface_get_by_instance', virtual_interfaces_fake)
stubs.Set(db, 'network_get', network_get_fake)
stubs.Set(db, 'instance_info_cache_update', update_cache_fake)
stubs.Set(nova_ipam_lib.NeutronNovaIPAMLib, 'get_subnets_by_net_id',
get_subnets_by_net_id)
stubs.Set(nova_ipam_lib.NeutronNovaIPAMLib, 'get_v4_ips_by_interface',
get_v4_fake)
stubs.Set(nova_ipam_lib.NeutronNovaIPAMLib, 'get_v6_ips_by_interface',
get_v6_fake)
class FakeContext(nova.context.RequestContext):
def is_admin(self):
return True
nw_model = network.get_instance_nw_info(
FakeContext('fakeuser', 'fake_project'),
0, 3, None)
return nw_model
def stub_out_nw_api_get_instance_nw_info(stubs, func=None,
num_networks=1,
ips_per_vif=1,
floating_ips_per_fixed_ip=0):
def get_instance_nw_info(self, context, instance, conductor_api=None):
return fake_get_instance_nw_info(stubs, num_networks=num_networks,
ips_per_vif=ips_per_vif,
floating_ips_per_fixed_ip=floating_ips_per_fixed_ip)
if func is None:
func = get_instance_nw_info
stubs.Set(network_api.API, 'get_instance_nw_info', func)
def stub_out_network_cleanup(stubs):
stubs.Set(network_api.API, 'deallocate_for_instance',
lambda *args, **kwargs: None)
_real_functions = {}
def set_stub_network_methods(stubs):
global _real_functions
cm = compute_manager.ComputeManager
if not _real_functions:
_real_functions = {
'_get_instance_nw_info': cm._get_instance_nw_info,
'_allocate_network': cm._allocate_network,
'_deallocate_network': cm._deallocate_network}
def fake_networkinfo(*args, **kwargs):
return network_model.NetworkInfo()
def fake_async_networkinfo(*args, **kwargs):
return network_model.NetworkInfoAsyncWrapper(fake_networkinfo)
stubs.Set(cm, '_get_instance_nw_info', fake_networkinfo)
stubs.Set(cm, '_allocate_network', fake_async_networkinfo)
stubs.Set(cm, '_deallocate_network', lambda *args, **kwargs: None)
def unset_stub_network_methods(stubs):
global _real_functions
if _real_functions:
cm = compute_manager.ComputeManager
for name in _real_functions:
stubs.Set(cm, name, _real_functions[name])
def stub_compute_with_ips(stubs):
orig_get = compute_api.API.get
orig_get_all = compute_api.API.get_all
orig_create = compute_api.API.create
def fake_get(*args, **kwargs):
return _get_instances_with_cached_ips(orig_get, *args, **kwargs)
def fake_get_all(*args, **kwargs):
return _get_instances_with_cached_ips(orig_get_all, *args, **kwargs)
def fake_create(*args, **kwargs):
return _create_instances_with_cached_ips(orig_create, *args, **kwargs)
def fake_pci_device_get_by_addr(context, node_id, dev_addr):
return test_pci_device.fake_db_dev
stubs.Set(db, 'pci_device_get_by_addr', fake_pci_device_get_by_addr)
stubs.Set(compute_api.API, 'get', fake_get)
stubs.Set(compute_api.API, 'get_all', fake_get_all)
stubs.Set(compute_api.API, 'create', fake_create)
def _get_fake_cache():
def _ip(ip, fixed=True, floats=None):
ip_dict = {'address': ip, 'type': 'fixed'}
if not fixed:
ip_dict['type'] = 'floating'
if fixed and floats:
ip_dict['floating_ips'] = [_ip(f, fixed=False) for f in floats]
return ip_dict
info = [{'address': 'aa:bb:cc:dd:ee:ff',
'id': 1,
'network': {'bridge': 'br0',
'id': 1,
'label': 'private',
'subnets': [{'cidr': '192.168.0.0/24',
'ips': [_ip('192.168.0.3')]}]}}]
if CONF.use_ipv6:
ipv6_addr = 'fe80:b33f::a8bb:ccff:fedd:eeff'
info[0]['network']['subnets'].append({'cidr': 'fe80:b33f::/64',
'ips': [_ip(ipv6_addr)]})
return jsonutils.dumps(info)
def _get_instances_with_cached_ips(orig_func, *args, **kwargs):
"""Kludge the cache into instance(s) without having to create DB
entries
"""
instances = orig_func(*args, **kwargs)
context = args[0]
fake_device = objects.PciDevice.get_by_dev_addr(context, 1, 'a')
def _info_cache_for(instance):
info_cache = dict(test_instance_info_cache.fake_info_cache,
network_info=_get_fake_cache(),
instance_uuid=instance['uuid'])
if isinstance(instance, obj_base.NovaObject):
_info_cache = objects.InstanceInfoCache(context)
objects.InstanceInfoCache._from_db_object(context, _info_cache,
info_cache)
info_cache = _info_cache
instance['info_cache'] = info_cache
if isinstance(instances, (list, obj_base.ObjectListBase)):
for instance in instances:
_info_cache_for(instance)
pci_device.claim(fake_device, instance)
pci_device.allocate(fake_device, instance)
else:
_info_cache_for(instances)
pci_device.claim(fake_device, instances)
pci_device.allocate(fake_device, instances)
return instances
def _create_instances_with_cached_ips(orig_func, *args, **kwargs):
"""Kludge the above kludge so that the database doesn't get out
of sync with the actual instance.
"""
instances, reservation_id = orig_func(*args, **kwargs)
fake_cache = _get_fake_cache()
for instance in instances:
instance['info_cache']['network_info'] = fake_cache
db.instance_info_cache_update(args[1], instance['uuid'],
{'network_info': fake_cache})
return (instances, reservation_id)
|
|
from TASSELpy.java.lang.Object import Object
from TASSELpy.java.lang.Comparable import Comparable
from TASSELpy.java.lang.String import String, metaString
from TASSELpy.java.lang.Boolean import metaBoolean
from TASSELpy.java.lang.Integer import metaInteger
from TASSELpy.java.util.ArrayList import ArrayList
from TASSELpy.java.util.Map import Map
from TASSELpy.java.util.Set import Set
from TASSELpy.javaObj import javaArray
from TASSELpy.utils.helper import make_sig, send_to_java
from TASSELpy.utils.Overloading import javaOverload, javaConstructorOverload, javaStaticOverload
from TASSELpy.utils.Overloading import javaGenericOverload
from abc import ABCMeta
java_imports = {'ArrayList':'java/util/ArrayList',
'Map':'java/util/Map',
'Object':'java/lang/Object',
'Set':'java/util/Set',
'String':'java/lang/String',
'Trait':'net/maizegenetics/trait/Trait'}
class metaTrait:
__metaclass__ = ABCMeta
@classmethod
def __subclasshook__(cls, C):
if issubclass(C, Trait):
return True
else:
return False
class Trait(Comparable):
"""
Data descriptor that contains information about a trait, factor, covariate,
or other item for which data may be stored. It may contain information
about the experimental unit on which data was collected (factors). It may contain information
about how the data is to be used in an analysis. Data values will be stored as doubles.
In the case of discrete variables, the value stored will be 0, 1, ... indicating the variable level.
For discrete variables, labels or names corresponding to each variable level would be stored as well.
"""
_java_name = java_imports['Trait']
## Constructor for trait
# @param name this trait
# @param isDiscrete true if this trait is discrete, false if it is continuous
# @param type the trait type; data, covariate, factor, marker, or exclude
# @param properties a property map, which can be null
@javaConstructorOverload(java_imports['Trait'],
(make_sig([java_imports['String'],'boolean',java_imports['String'],
java_imports['Map']],'void'),
(metaString,metaBoolean,metaString,Map)),
(make_sig([java_imports['String'],'boolean',java_imports['String']],'void'),
(metaString,metaBoolean,metaString)))
def __init__(self, *args, **kwargs):
"""
Constructor for Trait
Signatures:
Trait(String name, boolean isDiscrete, String type, Map<String, Object> properties)
Trait(String name, boolean isDiscrete, String type)
Arguments:
Trait(String name, boolean isDiscrete, String type, Map<String, Object> properties)
name -- this trait
            isDiscrete -- true if this trait is discrete, false if it is continuous
type -- the trait type; data, covariate, factor, marker, or exclude
properties -- a property map, which can be null
Trait(String name, boolean isDiscrete, String type)
name -- this trait
            isDiscrete -- true if this trait is discrete, false if it is continuous
type -- the trait type; data, covariate, factor, marker, or exclude
"""
super(Trait, self).__init__(*args, generic=(Trait,), **kwargs)
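    # Illustrative usage sketch (assumed, not from the original file): a
    # continuous phenotype would be declared as
    #     height = Trait("plant_height", False, "data")
    #     height.isDiscrete()  # -> False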
## Gets a copy of an existing trait
# @param trait the trait to be copied
# @return A copy of the trait
@javaStaticOverload(java_imports['Trait'],"getInstance",
(make_sig([java_imports['Trait']],java_imports['Trait']),
(metaTrait,),lambda x: Trait(obj=x)))
def getInstance(*args):
"""
Gets a copy of an existing trait
Signatures:
static Trait getInstance(Trait trait)
Arguments:
trait -- the trait to be copied
Returns:
A copy of the trait
"""
pass
## Gets the name by which the trait is identified
# @return Name by which the trait is identified
@javaOverload("getName",
(make_sig([],java_imports['String']),(),None))
def getName(self, *args):
"""
Gets the name by which the trait is identified
Signatures:
String getName()
Returns:
Name by which the trait is identified
"""
pass
## Gets whether this trait is discrete
# @return true if this trait is discrete, false if it is continuous
@javaOverload("isDiscrete",
(make_sig([],'boolean'),(),None))
def isDiscrete(self, *args):
"""
Gets whether this trait is discrete
Signatures:
boolean isDiscrete()
Returns:
true if this trait is discrete, false if it is continuous
"""
pass
## Sets the level labels
# @param levelLabels for a discrete trait, the names of the levels or values this
# trait can take
@javaOverload("setLevelLabels",
(make_sig([java_imports['String']+'[]'],'void'),
(javaArray.get_array_type(String),),None))
def setLevelLabels(self, *args):
"""
Sets the level labels
Signatures:
void setLevelLabels(String[] levelLabels)
Arguments:
levelLabels -- for a discrete trait, the names of the levels or values this trait
can take
"""
pass
## Gets the level labels
# @return For a discrete trait, the names of the levels or values this trait can take
@javaOverload("getLevelLabels",
(make_sig([],java_imports['String']+'[]'),(),
String.wrap_existing_array))
def getLevelLabels(self, *args):
"""
Gets the level labels
Signatures:
String[] getLevelLabels()
Returns:
For a discrete trait, the names of the levels or values this trait can take
"""
pass
## Gets the name of a level
# @param level The level number (0,1,...)
# @return The name of this level
@javaOverload("getLevelLabel",
(make_sig(['int'],java_imports['String']),(metaInteger,),None))
def getLevelLabel(self, *args):
"""
Gets the name of a level
Signatures:
String getLevelLabel(int level)
Arguments:
level -- the level number (0,1,...)
Returns:
The name of this level
"""
pass
## Gets the number of levels
# @return The number of levels
@javaOverload("getNumberOfLevels",
(make_sig([],'int'),(),None))
def getNumberOfLevels(self, *args):
"""
Gets the number of levels
Signatures:
int getNumberOfLevels()
Returns:
The number of levels
"""
pass
## Gets the number of entries in the property map
# @return The number of entries in the property map
@javaOverload("getNumberOfProperties",
(make_sig([],'int'),(),None))
def getNumberOfProperties(self, *args):
"""
Gets the number of entries in the property map
Signatures:
int getNumberOfProperties()
Returns:
The number of entries in the property map
"""
pass
@javaGenericOverload("getProperties",
(make_sig([],java_imports['Set']),(),
lambda x: Set(obj=x, generic=(Map.Entry,))))
def _getProperties(self, *args):
pass
## The entry set for this trait's property map
# @return The entry Set for this trait's property map
def getProperties(self, *args):
"""
The entry set for this trait's property map
Signatures:
Set<Entry<String, Object>> getProperties()
Returns:
The entry Set for this trait's property map
"""
props = self._getProperties(*args)
props.generic_dict['/@1/'].generic_dict = {'/@1/':String, '/@2/':Object}
return props
## Gets the value of this property, null if the property does not exist
# @param propertyName The name of a property for this trait
# @return The value of this property, null if the property does not exist
@javaOverload("getProperty",
(make_sig([java_imports['String']],java_imports['Object']),
(metaString,),lambda x: Object(obj=send_to_java(x))))
def getProperty(self, *args):
"""
Gets the value of this property, null if property does not exist
Signatures:
Object getProperty(String propertyName)
Arguments:
propertyName -- The name of a property for this trait
Returns:
The value of this property, null if the property does not exist
"""
pass
## Gets all the property names for this trait
# @return All the property names for this trait
@javaGenericOverload("getPropertyNames",
(make_sig([],java_imports['Set']),(),
dict(type=Set,generic=(String,))))
def getPropertyNames(self, *args):
"""
Gets all the property names for this trait
Signatures:
Set<String> getPropertyNames()
Returns:
All the property names for this trait
"""
pass
## Adds a property or sets a new value for an existing property
# @param propertyName a name
# @param value The value for this property
@javaOverload("setProperty",
(make_sig([java_imports['String'],java_imports['Object']],'void'),
(metaString,Object),None))
def setProperty(self, *args):
"""
Adds a property or sets a new value for an existing property
Signatures:
void setProperty(String propertyName, Object value)
Arguments:
propertyName -- a name
value -- The value for this property
"""
pass
## Adds a new factor and its value or changes the value of an existing factor.
# A factor might be an environment or a rep number
# @param name This factor's name
# @param value The factor's value for this trait
@javaOverload("addFactor",
(make_sig([java_imports['String'],java_imports['String']],'void'),
(metaString,metaString),None))
def addFactor(self, *args):
"""
Adds a new factor and its value or changes the value of an existing factor.
A factor might be an environment or a rep number
Signatures:
void addFactor(String name, String value)
Arguments:
name -- this factor's name
value -- the factor's value for this trait
"""
pass
## Gets the number of factors for this trait
# @return The number of factors for this trait
@javaOverload("getNumberOfFactors",
(make_sig([],'int'),(),None))
def getNumberOfFactors(self, *args):
"""
Gets the number of factors for this trait
Signatures:
int getNumberOfFactors()
Returns:
The number of factors for this trait
"""
pass
## Gets the names for the factors for this trait. Returns null if
# there are no factors
# @return Names for the factors for this trait or null if there are no factors
@javaGenericOverload("getFactorNames",
(make_sig([],java_imports['ArrayList']),(),
dict(type=ArrayList,generic=(String,))))
def getFactorNames(self, *args):
"""
Gets the names for the factors for this trait. Returns null if
there are no factors
Signatures:
ArrayList<String> getFactorNames()
Returns:
Names for the factors for this trait. Returns null if there
are no factors
"""
pass
## Gets a factor value
# @param factorName The name of the factor whose value is requested
# @return The value of this factor, or null if the factor does not exist
@javaOverload("getFactorValue",
(make_sig([java_imports['String']],java_imports['String']),
(metaString,),None))
def getFactorValue(self, *args):
"""
Gets a factor value
Signatures:
String getFactorValue(String factorName)
Returns:
The value of this factor or null if the factor does not exist
"""
pass
## Gets the type of this trait
# @return The type of this trait: data, covariate, factor, marker, or exclude
@javaOverload("getType",
(make_sig([],java_imports['String']),(),None))
def getType(self, *args):
"""
Gets the type of this trait
Signatures:
String getType()
Returns:
The type of this trait: data, covariate, factor, marker, or exclude
"""
pass
## Sets the type of this trait
# @param type The type of this trait: data, covariate, factor, marker, or exclude
@javaOverload("setType",
(make_sig([java_imports['String']],'void'),(metaString,),None))
def setType(self, *args):
"""
Sets the type of this trait
Signatures:
void setType(String type)
Arguments:
type -- The type of this trait: data, covariate, factor, marker, or exclude
"""
pass
## Returns whether the trait has levels
# @return true if the trait has levels
@javaOverload("hasLevels",
(make_sig([],'boolean'),(),None))
def hasLevels(self, *args):
"""
Returns whether the trait has levels
Signatures:
boolean hasLevels()
Returns:
true if the trait has levels
"""
pass
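# Minimal usage sketch (illustrative only, not part of the wrapper). It assumes
# the Java bridge behind javaOverload/javaStaticOverload has been started; the
# trait name 'height' and the type values below are hypothetical examples.
def _example_trait_usage():
    height = Trait('height', False, 'data')  # continuous data trait, no property map
    trait_name = height.getName()            # -> 'height'
    is_discrete = height.isDiscrete()        # -> False
    height.setType('covariate')              # switch the trait type
    return trait_name, is_discrete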
|
|
#!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base classes for writing checkers that operate on tokens."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)',
'jacobr@google.com (Jacob Richman)')
import StringIO
import traceback
import gflags as flags
from closure_linter import ecmametadatapass
from closure_linter import errorrules
from closure_linter import errors
from closure_linter import javascripttokenizer
from closure_linter.common import error
from closure_linter.common import htmlutil
FLAGS = flags.FLAGS
flags.DEFINE_boolean('debug_tokens', False,
'Whether to print all tokens for debugging.')
flags.DEFINE_boolean('error_trace', False,
'Whether to show error exceptions.')
class LintRulesBase(object):
"""Base class for all classes defining the lint rules for a language."""
def __init__(self):
self.__checker = None
def Initialize(self, checker, limited_doc_checks, is_html):
"""Initializes to prepare to check a file.
Args:
checker: Class to report errors to.
limited_doc_checks: Whether doc checking is relaxed for this file.
is_html: Whether the file is an HTML file with extracted contents.
"""
self.__checker = checker
self._limited_doc_checks = limited_doc_checks
self._is_html = is_html
def _HandleError(self, code, message, token, position=None,
fix_data=None):
"""Call the HandleError function for the checker we are associated with."""
if errorrules.ShouldReportError(code):
self.__checker.HandleError(code, message, token, position, fix_data)
def CheckToken(self, token, parser_state):
"""Checks a token, given the current parser_state, for warnings and errors.
Args:
token: The current token under consideration.
parser_state: Object that indicates the parser state in the page.
Raises:
TypeError: If not overridden.
"""
raise TypeError('Abstract method CheckToken not implemented')
def Finalize(self, parser_state, tokenizer_mode):
"""Perform all checks that need to occur after all lines are processed.
Args:
parser_state: State of the parser after parsing all tokens
tokenizer_mode: Mode of the tokenizer after parsing the entire page
Raises:
TypeError: If not overridden.
"""
raise TypeError('Abstract method Finalize not implemented')
class CheckerBase(object):
"""This class handles checking a LintRules object against a file."""
def __init__(self, error_handler, lint_rules, state_tracker,
limited_doc_files=None, metadata_pass=None):
"""Initialize a checker object.
Args:
error_handler: Object that handles errors.
lint_rules: LintRules object defining lint errors given a token
and state_tracker object.
state_tracker: Object that tracks the current state in the token stream.
limited_doc_files: List of filenames that are not required to have
documentation comments.
metadata_pass: Object that builds metadata about the token stream.
"""
self.__error_handler = error_handler
self.__lint_rules = lint_rules
self.__state_tracker = state_tracker
self.__metadata_pass = metadata_pass
self.__limited_doc_files = limited_doc_files
self.__tokenizer = javascripttokenizer.JavaScriptTokenizer()
self.__has_errors = False
def HandleError(self, code, message, token, position=None,
fix_data=None):
"""Prints out the given error message including a line number.
Args:
code: The error code.
message: The error to print.
token: The token where the error occurred, or None if it was a file-wide
issue.
position: The position of the error, defaults to None.
fix_data: Metadata used for fixing the error.
"""
self.__has_errors = True
self.__error_handler.HandleError(
error.Error(code, message, token, position, fix_data))
def HasErrors(self):
"""Returns true if the style checker has found any errors.
Returns:
True if the style checker has found any errors.
"""
return self.__has_errors
def Check(self, filename, source=None):
"""Checks the file, printing warnings and errors as they are found.
Args:
filename: The name of the file to check.
source: Optional. The contents of the file. Can be either a string or
file-like object. If omitted, contents will be read from disk from
the given filename.
"""
if source is None:
try:
f = open(filename)
except IOError:
self.__error_handler.HandleFile(filename, None)
self.HandleError(errors.FILE_NOT_FOUND, 'File not found', None)
self.__error_handler.FinishFile()
return
else:
if type(source) in [str, unicode]:
f = StringIO.StringIO(source)
else:
f = source
try:
if filename.endswith('.html') or filename.endswith('.htm'):
self.CheckLines(filename, htmlutil.GetScriptLines(f), True)
else:
self.CheckLines(filename, f, False)
finally:
f.close()
def CheckLines(self, filename, lines_iter, is_html):
"""Checks a file, given as an iterable of lines, for warnings and errors.
Args:
filename: The name of the file to check.
lines_iter: An iterator that yields one line of the file at a time.
is_html: Whether the file being checked is an HTML file with extracted
contents.
Returns:
A boolean indicating whether the full file could be checked or if checking
failed prematurely.
"""
limited_doc_checks = False
if self.__limited_doc_files:
for limited_doc_filename in self.__limited_doc_files:
if filename.endswith(limited_doc_filename):
limited_doc_checks = True
break
state_tracker = self.__state_tracker
lint_rules = self.__lint_rules
state_tracker.Reset()
lint_rules.Initialize(self, limited_doc_checks, is_html)
token = self.__tokenizer.TokenizeFile(lines_iter)
parse_error = None
if self.__metadata_pass:
try:
self.__metadata_pass.Reset()
self.__metadata_pass.Process(token)
except ecmametadatapass.ParseError, caught_parse_error:
if FLAGS.error_trace:
traceback.print_exc()
parse_error = caught_parse_error
except Exception:
print 'Internal error in %s' % filename
traceback.print_exc()
return False
self.__error_handler.HandleFile(filename, token)
while token:
if FLAGS.debug_tokens:
print token
if parse_error and parse_error.token == token:
# Report any parse errors from above once we find the token.
message = ('Error parsing file at token "%s". Unable to '
'check the rest of file.' % token.string)
self.HandleError(errors.FILE_DOES_NOT_PARSE, message, token)
self.__error_handler.FinishFile()
return False
if FLAGS.error_trace:
state_tracker.HandleToken(token, state_tracker.GetLastNonSpaceToken())
else:
try:
state_tracker.HandleToken(token, state_tracker.GetLastNonSpaceToken())
except:
self.HandleError(errors.FILE_DOES_NOT_PARSE,
('Error parsing file at token "%s". Unable to '
'check the rest of file.' % token.string),
token)
self.__error_handler.FinishFile()
return False
# Check the token for style guide violations.
lint_rules.CheckToken(token, state_tracker)
state_tracker.HandleAfterToken(token)
# Move to the next token.
token = token.next
lint_rules.Finalize(state_tracker, self.__tokenizer.mode)
self.__error_handler.FinishFile()
return True
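# Minimal sketch of a concrete rules class built on LintRulesBase (illustrative
# only; the error code -1 and the TODO check below are hypothetical and not part
# of the real gjslint rule set).
class ExampleLintRules(LintRulesBase):
  """Flags any token whose string contains a TODO marker."""
  def CheckToken(self, token, parser_state):
    if 'TODO' in token.string:
      self._HandleError(-1, 'Found a TODO marker', token)
  def Finalize(self, parser_state, tokenizer_mode):
    # No whole-file checks are needed for this example.
    pass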
|
|
#
# Copyright (c) 2009-2015, Jack Poulson
# All rights reserved.
#
# This file is part of Elemental and is under the BSD 2-Clause License,
# which can be found in the LICENSE file in the root directory, or at
# http://opensource.org/licenses/BSD-2-Clause
#
from El.core import *
from El.blas_like import Copy, CopyFromRoot, CopyFromNonRoot, RealPart, ImagPart
# Input/Output
# ************
lib.ElPrint_i.argtypes = \
lib.ElPrint_s.argtypes = \
lib.ElPrint_d.argtypes = \
lib.ElPrint_c.argtypes = \
lib.ElPrint_z.argtypes = \
lib.ElPrintDist_i.argtypes = \
lib.ElPrintDist_s.argtypes = \
lib.ElPrintDist_d.argtypes = \
lib.ElPrintDist_c.argtypes = \
lib.ElPrintDist_z.argtypes = \
lib.ElPrintDistMultiVec_i.argtypes = \
lib.ElPrintDistMultiVec_s.argtypes = \
lib.ElPrintDistMultiVec_d.argtypes = \
lib.ElPrintDistMultiVec_c.argtypes = \
lib.ElPrintDistMultiVec_z.argtypes = \
lib.ElPrintGraph.argtypes = \
lib.ElPrintDistGraph.argtypes = \
lib.ElPrintSparse_i.argtypes = \
lib.ElPrintSparse_s.argtypes = \
lib.ElPrintSparse_d.argtypes = \
lib.ElPrintSparse_c.argtypes = \
lib.ElPrintSparse_z.argtypes = \
lib.ElPrintDistSparse_i.argtypes = \
lib.ElPrintDistSparse_s.argtypes = \
lib.ElPrintDistSparse_d.argtypes = \
lib.ElPrintDistSparse_c.argtypes = \
lib.ElPrintDistSparse_z.argtypes = \
[c_void_p,c_char_p]
lib.ElPrint_i.restype = \
lib.ElPrint_s.restype = \
lib.ElPrint_d.restype = \
lib.ElPrint_c.restype = \
lib.ElPrint_z.restype = \
lib.ElPrintDist_i.restype = \
lib.ElPrintDist_s.restype = \
lib.ElPrintDist_d.restype = \
lib.ElPrintDist_c.restype = \
lib.ElPrintDist_z.restype = \
lib.ElPrintDistMultiVec_i.restype = \
lib.ElPrintDistMultiVec_s.restype = \
lib.ElPrintDistMultiVec_d.restype = \
lib.ElPrintDistMultiVec_c.restype = \
lib.ElPrintDistMultiVec_z.restype = \
lib.ElPrintGraph.restype = \
lib.ElPrintDistGraph.restype = \
lib.ElPrintSparse_i.restype = \
lib.ElPrintSparse_s.restype = \
lib.ElPrintSparse_d.restype = \
lib.ElPrintSparse_c.restype = \
lib.ElPrintSparse_z.restype = \
lib.ElPrintDistSparse_i.restype = \
lib.ElPrintDistSparse_s.restype = \
lib.ElPrintDistSparse_d.restype = \
lib.ElPrintDistSparse_c.restype = \
lib.ElPrintDistSparse_z.restype = \
c_uint
def Print(A,title=''):
args = [A.obj,title]
if type(A) is Matrix:
if A.tag == iTag: lib.ElPrint_i(*args)
elif A.tag == sTag: lib.ElPrint_s(*args)
elif A.tag == dTag: lib.ElPrint_d(*args)
elif A.tag == cTag: lib.ElPrint_c(*args)
elif A.tag == zTag: lib.ElPrint_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElPrintDist_i(*args)
elif A.tag == sTag: lib.ElPrintDist_s(*args)
elif A.tag == dTag: lib.ElPrintDist_d(*args)
elif A.tag == cTag: lib.ElPrintDist_c(*args)
elif A.tag == zTag: lib.ElPrintDist_z(*args)
else: DataExcept()
elif type(A) is DistMultiVec:
if A.tag == iTag: lib.ElPrintDistMultiVec_i(*args)
elif A.tag == sTag: lib.ElPrintDistMultiVec_s(*args)
elif A.tag == dTag: lib.ElPrintDistMultiVec_d(*args)
elif A.tag == cTag: lib.ElPrintDistMultiVec_c(*args)
elif A.tag == zTag: lib.ElPrintDistMultiVec_z(*args)
else: DataExcept()
elif type(A) is Graph:
lib.ElPrintGraph(*args)
elif type(A) is DistGraph:
lib.ElPrintDistGraph(*args)
elif type(A) is SparseMatrix:
if A.tag == iTag: lib.ElPrintSparse_i(*args)
elif A.tag == sTag: lib.ElPrintSparse_s(*args)
elif A.tag == dTag: lib.ElPrintSparse_d(*args)
elif A.tag == cTag: lib.ElPrintSparse_c(*args)
elif A.tag == zTag: lib.ElPrintSparse_z(*args)
else: DataExcept()
elif type(A) is DistSparseMatrix:
if A.tag == iTag: lib.ElPrintDistSparse_i(*args)
elif A.tag == sTag: lib.ElPrintDistSparse_s(*args)
elif A.tag == dTag: lib.ElPrintDistSparse_d(*args)
elif A.tag == cTag: lib.ElPrintDistSparse_c(*args)
elif A.tag == zTag: lib.ElPrintDistSparse_z(*args)
else: DataExcept()
else: TypeExcept()
lib.ElSetColorMap.argtypes = [c_uint]
lib.ElSetColorMap.restype = c_uint
def SetColorMap(colorMap):
lib.ElSetColorMap(colorMap)
lib.ElGetColorMap.argtypes = [POINTER(c_uint)]
lib.ElGetColorMap.restype = c_uint
def ColorMap():
colorMap = c_uint()
lib.ElGetColorMap(pointer(colorMap))
return colorMap
lib.ElSetNumDiscreteColors.argtypes = [iType]
lib.ElSetNumDiscreteColors.restype = c_uint
def SetNumDiscreteColors(numColors):
lib.ElSetNumDiscreteColors(numColors)
lib.ElNumDiscreteColors.argtypes = [POINTER(iType)]
lib.ElNumDiscreteColors.restype = c_uint
def NumDiscreteColors():
numDiscrete = iType()
lib.ElNumDiscreteColors(pointer(numDiscrete))
return numDiscrete
lib.ElProcessEvents.argtypes = [c_int]
lib.ElProcessEvents.restype = c_uint
def ProcessEvents(numMsecs):
lib.ElProcessEvents(numMsecs)
lib.ElDisplay_i.argtypes = \
lib.ElDisplay_s.argtypes = \
lib.ElDisplay_d.argtypes = \
lib.ElDisplay_c.argtypes = \
lib.ElDisplay_z.argtypes = \
lib.ElDisplayDist_i.argtypes = \
lib.ElDisplayDist_s.argtypes = \
lib.ElDisplayDist_d.argtypes = \
lib.ElDisplayDist_c.argtypes = \
lib.ElDisplayDist_z.argtypes = \
lib.ElDisplayDistMultiVec_i.argtypes = \
lib.ElDisplayDistMultiVec_s.argtypes = \
lib.ElDisplayDistMultiVec_d.argtypes = \
lib.ElDisplayDistMultiVec_c.argtypes = \
lib.ElDisplayDistMultiVec_z.argtypes = \
lib.ElDisplayGraph.argtypes = \
lib.ElDisplayDistGraph.argtypes = \
lib.ElDisplaySparse_i.argtypes = \
lib.ElDisplaySparse_s.argtypes = \
lib.ElDisplaySparse_d.argtypes = \
lib.ElDisplaySparse_c.argtypes = \
lib.ElDisplaySparse_z.argtypes = \
lib.ElDisplayDistSparse_i.argtypes = \
lib.ElDisplayDistSparse_s.argtypes = \
lib.ElDisplayDistSparse_d.argtypes = \
lib.ElDisplayDistSparse_c.argtypes = \
lib.ElDisplayDistSparse_z.argtypes = \
[c_void_p,c_char_p]
lib.ElDisplay_i.restype = \
lib.ElDisplay_s.restype = \
lib.ElDisplay_d.restype = \
lib.ElDisplay_c.restype = \
lib.ElDisplay_z.restype = \
lib.ElDisplayDist_i.restype = \
lib.ElDisplayDist_s.restype = \
lib.ElDisplayDist_d.restype = \
lib.ElDisplayDist_c.restype = \
lib.ElDisplayDist_z.restype = \
lib.ElDisplayDistMultiVec_i.restype = \
lib.ElDisplayDistMultiVec_s.restype = \
lib.ElDisplayDistMultiVec_d.restype = \
lib.ElDisplayDistMultiVec_c.restype = \
lib.ElDisplayDistMultiVec_z.restype = \
lib.ElDisplayGraph.restype = \
lib.ElDisplayDistGraph.restype = \
lib.ElDisplaySparse_i.restype = \
lib.ElDisplaySparse_s.restype = \
lib.ElDisplaySparse_d.restype = \
lib.ElDisplaySparse_c.restype = \
lib.ElDisplaySparse_z.restype = \
lib.ElDisplayDistSparse_i.restype = \
lib.ElDisplayDistSparse_s.restype = \
lib.ElDisplayDistSparse_d.restype = \
lib.ElDisplayDistSparse_c.restype = \
lib.ElDisplayDistSparse_z.restype = \
c_uint
def Display(A,title='',tryPython=True):
if tryPython:
if type(A) is Matrix:
try:
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
isInline = 'inline' in mpl.get_backend()
isVec = min(A.Height(),A.Width()) == 1
if A.tag == cTag or A.tag == zTag:
AReal = Matrix(Base(A.tag))
AImag = Matrix(Base(A.tag))
RealPart(A,AReal)
ImagPart(A,AImag)
fig, (ax1,ax2) = plt.subplots(1,2)
ax1.set_title('Real part')
ax2.set_title('Imag part')
if isVec:
ax1.plot(np.squeeze(AReal.ToNumPy()),'bo-')
ax2.plot(np.squeeze(AImag.ToNumPy()),'bo-')
else:
imReal = ax1.imshow(AReal.ToNumPy())
cBarReal = fig.colorbar(imReal,ax=ax1)
imImag = ax2.imshow(AImag.ToNumPy())
cBarImag = fig.colorbar(imImag,ax=ax2)
plt.suptitle(title)
plt.tight_layout()
else:
fig = plt.figure()
axis = fig.add_axes([0.1,0.1,0.8,0.8])
if isVec:
axis.plot(np.squeeze(A.ToNumPy()),'bo-')
else:
im = axis.imshow(A.ToNumPy())
fig.colorbar(im,ax=axis)
plt.title(title)
plt.draw()
if not isInline:
plt.show(block=False)
return
except:
print 'Could not import matplotlib.pyplot'
elif type(A) is DistMatrix:
A_CIRC_CIRC = DistMatrix(A.tag,CIRC,CIRC,A.Grid())
Copy(A,A_CIRC_CIRC)
if A_CIRC_CIRC.CrossRank() == A_CIRC_CIRC.Root():
Display(A_CIRC_CIRC.Matrix(),title,True)
return
elif type(A) is DistMultiVec:
if mpi.Rank(A.Comm()) == 0:
ASeq = Matrix(A.tag)
CopyFromRoot(A,ASeq)
Display(ASeq,title,True)
else:
CopyFromNonRoot(A)
return
elif type(A) is Graph:
try:
import matplotlib as mpl
import matplotlib.pyplot as plt
import networkx as nx
isInline = 'inline' in mpl.get_backend()  # needed below to decide whether to call plt.show
numEdges = A.NumEdges()
G = nx.DiGraph()
for edge in xrange(numEdges):
source = A.Source(edge)
target = A.Target(edge)
G.add_edge(source,target)
fig = plt.figure()
plt.title(title)
nx.draw(G)
plt.draw()
if not isInline:
plt.show(block=False)
return
except:
print 'Could not import networkx and matplotlib.pyplot'
elif type(A) is DistGraph:
if mpi.Rank(A.Comm()) == 0:
ASeq = Graph()
CopyFromRoot(A,ASeq)
Display(ASeq,title,True)
else:
CopyFromNonRoot(A)
return
elif type(A) is SparseMatrix:
ADense = Matrix(A.tag)
Copy(A,ADense)
Display(ADense,title,True)
return
elif type(A) is DistSparseMatrix:
grid = Grid(A.Comm())
ADense = DistMatrix(A.tag,MC,MR,grid)
Copy(A,ADense)
Display(ADense,title,True)
return
# Fall back to the built-in Display if we have not succeeded
args = [A.obj,title]
numMsExtra = 200
if type(A) is Matrix:
if A.tag == iTag: lib.ElDisplay_i(*args)
elif A.tag == sTag: lib.ElDisplay_s(*args)
elif A.tag == dTag: lib.ElDisplay_d(*args)
elif A.tag == cTag: lib.ElDisplay_c(*args)
elif A.tag == zTag: lib.ElDisplay_z(*args)
else: DataExcept()
ProcessEvents(numMsExtra)
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElDisplayDist_i(*args)
elif A.tag == sTag: lib.ElDisplayDist_s(*args)
elif A.tag == dTag: lib.ElDisplayDist_d(*args)
elif A.tag == cTag: lib.ElDisplayDist_c(*args)
elif A.tag == zTag: lib.ElDisplayDist_z(*args)
else: DataExcept()
ProcessEvents(numMsExtra)
elif type(A) is DistMultiVec:
if A.tag == iTag: lib.ElDisplayDistMultiVec_i(*args)
elif A.tag == sTag: lib.ElDisplayDistMultiVec_s(*args)
elif A.tag == dTag: lib.ElDisplayDistMultiVec_d(*args)
elif A.tag == cTag: lib.ElDisplayDistMultiVec_c(*args)
elif A.tag == zTag: lib.ElDisplayDistMultiVec_z(*args)
else: DataExcept()
ProcessEvents(numMsExtra)
elif type(A) is Graph:
lib.ElDisplayGraph(*args)
ProcessEvents(numMsExtra)
elif type(A) is DistGraph:
lib.ElDisplayDistGraph(*args)
ProcessEvents(numMsExtra)
elif type(A) is SparseMatrix:
if A.tag == iTag: lib.ElDisplaySparse_i(*args)
elif A.tag == sTag: lib.ElDisplaySparse_s(*args)
elif A.tag == dTag: lib.ElDisplaySparse_d(*args)
elif A.tag == cTag: lib.ElDisplaySparse_c(*args)
elif A.tag == zTag: lib.ElDisplaySparse_z(*args)
else: DataExcept()
ProcessEvents(numMsExtra)
elif type(A) is DistSparseMatrix:
if A.tag == iTag: lib.ElDisplayDistSparse_i(*args)
elif A.tag == sTag: lib.ElDisplayDistSparse_s(*args)
elif A.tag == dTag: lib.ElDisplayDistSparse_d(*args)
elif A.tag == cTag: lib.ElDisplayDistSparse_c(*args)
elif A.tag == zTag: lib.ElDisplayDistSparse_z(*args)
else: DataExcept()
ProcessEvents(numMsExtra)
else: TypeExcept()
lib.ElSpy_i.argtypes = \
lib.ElSpyDist_i.argtypes = \
[c_void_p,c_char_p,iType]
lib.ElSpy_s.argtypes = \
lib.ElSpyDist_s.argtypes = \
[c_void_p,c_char_p,sType]
lib.ElSpy_d.argtypes = \
lib.ElSpyDist_d.argtypes = \
[c_void_p,c_char_p,dType]
lib.ElSpy_c.argtypes = \
lib.ElSpyDist_c.argtypes = \
[c_void_p,c_char_p,sType]
lib.ElSpy_z.argtypes = \
lib.ElSpyDist_z.argtypes = \
[c_void_p,c_char_p,dType]
lib.ElSpy_i.restype = \
lib.ElSpy_s.restype = \
lib.ElSpy_d.restype = \
lib.ElSpy_c.restype = \
lib.ElSpy_z.restype = \
lib.ElSpyDist_i.restype = \
lib.ElSpyDist_s.restype = \
lib.ElSpyDist_d.restype = \
lib.ElSpyDist_c.restype = \
lib.ElSpyDist_z.restype = \
c_uint
def Spy(A,title='',tol=0):
args = [A.obj,title,tol]
if type(A) is Matrix:
if A.tag == iTag: lib.ElSpy_i(*args)
elif A.tag == sTag: lib.ElSpy_s(*args)
elif A.tag == dTag: lib.ElSpy_d(*args)
elif A.tag == cTag: lib.ElSpy_c(*args)
elif A.tag == zTag: lib.ElSpy_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElSpyDist_i(*args)
elif A.tag == sTag: lib.ElSpyDist_s(*args)
elif A.tag == dTag: lib.ElSpyDist_d(*args)
elif A.tag == cTag: lib.ElSpyDist_c(*args)
elif A.tag == zTag: lib.ElSpyDist_z(*args)
else: DataExcept()
else: TypeExcept()
lib.ElRead_i.argtypes = \
lib.ElRead_s.argtypes = \
lib.ElRead_d.argtypes = \
lib.ElRead_c.argtypes = \
lib.ElRead_z.argtypes = \
lib.ElReadDist_i.argtypes = \
lib.ElReadDist_s.argtypes = \
lib.ElReadDist_d.argtypes = \
lib.ElReadDist_c.argtypes = \
lib.ElReadDist_z.argtypes = \
[c_void_p,c_char_p,c_uint]
lib.ElRead_i.restype = \
lib.ElRead_s.restype = \
lib.ElRead_d.restype = \
lib.ElRead_c.restype = \
lib.ElRead_z.restype = \
lib.ElReadDist_i.restype = \
lib.ElReadDist_s.restype = \
lib.ElReadDist_d.restype = \
lib.ElReadDist_c.restype = \
lib.ElReadDist_z.restype = \
c_uint
def Read(A,filename,fileFormat=AUTO):
args = [A.obj,filename,fileFormat]
if type(A) is Matrix:
if A.tag == iTag: lib.ElRead_i(*args)
elif A.tag == sTag: lib.ElRead_s(*args)
elif A.tag == dTag: lib.ElRead_d(*args)
elif A.tag == cTag: lib.ElRead_c(*args)
elif A.tag == zTag: lib.ElRead_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElReadDist_i(*args)
elif A.tag == sTag: lib.ElReadDist_s(*args)
elif A.tag == dTag: lib.ElReadDist_d(*args)
elif A.tag == cTag: lib.ElReadDist_c(*args)
elif A.tag == zTag: lib.ElReadDist_z(*args)
else: DataExcept()
else: TypeExcept()
lib.ElWrite_i.argtypes = \
lib.ElWrite_s.argtypes = \
lib.ElWrite_d.argtypes = \
lib.ElWrite_c.argtypes = \
lib.ElWrite_z.argtypes = \
lib.ElWriteDist_i.argtypes = \
lib.ElWriteDist_s.argtypes = \
lib.ElWriteDist_d.argtypes = \
lib.ElWriteDist_c.argtypes = \
lib.ElWriteDist_z.argtypes = \
[c_void_p,c_char_p,c_uint,c_char_p]
lib.ElWrite_i.restype = \
lib.ElWrite_s.restype = \
lib.ElWrite_d.restype = \
lib.ElWrite_c.restype = \
lib.ElWrite_z.restype = \
lib.ElWriteDist_i.restype = \
lib.ElWriteDist_s.restype = \
lib.ElWriteDist_d.restype = \
lib.ElWriteDist_c.restype = \
lib.ElWriteDist_z.restype = \
c_uint
def Write(A,basename,fileFormat,title=''):
args = [A.obj,basename,fileFormat,title]
if type(A) is Matrix:
if A.tag == iTag: lib.ElWrite_i(*args)
elif A.tag == sTag: lib.ElWrite_s(*args)
elif A.tag == dTag: lib.ElWrite_d(*args)
elif A.tag == cTag: lib.ElWrite_c(*args)
elif A.tag == zTag: lib.ElWrite_z(*args)
else: DataExcept()
elif type(A) is DistMatrix:
if A.tag == iTag: lib.ElWriteDist_i(*args)
elif A.tag == sTag: lib.ElWriteDist_s(*args)
elif A.tag == dTag: lib.ElWriteDist_d(*args)
elif A.tag == cTag: lib.ElWriteDist_c(*args)
elif A.tag == zTag: lib.ElWriteDist_z(*args)
else: DataExcept()
else: TypeExcept()
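# Illustrative usage sketch (not part of the bindings). Assuming 'A' is a
# Matrix or DistMatrix that has already been created and filled elsewhere,
# and that 'A.txt' is a hypothetical file name:
#   Read(A, 'A.txt', AUTO)   # let Elemental pick the file format
#   Print(A, 'A')            # plain-text dump
#   Display(A, 'A')          # matplotlib figure, else the built-in viewer
#   Spy(A, 'Sparsity of A')  # sparsity plot with zero tolerance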
|
|
#! python
# -*- coding: utf-8 -*-
# This is a utility module for integrating email functionality into VizAlerts.
import smtplib
import re
import os.path
from email.encoders import encode_base64
# added for MIME handling
from itertools import chain
from errno import ECONNREFUSED
from mimetypes import guess_type
from subprocess import Popen, PIPE
from io import StringIO
from email.header import Header
from email.generator import Generator
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from socket import error as SocketError
# import local modules
from . import config
from . import log
from . import vizalert
# regular expression used to split recipient address strings into separate email addresses
EMAIL_RECIP_SPLIT_REGEX = r'[\t\n\s;,]'
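# For example (hypothetical addresses):
#   re.split(EMAIL_RECIP_SPLIT_REGEX, 'a@example.com; b@example.com')
# yields ['a@example.com', '', 'b@example.com']; callers filter out the empties.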
class Email(object):
"""Represents an email to be sent"""
def __init__(self, fromaddr, toaddrs, subject, content, ccaddrs=None, bccaddrs=None, inlineattachments=None,
appendattachments=None):
self.fromaddr = fromaddr
self.toaddrs = toaddrs
self.subject = subject
self.content = content
self.ccaddrs = ccaddrs
self.bccaddrs = bccaddrs
self.inlineattachments = inlineattachments
self.appendattachments = appendattachments
# REVISIT: Should add other methods in this module to this class? Validation, at least.
def send_email(email_instance):
"""Generic function to send an email. The presumption is that all arguments have been validated prior to the call
to this function.
Fields carried by the email_instance argument:
fromaddr single email address
toaddrs string of recipient email addresses separated by the list of separators in EMAIL_RECIP_SPLIT_REGEX
subject string that is subject of email
content body of email, may contain HTML
ccaddrs cc recipients, see toaddr
bccaddrs bcc recipients, see toaddr
inlineattachments List of vizref dicts where each dict has one attachment. The minimum dict has an
imagepath key that points to the file to be attached.
appendattachments Appended (non-inline attachments). See inlineattachments for details on structure.
Nothing is returned by this function unless there is an exception.
"""
try:
log.logger.info(
'sending email: {},{},{},{},{},{},{}'.format(config.configs['smtp.serv'], email_instance.fromaddr,
email_instance.toaddrs, email_instance.ccaddrs,
email_instance.bccaddrs,
email_instance.subject, email_instance.inlineattachments,
email_instance.appendattachments))
log.logger.debug('email body: {}'.format(email_instance.content))
# using mixed type because there can be inline and non-inline attachments
msg = MIMEMultipart('mixed')
msg.set_charset('UTF-8')
msg['From'] = Header(email_instance.fromaddr)
msg['Subject'] = Header(email_instance.subject, 'utf-8')
log.logger.debug('TO ADDRESS: {}'.format(email_instance.toaddrs))
# Process direct recipients
toaddrs = [address for address in filter(None, re.split(EMAIL_RECIP_SPLIT_REGEX, email_instance.toaddrs)) if len(address) > 0]
msg['To'] = Header(', '.join(toaddrs))
allrecips = toaddrs
log.logger.debug('CC ADDRESS: {}'.format(email_instance.ccaddrs))
# Process indirect recipients
if email_instance.ccaddrs:
ccaddrs = [address for address in filter(None, re.split(EMAIL_RECIP_SPLIT_REGEX, email_instance.ccaddrs)) if len(address) > 0]
msg['CC'] = Header(', '.join(ccaddrs))
allrecips.extend(ccaddrs)
log.logger.debug('BCC ADDRESS: {}'.format(email_instance.bccaddrs))
if email_instance.bccaddrs:
bccaddrs = [address for address in filter(None, re.split(EMAIL_RECIP_SPLIT_REGEX, email_instance.bccaddrs)) if len(address) > 0]
# don't add to header, they are blind carbon-copied
allrecips.extend(bccaddrs)
# Create a section for the body and inline attachments
msgalternative = MIMEMultipart('related')
msg.attach(msgalternative)
msgalternative.attach(MIMEText(email_instance.content, 'html', 'utf-8'))
# Add inline attachments
if email_instance.inlineattachments != None:
for vizref in email_instance.inlineattachments:
msgalternative.attach(mimify_file(vizref['imagepath'], inline=True))
# Add appended attachments from Email Attachments field and prevent dup custom filenames
# MC: Feels like this code should be in VizAlert class? Or module? Not sure, leaving it here for now
appendedfilenames = []
if email_instance.appendattachments != None:
appendattachments = vizalert.merge_pdf_attachments(email_instance.appendattachments)
for vizref in appendattachments:
# if there is no |filename= option set then use the exported imagepath
if 'filename' not in vizref:
msg.attach(mimify_file(vizref['imagepath'], inline=False))
else:
# we need to make sure the custom filename is unique, if so then
# use the custom filename
if vizref['filename'] not in appendedfilenames:
appendedfilenames.append(vizref['filename'])
msg.attach(mimify_file(vizref['imagepath'], inline=False, overridename=vizref['filename']))
# use the exported imagepath
else:
msg.attach(mimify_file(vizref['imagepath'], inline=False))
log.logger.info('Warning: attempted to attach duplicate filename ' + vizref[
'filename'] + ', using unique auto-generated name instead.')
server = smtplib.SMTP(config.configs['smtp.serv'], config.configs['smtp.port'])
if config.configs['smtp.ssl']:
server.ehlo()
server.starttls()
if config.configs['smtp.user']:
server.login(str(config.configs['smtp.user']), str(config.configs['smtp.password']))
# from http://wordeology.com/computer/how-to-send-good-unicode-email-with-python.html
io = StringIO()
g = Generator(io, False) # second argument means "should I mangle From?"
g.flatten(msg)
server.sendmail(email_instance.fromaddr, [addr for addr in allrecips],
io.getvalue())
server.quit()
except smtplib.SMTPConnectError as e:
log.logger.error('Email failed to send; there was an issue connecting to the SMTP server: {}'.format(e))
raise e
except smtplib.SMTPHeloError as e:
log.logger.error('Email failed to send; the SMTP server refused our HELO message: {}'.format(e))
raise e
except smtplib.SMTPAuthenticationError as e:
log.logger.error('Email failed to send; there was an issue authenticating to SMTP server: {}'.format(e))
raise e
except smtplib.SMTPException as e:
log.logger.error('Email failed to send; there was an issue sending mail via SMTP server: {}'.format(e))
raise e
except Exception as e:
log.logger.error('Email failed to send: {}'.format(e))
raise e
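# Illustrative sketch only: builds (but does not send) a minimal Email. The
# addresses, subject and body are hypothetical; send_email() additionally
# requires valid smtp.* settings in config.configs.
def _example_alert_email():
    return Email('alerts@example.com', 'admin@example.com',
                 'Threshold breached', '<p>Sales dropped below target.</p>')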
def addresses_are_invalid(emailaddresses, emptystringok, regex_eval=None):
"""Validates all email addresses found in a given string, optionally that conform to the regex_eval"""
log.logger.debug('Validating email field value: {}'.format(emailaddresses))
address_list = [address for address in filter(None, re.split(EMAIL_RECIP_SPLIT_REGEX, emailaddresses)) if len(address) > 0]
for address in address_list:
log.logger.debug('Validating presumed email address: {}'.format(address))
if emptystringok and (address == '' or address is None):
return None
else:
errormessage = address_is_invalid(address, regex_eval)
if errormessage:
log.logger.debug('Address is invalid: {}, Error: {}'.format(address, errormessage))
if len(address) > 64:
address = address[:64] + '...' # truncate a too-long address for error formatting purposes
return {'address': address, 'errormessage': errormessage}
return None
def address_is_invalid(address, regex_eval=None):
"""Checks for a syntactically invalid email address."""
# (most code derived from from http://zeth.net/archive/2008/05/03/email-syntax-check)
# Email address must not be empty
if address is None or len(address) == 0 or address == '':
errormessage = 'Address is empty'
log.logger.error(errormessage)
return errormessage
# Validate address according to admin regex
if regex_eval:
log.logger.debug("testing address {} against regex {}".format(address, regex_eval))
if not re.match(regex_eval, address, re.IGNORECASE):
errormessage = 'Address must match regex pattern set by the administrator: {}'.format(regex_eval)
log.logger.error(errormessage)
return errormessage
# Email address must be at least 6 characters in total.
# This is not an RFC defined rule but is easy
if len(address) < 6:
errormessage = 'Address is too short: {}'.format(address)
log.logger.error(errormessage)
return errormessage
# Unicode in addresses not yet supported
try:
address.encode(encoding='ascii', errors='strict')
except Exception as e:
errormessage = 'Address must contain only ASCII characters: {}'.format(address)
log.logger.error(errormessage)
return errormessage
# Split up email address into parts.
try:
localpart, domainname = address.rsplit('@', 1)
host, toplevel = domainname.rsplit('.', 1)
log.logger.debug('Splitting Address: localpart, domainname, host, toplevel: {},{},{},{}'.format(localpart,
domainname,
host,
toplevel))
except ValueError:
errormessage = 'Address has too few parts'
log.logger.error(errormessage)
return errormessage
for i in '-_.%+.':
localpart = localpart.replace(i, "")
for i in '-_.':
host = host.replace(i, "")
log.logger.debug('Removing other characters from address: localpart, host: {},{}'.format(localpart, host))
# check for length
if len(localpart) > 64:
errormessage = 'Localpart of address exceeds max length (64 characters)'
log.logger.error(errormessage)
return errormessage
if len(address) > 254:
errormessage = 'Address exceeds max length (254 characters)'
log.logger.error(errormessage)
return errormessage
if localpart.isalnum() and host.isalnum():
return None # Email address is fine.
else:
errormessage = 'Address has funny characters'
log.logger.error(errormessage)
return errormessage
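# Illustrative examples (hypothetical addresses; log.logger must be configured,
# as it is everywhere else in this module):
#   address_is_invalid('jane.doe@example.com')  # -> None (address passes)
#   address_is_invalid('not-an-address')        # -> 'Address has too few parts'
#   addresses_are_invalid('a@example.com; b@example.com', False)  # -> None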
def mimify_file(filename, inline=True, overridename=None):
"""Returns an appropriate MIME object for the given file.
:param filename: A valid path to a file
:type filename: str
:returns: A MIME object for the given file
:rtype: instance of MIMEBase
"""
filename = os.path.abspath(os.path.expanduser(filename))
if overridename:
basefilename = overridename
else:
basefilename = os.path.basename(filename)
if inline:
msg = MIMEBase(*get_mimetype(filename))
msg.set_payload(open(filename, "rb").read())
msg.add_header('Content-ID', '<{}>'.format(basefilename))
msg.add_header('Content-Disposition', 'inline; filename="%s"' % basefilename)
else:
msg = MIMEBase(*get_mimetype(filename))
msg.set_payload(open(filename, "rb").read())
if overridename:
basefilename = overridename
msg.add_header('Content-Disposition', 'attachment; filename="%s"' % basefilename)
encode_base64(msg)
return msg
def get_mimetype(filename):
"""Returns the MIME type of the given file.
:param filename: A valid path to a file
:type filename: str
:returns: The file's MIME type
:rtype: tuple
"""
content_type, encoding = guess_type(filename)
if content_type is None or encoding is not None:
content_type = "application/octet-stream"
return content_type.split("/", 1)
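# Illustrative example (hypothetical file names): get_mimetype('report.pdf')
# returns ['application', 'pdf'], while an unrecognized extension such as
# 'data.unknownext' falls back to ['application', 'octet-stream'].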
def validate_addresses(vizdata,
allowed_from_address,
allowed_recipient_addresses,
email_action_actionfield,
email_to_actionfield,
email_from_actionfield,
email_cc_actionfield,
email_bcc_actionfield):
"""Loops through the viz data for an Advanced Alert and returns a list of dicts
containing any errors found in recipients"""
errorlist = []
rownum = 2 # account for field header in CSV
for row in vizdata:
if len(row) > 0:
if email_action_actionfield.get_value_from_dict(row) == '1':
email_to = email_to_actionfield.get_value_from_dict(row)
log.logger.debug('Validating "To" addresses: {}'.format(email_to))
result = addresses_are_invalid(email_to, False, allowed_recipient_addresses) # empty string not acceptable as a To address
if result:
errorlist.append(
{'Row': rownum, 'Field': (email_to_actionfield.field_name if email_to_actionfield.field_name else email_to_actionfield.name),
'Value': result['address'], 'Error': result['errormessage']})
email_from = email_from_actionfield.get_value_from_dict(row)
log.logger.debug('Validating "From" addresses: {}'.format(email_from))
result = addresses_are_invalid(email_from, False, allowed_from_address) # empty string not acceptable as a From address
if result:
errorlist.append({'Row': rownum, 'Field': (email_from_actionfield.field_name if email_from_actionfield.field_name else email_from_actionfield.name),
'Value': result['address'], 'Error': result['errormessage']})
# REVISIT THIS!
if email_cc_actionfield.field_name:
log.logger.debug('Validating "CC" addresses')
result = addresses_are_invalid(row[email_cc_actionfield.field_name], True, allowed_recipient_addresses)
if result:
errorlist.append({'Row': rownum, 'Field': email_cc_actionfield.field_name, 'Value': result['address'],
'Error': result['errormessage']})
if email_bcc_actionfield.field_name:
log.logger.debug('Validating "BCC" addresses')
result = addresses_are_invalid(row[email_bcc_actionfield.field_name], True, allowed_recipient_addresses)
if result:
errorlist.append({'Row': rownum, 'Field': email_bcc_actionfield.field_name, 'Value': result['address'],
'Error': result['errormessage']})
rownum += 1
return errorlist
|
|
import pytest
from awx.api.versioning import reverse
from awx.main.models import Project
@pytest.fixture
def organization_resource_creator(organization, user):
def rf(users, admins, job_templates, projects, inventories, teams):
# Associate one resource of every type with the organization
for i in range(users):
member_user = user('org-member %s' % i)
organization.member_role.members.add(member_user)
for i in range(admins):
admin_user = user('org-admin %s' % i)
organization.admin_role.members.add(admin_user)
for i in range(teams):
organization.teams.create(name='org-team %s' % i)
for i in range(inventories):
inventory = organization.inventories.create(name="associated-inv %s" % i)
for i in range(projects):
Project.objects.create(
name="test-proj %s" % i,
description="test-proj-desc",
organization=organization
)
# Mix up the inventories and projects used by the job templates
i_proj = 0
i_inv = 0
for i in range(job_templates):
project = Project.objects.filter(organization=organization)[i_proj]
# project = organization.projects.all()[i_proj]
inventory = organization.inventories.all()[i_inv]
project.jobtemplates.create(name="test-jt %s" % i,
description="test-job-template-desc",
inventory=inventory,
playbook="test_playbook.yml",
organization=organization)
i_proj += 1
i_inv += 1
if i_proj >= Project.objects.filter(organization=organization).count():
i_proj = 0
if i_inv >= organization.inventories.count():
i_inv = 0
return organization
return rf
COUNTS_PRIMES = {
'users': 11,
'admins': 5,
'job_templates': 3,
'projects': 3,
'inventories': 7,
'teams': 5
}
COUNTS_ZEROS = {
'users': 0,
'admins': 0,
'job_templates': 0,
'projects': 0,
'inventories': 0,
'teams': 0
}
@pytest.fixture
def resourced_organization(organization_resource_creator):
return organization_resource_creator(**COUNTS_PRIMES)
@pytest.mark.django_db
def test_org_counts_detail_admin(resourced_organization, user, get):
# Check that all types of resources are counted by a superuser
external_admin = user('admin', True)
response = get(reverse('api:organization_detail',
kwargs={'pk': resourced_organization.pk}), external_admin)
assert response.status_code == 200
counts = response.data['summary_fields']['related_field_counts']
assert counts == COUNTS_PRIMES
@pytest.mark.django_db
def test_org_counts_detail_member(resourced_organization, user, get):
# Check that a non-admin org member can only see users / admin in detail view
member_user = resourced_organization.member_role.members.get(username='org-member 1')
response = get(reverse('api:organization_detail',
kwargs={'pk': resourced_organization.pk}), member_user)
assert response.status_code == 200
counts = response.data['summary_fields']['related_field_counts']
assert counts == {
'users': COUNTS_PRIMES['users'], # Policy is that members can see other users and admins
'admins': COUNTS_PRIMES['admins'],
'job_templates': 0,
'projects': 0,
'inventories': 0,
'teams': 0
}
@pytest.mark.django_db
def test_org_counts_list_admin(resourced_organization, user, get):
# Check that all types of resources are counted by a superuser
external_admin = user('admin', True)
response = get(reverse('api:organization_list'), external_admin)
assert response.status_code == 200
counts = response.data['results'][0]['summary_fields']['related_field_counts']
assert counts == COUNTS_PRIMES
@pytest.mark.django_db
def test_org_counts_list_member(resourced_organization, user, get):
# Check that a non-admin user can only see the full project and
# user count, consistent with the RBAC rules
member_user = resourced_organization.member_role.members.get(username='org-member 1')
response = get(reverse('api:organization_list'), member_user)
assert response.status_code == 200
counts = response.data['results'][0]['summary_fields']['related_field_counts']
assert counts == {
'users': COUNTS_PRIMES['users'], # Policy is that members can see other users and admins
'admins': COUNTS_PRIMES['admins'],
'job_templates': 0,
'projects': 0,
'inventories': 0,
'teams': 0
}
@pytest.mark.django_db
def test_new_org_zero_counts(user, post):
# Check that a POST to the organization list endpoint returns
# correct counts, including the new record
org_list_url = reverse('api:organization_list')
post_response = post(url=org_list_url, data={'name': 'test organization',
'description': ''}, user=user('admin', True))
assert post_response.status_code == 201
new_org_list = post_response.render().data
counts_dict = new_org_list['summary_fields']['related_field_counts']
assert counts_dict == COUNTS_ZEROS
@pytest.mark.django_db
def test_two_organizations(resourced_organization, organizations, user, get):
# Check correct results for two organizations are returned
external_admin = user('admin', True)
organization_zero = organizations(1)[0]
response = get(reverse('api:organization_list'), external_admin)
assert response.status_code == 200
org_id_full = resourced_organization.id
org_id_zero = organization_zero.id
counts = {}
for i in range(2):
org_id = response.data['results'][i]['id']
counts[org_id] = response.data['results'][i]['summary_fields']['related_field_counts']
assert counts[org_id_full] == COUNTS_PRIMES
assert counts[org_id_zero] == COUNTS_ZEROS
@pytest.mark.django_db
def test_scan_JT_counted(resourced_organization, user, get):
admin_user = user('admin', True)
counts_dict = COUNTS_PRIMES
# Test list view
list_response = get(reverse('api:organization_list'), admin_user)
assert list_response.status_code == 200
assert list_response.data['results'][0]['summary_fields']['related_field_counts'] == counts_dict
# Test detail view
detail_response = get(reverse('api:organization_detail', kwargs={'pk': resourced_organization.pk}), admin_user)
assert detail_response.status_code == 200
assert detail_response.data['summary_fields']['related_field_counts'] == counts_dict
@pytest.mark.django_db
def test_JT_not_double_counted(resourced_organization, user, get):
admin_user = user('admin', True)
proj = Project.objects.filter(organization=resourced_organization).all()[0]
# Add a run job template to the org
proj.jobtemplates.create(
job_type='run',
inventory=resourced_organization.inventories.all()[0],
project=proj,
name='double-linked-job-template',
organization=resourced_organization)
counts_dict = dict(COUNTS_PRIMES)  # copy so the module-level expected counts are not mutated
counts_dict['job_templates'] += 1
# Test list view
list_response = get(reverse('api:organization_list'), admin_user)
assert list_response.status_code == 200
assert list_response.data['results'][0]['summary_fields']['related_field_counts'] == counts_dict
# Test detail view
detail_response = get(reverse('api:organization_detail', kwargs={'pk': resourced_organization.pk}), admin_user)
assert detail_response.status_code == 200
assert detail_response.data['summary_fields']['related_field_counts'] == counts_dict
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Framework of debug-wrapped sessions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.debug import debug_data
from tensorflow.python.debug.wrappers import framework
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class TestDebugWrapperSession(framework.BaseDebugWrapperSession):
"""A concrete implementation of BaseDebugWrapperSession for test."""
def __init__(self, sess, dump_root, observer):
# Supply dump root.
self._dump_root = dump_root
# Supply observer.
self._obs = observer
# Invoke superclass constructor.
framework.BaseDebugWrapperSession.__init__(self, sess)
def on_session_init(self, request):
"""Override abstract on-session-init callback method."""
self._obs["sess_init_count"] += 1
self._obs["request_sess"] = request.session
return framework.OnSessionInitResponse(
framework.OnSessionInitAction.PROCEED)
def on_run_start(self, request):
"""Override abstract on-run-start callback method."""
self._obs["on_run_start_count"] += 1
self._obs["run_fetches"] = request.fetches
self._obs["run_feed_dict"] = request.feed_dict
return framework.OnRunStartResponse(
framework.OnRunStartAction.DEBUG_RUN,
["file://" + self._dump_root])
def on_run_end(self, request):
"""Override abstract on-run-end callback method."""
self._obs["on_run_end_count"] += 1
self._obs["performed_action"] = request.performed_action
self._obs["tf_error"] = request.tf_error
return framework.OnRunEndResponse()
class TestDebugWrapperSessionBadAction(framework.BaseDebugWrapperSession):
"""A concrete implementation of BaseDebugWrapperSession for test.
This class intentionally puts a bad action value in OnSessionInitResponse
and/or in OnRunStartAction to test the handling of such invalid cases.
"""
def __init__(
self,
sess,
bad_init_action=None,
bad_run_start_action=None,
bad_debug_urls=None):
"""Constructor.
Args:
sess: The TensorFlow Session object to be wrapped.
bad_init_action: (str) bad action value to be returned during the
on-session-init callback.
bad_run_start_action: (str) bad action value to be returned during the
the on-run-start callback.
bad_debug_urls: Bad URL values to be returned during the on-run-start
callback.
"""
self._bad_init_action = bad_init_action
self._bad_run_start_action = bad_run_start_action
self._bad_debug_urls = bad_debug_urls
# Invoke superclass constructor.
framework.BaseDebugWrapperSession.__init__(self, sess)
def on_session_init(self, request):
if self._bad_init_action:
return framework.OnSessionInitResponse(self._bad_init_action)
else:
return framework.OnSessionInitResponse(
framework.OnSessionInitAction.PROCEED)
def on_run_start(self, request):
debug_urls = self._bad_debug_urls or []
if self._bad_run_start_action:
return framework.OnRunStartResponse(
self._bad_run_start_action, debug_urls)
else:
return framework.OnRunStartResponse(
framework.OnRunStartAction.DEBUG_RUN, debug_urls)
def on_run_end(self, request):
return framework.OnRunEndResponse()
class DebugWrapperSessionTest(test_util.TensorFlowTestCase):
def setUp(self):
self._observer = {
"sess_init_count": 0,
"request_sess": None,
"on_run_start_count": 0,
"run_fetches": None,
"run_feed_dict": None,
"on_run_end_count": 0,
"performed_action": None,
"tf_error": None,
}
self._dump_root = tempfile.mkdtemp()
self._sess = session.Session()
self._a_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
self._b_init_val = np.array([[2.0], [-1.0]])
self._c_val = np.array([[-4.0], [6.0]])
self._a_init = constant_op.constant(
self._a_init_val, shape=[2, 2], name="a1_init")
self._b_init = constant_op.constant(
self._b_init_val, shape=[2, 1], name="b_init")
self._ph = array_ops.placeholder(dtype=dtypes.float64, name="ph")
self._a = variables.Variable(self._a_init, name="a1")
self._b = variables.Variable(self._b_init, name="b")
self._c = constant_op.constant(self._c_val, shape=[2, 1], name="c")
# Matrix product of a and b.
self._p = math_ops.matmul(self._a, self._b, name="p1")
# Matrix product of a and ph.
self._q = math_ops.matmul(self._a, self._ph, name="q")
# Sum of two vectors.
self._s = math_ops.add(self._p, self._c, name="s")
# Initialize the variables.
self._sess.run(self._a.initializer)
self._sess.run(self._b.initializer)
def tearDown(self):
# Tear down temporary dump directory.
shutil.rmtree(self._dump_root)
ops.reset_default_graph()
def testSessionInit(self):
self.assertEqual(0, self._observer["sess_init_count"])
wrapper_sess = TestDebugWrapperSession(self._sess, self._dump_root,
self._observer)
# Assert that on-session-init callback is invoked.
self.assertEqual(1, self._observer["sess_init_count"])
# Assert that the request to the on-session-init callback carries the
# correct session object.
self.assertEqual(self._sess, self._observer["request_sess"])
# Verify that the wrapper session implements the session.SessionInterface.
self.assertTrue(isinstance(wrapper_sess, session.SessionInterface))
self.assertEqual(self._sess.sess_str, wrapper_sess.sess_str)
self.assertEqual(self._sess.graph, wrapper_sess.graph)
# Check that the partial_run_setup and partial_run are not implemented for
# the debug wrapper session.
with self.assertRaises(NotImplementedError):
wrapper_sess.partial_run_setup(self._p)
def testInteractiveSessionInit(self):
"""The wrapper should work also on other subclassses of session.Session."""
TestDebugWrapperSession(
session.InteractiveSession(), self._dump_root, self._observer)
def testSessionRun(self):
wrapper = TestDebugWrapperSession(
self._sess, self._dump_root, self._observer)
# Check initial state of the observer.
self.assertEqual(0, self._observer["on_run_start_count"])
self.assertEqual(0, self._observer["on_run_end_count"])
s = wrapper.run(self._s)
# Assert the run return value is correct.
self.assertAllClose(np.array([[3.0], [4.0]]), s)
# Assert the on-run-start method is invoked.
self.assertEqual(1, self._observer["on_run_start_count"])
# Assert the on-run-start request reflects the correct fetch.
self.assertEqual(self._s, self._observer["run_fetches"])
# Assert the on-run-start request reflects the correct feed_dict.
self.assertIsNone(self._observer["run_feed_dict"])
# Assert the file debug URL has led to dump on the filesystem.
dump = debug_data.DebugDumpDir(self._dump_root)
self.assertEqual(7, len(dump.dumped_tensor_data))
# Assert the on-run-end method is invoked.
self.assertEqual(1, self._observer["on_run_end_count"])
# Assert the performed action field in the on-run-end callback request is
# correct.
self.assertEqual(
framework.OnRunStartAction.DEBUG_RUN,
self._observer["performed_action"])
# No TensorFlow runtime error should have happened.
self.assertIsNone(self._observer["tf_error"])
def testSessionInitInvalidSessionType(self):
"""Attempt to wrap a non-Session-type object should cause an exception."""
wrapper = TestDebugWrapperSessionBadAction(self._sess)
with self.assertRaisesRegexp(TypeError, "Expected type .*; got type .*"):
TestDebugWrapperSessionBadAction(wrapper)
def testSessionInitBadActionValue(self):
with self.assertRaisesRegexp(
ValueError, "Invalid OnSessionInitAction value: nonsense_action"):
TestDebugWrapperSessionBadAction(
self._sess, bad_init_action="nonsense_action")
def testRunStartBadActionValue(self):
wrapper = TestDebugWrapperSessionBadAction(
self._sess, bad_run_start_action="nonsense_action")
with self.assertRaisesRegexp(
ValueError, "Invalid OnRunStartAction value: nonsense_action"):
wrapper.run(self._s)
def testRunStartBadURLs(self):
# debug_urls ought to be a list of str, not a str. So an exception should
# be raised during a run() call.
wrapper = TestDebugWrapperSessionBadAction(
self._sess, bad_debug_urls="file://foo")
with self.assertRaisesRegexp(TypeError, "Expected type .*; got type .*"):
wrapper.run(self._s)
def testErrorDuringRun(self):
wrapper = TestDebugWrapperSession(self._sess, self._dump_root,
self._observer)
# No matrix size mismatch.
self.assertAllClose(
np.array([[11.0], [-1.0]]),
wrapper.run(self._q, feed_dict={self._ph: np.array([[1.0], [2.0]])}))
self.assertEqual(1, self._observer["on_run_end_count"])
self.assertIsNone(self._observer["tf_error"])
# Now there should be a matrix size mismatch error.
wrapper.run(self._q, feed_dict={self._ph: np.array([[1.0], [2.0], [3.0]])})
self.assertEqual(2, self._observer["on_run_end_count"])
self.assertTrue(
isinstance(self._observer["tf_error"], errors.InvalidArgumentError))
if __name__ == "__main__":
googletest.main()
|
|
"""Collection of useful functions for the HomeKit component."""
from collections import OrderedDict, namedtuple
import logging
import voluptuous as vol
from homeassistant.components import fan, media_player, sensor
from homeassistant.const import (
ATTR_CODE, ATTR_SUPPORTED_FEATURES, CONF_NAME, CONF_TYPE, TEMP_CELSIUS)
from homeassistant.core import split_entity_id
import homeassistant.helpers.config_validation as cv
import homeassistant.util.temperature as temp_util
from .const import (
CONF_FEATURE, CONF_FEATURE_LIST, CONF_LINKED_BATTERY_SENSOR,
CONF_LOW_BATTERY_THRESHOLD, DEFAULT_LOW_BATTERY_THRESHOLD, FEATURE_ON_OFF,
FEATURE_PLAY_PAUSE, FEATURE_PLAY_STOP, FEATURE_TOGGLE_MUTE,
HOMEKIT_NOTIFY_ID, TYPE_FAUCET, TYPE_OUTLET, TYPE_SHOWER, TYPE_SPRINKLER,
TYPE_SWITCH, TYPE_VALVE)
_LOGGER = logging.getLogger(__name__)
BASIC_INFO_SCHEMA = vol.Schema({
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_LINKED_BATTERY_SENSOR): cv.entity_domain(sensor.DOMAIN),
vol.Optional(CONF_LOW_BATTERY_THRESHOLD,
default=DEFAULT_LOW_BATTERY_THRESHOLD): cv.positive_int,
})
FEATURE_SCHEMA = BASIC_INFO_SCHEMA.extend({
vol.Optional(CONF_FEATURE_LIST, default=None): cv.ensure_list,
})
CODE_SCHEMA = BASIC_INFO_SCHEMA.extend({
vol.Optional(ATTR_CODE, default=None): vol.Any(None, cv.string),
})
MEDIA_PLAYER_SCHEMA = vol.Schema({
vol.Required(CONF_FEATURE): vol.All(
cv.string, vol.In((FEATURE_ON_OFF, FEATURE_PLAY_PAUSE,
FEATURE_PLAY_STOP, FEATURE_TOGGLE_MUTE))),
})
SWITCH_TYPE_SCHEMA = BASIC_INFO_SCHEMA.extend({
vol.Optional(CONF_TYPE, default=TYPE_SWITCH): vol.All(
cv.string, vol.In((
TYPE_FAUCET, TYPE_OUTLET, TYPE_SHOWER, TYPE_SPRINKLER,
TYPE_SWITCH, TYPE_VALVE))),
})
def validate_entity_config(values):
"""Validate config entry for CONF_ENTITY."""
if not isinstance(values, dict):
raise vol.Invalid('expected a dictionary')
entities = {}
for entity_id, config in values.items():
entity = cv.entity_id(entity_id)
domain, _ = split_entity_id(entity)
if not isinstance(config, dict):
            raise vol.Invalid('The configuration for {} must be '
                              'a dictionary.'.format(entity))
if domain in ('alarm_control_panel', 'lock'):
config = CODE_SCHEMA(config)
elif domain == media_player.const.DOMAIN:
config = FEATURE_SCHEMA(config)
feature_list = {}
for feature in config[CONF_FEATURE_LIST]:
params = MEDIA_PLAYER_SCHEMA(feature)
key = params.pop(CONF_FEATURE)
if key in feature_list:
raise vol.Invalid('A feature can be added only once for {}'
.format(entity))
feature_list[key] = params
config[CONF_FEATURE_LIST] = feature_list
elif domain == 'switch':
config = SWITCH_TYPE_SCHEMA(config)
else:
config = BASIC_INFO_SCHEMA(config)
entities[entity] = config
return entities
def validate_media_player_features(state, feature_list):
"""Validate features for media players."""
features = state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
supported_modes = []
if features & (media_player.const.SUPPORT_TURN_ON |
media_player.const.SUPPORT_TURN_OFF):
supported_modes.append(FEATURE_ON_OFF)
if features & (media_player.const.SUPPORT_PLAY |
media_player.const.SUPPORT_PAUSE):
supported_modes.append(FEATURE_PLAY_PAUSE)
if features & (media_player.const.SUPPORT_PLAY |
media_player.const.SUPPORT_STOP):
supported_modes.append(FEATURE_PLAY_STOP)
if features & media_player.const.SUPPORT_VOLUME_MUTE:
supported_modes.append(FEATURE_TOGGLE_MUTE)
error_list = []
for feature in feature_list:
if feature not in supported_modes:
error_list.append(feature)
if error_list:
_LOGGER.error('%s does not support features: %s',
state.entity_id, error_list)
return False
return True
SpeedRange = namedtuple('SpeedRange', ('start', 'target'))
SpeedRange.__doc__ += """ Maps Home Assistant speed \
values to percentage based HomeKit speeds.
start: Start of the range (inclusive).
target: Percentage to use to determine HomeKit percentages \
from HomeAssistant speed.
"""
class HomeKitSpeedMapping:
"""Supports conversion between Home Assistant and HomeKit fan speeds."""
def __init__(self, speed_list):
"""Initialize a new SpeedMapping object."""
if speed_list[0] != fan.SPEED_OFF:
_LOGGER.warning("%s does not contain the speed setting "
"%s as its first element. "
"Assuming that %s is equivalent to 'off'.",
speed_list, fan.SPEED_OFF, speed_list[0])
self.speed_ranges = OrderedDict()
list_size = len(speed_list)
for index, speed in enumerate(speed_list):
# By dividing by list_size -1 the following
# desired attributes hold true:
# * index = 0 => 0%, equal to "off"
# * index = len(speed_list) - 1 => 100 %
# * all other indices are equally distributed
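            # Illustrative example (not part of the original code): for a
            # hypothetical speed_list ['off', 'low', 'medium', 'high'] (list_size 4)
            # the mapping below becomes:
            #   'off'    -> SpeedRange(start=0.0,  target=0.0)
            #   'low'    -> SpeedRange(start=25.0, target ~33.3)
            #   'medium' -> SpeedRange(start=50.0, target ~66.7)
            #   'high'   -> SpeedRange(start=75.0, target=100.0)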
target = index * 100 / (list_size - 1)
start = index * 100 / list_size
self.speed_ranges[speed] = SpeedRange(start, target)
def speed_to_homekit(self, speed):
"""Map Home Assistant speed state to HomeKit speed."""
if speed is None:
return None
speed_range = self.speed_ranges[speed]
return speed_range.target
def speed_to_states(self, speed):
"""Map HomeKit speed to Home Assistant speed state."""
for state, speed_range in reversed(self.speed_ranges.items()):
if speed_range.start <= speed:
return state
return list(self.speed_ranges.keys())[0]
def show_setup_message(hass, pincode):
"""Display persistent notification with setup information."""
pin = pincode.decode()
_LOGGER.info('Pincode: %s', pin)
message = 'To set up Home Assistant in the Home App, enter the ' \
'following code:\n### {}'.format(pin)
hass.components.persistent_notification.create(
message, 'HomeKit Setup', HOMEKIT_NOTIFY_ID)
def dismiss_setup_message(hass):
"""Dismiss persistent notification and remove QR code."""
hass.components.persistent_notification.dismiss(HOMEKIT_NOTIFY_ID)
def convert_to_float(state):
"""Return float of state, catch errors."""
try:
return float(state)
except (ValueError, TypeError):
return None
def temperature_to_homekit(temperature, unit):
"""Convert temperature to Celsius for HomeKit."""
return round(temp_util.convert(temperature, unit, TEMP_CELSIUS) * 2) / 2
def temperature_to_states(temperature, unit):
"""Convert temperature back from Celsius to Home Assistant unit."""
return round(temp_util.convert(temperature, TEMP_CELSIUS, unit) * 2) / 2
def density_to_air_quality(density):
"""Map PM2.5 density to HomeKit AirQuality level."""
if density <= 35:
return 1
if density <= 75:
return 2
if density <= 115:
return 3
if density <= 150:
return 4
return 5
|
|
# Copyright 2000-2010 Michael Hudson-Doyle <micahel@gmail.com>
# Antonio Cuni
# Armin Rigo
#
# All Rights Reserved
#
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose is hereby granted without fee,
# provided that the above copyright notice appear in all copies and
# that both that copyright notice and this permission notice appear in
# supporting documentation.
#
# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO
# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import termios, select, os, struct, errno
import signal, re, time, sys
from fcntl import ioctl
from . import curses
from .fancy_termios import tcgetattr, tcsetattr
from .console import Console, Event
from .unix_eventqueue import EventQueue
from .trace import trace
class InvalidTerminal(RuntimeError):
pass
try:
unicode
except NameError:
unicode = str
_error = (termios.error, curses.error, InvalidTerminal)
# there are arguments for changing this to "refresh"
SIGWINCH_EVENT = 'repaint'
FIONREAD = getattr(termios, "FIONREAD", None)
TIOCGWINSZ = getattr(termios, "TIOCGWINSZ", None)
def _my_getstr(cap, optional=0):
r = curses.tigetstr(cap)
if not optional and r is None:
raise InvalidTerminal(
"terminal doesn't have the required '%s' capability"%cap)
return r
# at this point, can we say: AAAAAAAAAAAAAAAAAAAAAARGH!
def maybe_add_baudrate(dict, rate):
name = 'B%d'%rate
if hasattr(termios, name):
dict[getattr(termios, name)] = rate
ratedict = {}
for r in [0, 110, 115200, 1200, 134, 150, 1800, 19200, 200, 230400,
2400, 300, 38400, 460800, 4800, 50, 57600, 600, 75, 9600]:
maybe_add_baudrate(ratedict, r)
del r, maybe_add_baudrate
delayprog = re.compile(b"\\$<([0-9]+)((?:/|\\*){0,2})>")
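# The pattern above matches terminfo padding/delay markers embedded in capability
# strings, e.g. b"$<5>" (a 5 ms delay), b"$<20*>" (delay proportional to the number
# of affected lines) or b"$<5/>" (mandatory padding); __tputs below honours them.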
try:
poll = select.poll
except AttributeError:
    # this is exactly the minimum necessary to support what we
# do with poll objects
class poll:
def __init__(self):
pass
def register(self, fd, flag):
self.fd = fd
def poll(self, timeout=None):
r,w,e = select.select([self.fd],[],[],timeout)
return r
POLLIN = getattr(select, "POLLIN", None)
class UnixConsole(Console):
def __init__(self, f_in=0, f_out=1, term=None, encoding=None):
if encoding is None:
encoding = sys.getdefaultencoding()
self.encoding = encoding
if isinstance(f_in, int):
self.input_fd = f_in
else:
self.input_fd = f_in.fileno()
if isinstance(f_out, int):
self.output_fd = f_out
else:
self.output_fd = f_out.fileno()
self.pollob = poll()
self.pollob.register(self.input_fd, POLLIN)
curses.setupterm(term, self.output_fd)
self.term = term
self._bel = _my_getstr("bel")
self._civis = _my_getstr("civis", optional=1)
self._clear = _my_getstr("clear")
self._cnorm = _my_getstr("cnorm", optional=1)
self._cub = _my_getstr("cub", optional=1)
self._cub1 = _my_getstr("cub1", 1)
self._cud = _my_getstr("cud", 1)
self._cud1 = _my_getstr("cud1", 1)
self._cuf = _my_getstr("cuf", 1)
self._cuf1 = _my_getstr("cuf1", 1)
self._cup = _my_getstr("cup")
self._cuu = _my_getstr("cuu", 1)
self._cuu1 = _my_getstr("cuu1", 1)
self._dch1 = _my_getstr("dch1", 1)
self._dch = _my_getstr("dch", 1)
self._el = _my_getstr("el")
self._hpa = _my_getstr("hpa", 1)
self._ich = _my_getstr("ich", 1)
self._ich1 = _my_getstr("ich1", 1)
self._ind = _my_getstr("ind", 1)
self._pad = _my_getstr("pad", 1)
self._ri = _my_getstr("ri", 1)
self._rmkx = _my_getstr("rmkx", 1)
self._smkx = _my_getstr("smkx", 1)
## work out how we're going to sling the cursor around
        if 0 and self._hpa: # hpa doesn't work in Windows telnet :-(
self.__move_x = self.__move_x_hpa
elif self._cub and self._cuf:
self.__move_x = self.__move_x_cub_cuf
elif self._cub1 and self._cuf1:
self.__move_x = self.__move_x_cub1_cuf1
else:
raise RuntimeError("insufficient terminal (horizontal)")
if self._cuu and self._cud:
self.__move_y = self.__move_y_cuu_cud
elif self._cuu1 and self._cud1:
self.__move_y = self.__move_y_cuu1_cud1
else:
raise RuntimeError("insufficient terminal (vertical)")
if self._dch1:
self.dch1 = self._dch1
elif self._dch:
self.dch1 = curses.tparm(self._dch, 1)
else:
self.dch1 = None
if self._ich1:
self.ich1 = self._ich1
elif self._ich:
self.ich1 = curses.tparm(self._ich, 1)
else:
self.ich1 = None
self.__move = self.__move_short
self.event_queue = EventQueue(self.input_fd, self.encoding)
self.cursor_visible = 1
def change_encoding(self, encoding):
self.encoding = encoding
def refresh(self, screen, c_xy):
# this function is still too long (over 90 lines)
cx, cy = c_xy
if not self.__gone_tall:
while len(self.screen) < min(len(screen), self.height):
self.__hide_cursor()
self.__move(0, len(self.screen) - 1)
self.__write("\n")
self.__posxy = 0, len(self.screen)
self.screen.append("")
else:
while len(self.screen) < len(screen):
self.screen.append("")
if len(screen) > self.height:
self.__gone_tall = 1
self.__move = self.__move_tall
px, py = self.__posxy
old_offset = offset = self.__offset
height = self.height
# we make sure the cursor is on the screen, and that we're
# using all of the screen if we can
if cy < offset:
offset = cy
elif cy >= offset + height:
offset = cy - height + 1
elif offset > 0 and len(screen) < offset + height:
offset = max(len(screen) - height, 0)
screen.append("")
oldscr = self.screen[old_offset:old_offset + height]
newscr = screen[offset:offset + height]
# use hardware scrolling if we have it.
if old_offset > offset and self._ri:
self.__hide_cursor()
self.__write_code(self._cup, 0, 0)
self.__posxy = 0, old_offset
for i in range(old_offset - offset):
self.__write_code(self._ri)
oldscr.pop(-1)
oldscr.insert(0, "")
elif old_offset < offset and self._ind:
self.__hide_cursor()
self.__write_code(self._cup, self.height - 1, 0)
self.__posxy = 0, old_offset + self.height - 1
for i in range(offset - old_offset):
self.__write_code(self._ind)
oldscr.pop(0)
oldscr.append("")
self.__offset = offset
for y, oldline, newline, in zip(range(offset, offset + height),
oldscr,
newscr):
if oldline != newline:
self.__write_changed_line(y, oldline, newline, px)
y = len(newscr)
while y < len(oldscr):
self.__hide_cursor()
self.__move(0, y)
self.__posxy = 0, y
self.__write_code(self._el)
y += 1
self.__show_cursor()
self.screen = screen
self.move_cursor(cx, cy)
self.flushoutput()
def __write_changed_line(self, y, oldline, newline, px):
# this is frustrating; there's no reason to test (say)
# self.dch1 inside the loop -- but alternative ways of
# structuring this function are equally painful (I'm trying to
# avoid writing code generators these days...)
x = 0
minlen = min(len(oldline), len(newline))
#
# reuse the oldline as much as possible, but stop as soon as we
# encounter an ESCAPE, because it might be the start of an escape
        # sequence
while x < minlen and oldline[x] == newline[x] and newline[x] != '\x1b':
x += 1
if oldline[x:] == newline[x+1:] and self.ich1:
if ( y == self.__posxy[1] and x > self.__posxy[0]
and oldline[px:x] == newline[px+1:x+1] ):
x = px
self.__move(x, y)
self.__write_code(self.ich1)
self.__write(newline[x])
self.__posxy = x + 1, y
elif x < minlen and oldline[x + 1:] == newline[x + 1:]:
self.__move(x, y)
self.__write(newline[x])
self.__posxy = x + 1, y
elif (self.dch1 and self.ich1 and len(newline) == self.width
and x < len(newline) - 2
and newline[x+1:-1] == oldline[x:-2]):
self.__hide_cursor()
self.__move(self.width - 2, y)
self.__posxy = self.width - 2, y
self.__write_code(self.dch1)
self.__move(x, y)
self.__write_code(self.ich1)
self.__write(newline[x])
self.__posxy = x + 1, y
else:
self.__hide_cursor()
self.__move(x, y)
if len(oldline) > len(newline):
self.__write_code(self._el)
self.__write(newline[x:])
self.__posxy = len(newline), y
if '\x1b' in newline:
# ANSI escape characters are present, so we can't assume
# anything about the position of the cursor. Moving the cursor
# to the left margin should work to get to a known position.
self.move_cursor(0, y)
def __write(self, text):
self.__buffer.append((text, 0))
def __write_code(self, fmt, *args):
self.__buffer.append((curses.tparm(fmt, *args), 1))
def __maybe_write_code(self, fmt, *args):
if fmt:
self.__write_code(fmt, *args)
def __move_y_cuu1_cud1(self, y):
dy = y - self.__posxy[1]
if dy > 0:
self.__write_code(dy*self._cud1)
elif dy < 0:
self.__write_code((-dy)*self._cuu1)
def __move_y_cuu_cud(self, y):
dy = y - self.__posxy[1]
if dy > 0:
self.__write_code(self._cud, dy)
elif dy < 0:
self.__write_code(self._cuu, -dy)
def __move_x_hpa(self, x):
if x != self.__posxy[0]:
self.__write_code(self._hpa, x)
def __move_x_cub1_cuf1(self, x):
dx = x - self.__posxy[0]
if dx > 0:
self.__write_code(self._cuf1*dx)
elif dx < 0:
self.__write_code(self._cub1*(-dx))
def __move_x_cub_cuf(self, x):
dx = x - self.__posxy[0]
if dx > 0:
self.__write_code(self._cuf, dx)
elif dx < 0:
self.__write_code(self._cub, -dx)
def __move_short(self, x, y):
self.__move_x(x)
self.__move_y(y)
def __move_tall(self, x, y):
assert 0 <= y - self.__offset < self.height, y - self.__offset
self.__write_code(self._cup, y - self.__offset, x)
def move_cursor(self, x, y):
if y < self.__offset or y >= self.__offset + self.height:
self.event_queue.insert(Event('scroll', None))
else:
self.__move(x, y)
self.__posxy = x, y
self.flushoutput()
def prepare(self):
# per-readline preparations:
self.__svtermstate = tcgetattr(self.input_fd)
raw = self.__svtermstate.copy()
raw.iflag &=~ (termios.BRKINT | termios.INPCK |
termios.ISTRIP | termios.IXON)
raw.oflag &=~ (termios.OPOST)
raw.cflag &=~ (termios.CSIZE|termios.PARENB)
raw.cflag |= (termios.CS8)
raw.lflag &=~ (termios.ICANON|termios.ECHO|
termios.IEXTEN|(termios.ISIG*1))
raw.cc[termios.VMIN] = 1
raw.cc[termios.VTIME] = 0
tcsetattr(self.input_fd, termios.TCSADRAIN, raw)
self.screen = []
self.height, self.width = self.getheightwidth()
self.__buffer = []
self.__posxy = 0, 0
self.__gone_tall = 0
self.__move = self.__move_short
self.__offset = 0
self.__maybe_write_code(self._smkx)
try:
self.old_sigwinch = signal.signal(
signal.SIGWINCH, self.__sigwinch)
except ValueError:
pass
def restore(self):
self.__maybe_write_code(self._rmkx)
self.flushoutput()
tcsetattr(self.input_fd, termios.TCSADRAIN, self.__svtermstate)
if hasattr(self, 'old_sigwinch'):
signal.signal(signal.SIGWINCH, self.old_sigwinch)
def __sigwinch(self, signum, frame):
self.height, self.width = self.getheightwidth()
self.event_queue.insert(Event('resize', None))
def push_char(self, char):
trace('push char {char!r}', char=char)
self.event_queue.push(char)
def get_event(self, block=1):
while self.event_queue.empty():
while 1: # All hail Unix!
try:
self.push_char(os.read(self.input_fd, 1))
except (IOError, OSError) as err:
if err.errno == errno.EINTR:
if not self.event_queue.empty():
return self.event_queue.get()
else:
continue
else:
raise
else:
break
if not block:
break
return self.event_queue.get()
def wait(self):
self.pollob.poll()
def set_cursor_vis(self, vis):
if vis:
self.__show_cursor()
else:
self.__hide_cursor()
def __hide_cursor(self):
if self.cursor_visible:
self.__maybe_write_code(self._civis)
self.cursor_visible = 0
def __show_cursor(self):
if not self.cursor_visible:
self.__maybe_write_code(self._cnorm)
self.cursor_visible = 1
def repaint_prep(self):
if not self.__gone_tall:
self.__posxy = 0, self.__posxy[1]
self.__write("\r")
ns = len(self.screen)*['\000'*self.width]
self.screen = ns
else:
self.__posxy = 0, self.__offset
self.__move(0, self.__offset)
ns = self.height*['\000'*self.width]
self.screen = ns
if TIOCGWINSZ:
def getheightwidth(self):
try:
return int(os.environ["LINES"]), int(os.environ["COLUMNS"])
except KeyError:
height, width = struct.unpack(
"hhhh", ioctl(self.input_fd, TIOCGWINSZ, "\000"*8))[0:2]
if not height: return 25, 80
return height, width
else:
def getheightwidth(self):
try:
return int(os.environ["LINES"]), int(os.environ["COLUMNS"])
except KeyError:
return 25, 80
def forgetinput(self):
termios.tcflush(self.input_fd, termios.TCIFLUSH)
def flushoutput(self):
for text, iscode in self.__buffer:
if iscode:
self.__tputs(text)
else:
os.write(self.output_fd, text.encode(self.encoding))
del self.__buffer[:]
def __tputs(self, fmt, prog=delayprog):
"""A Python implementation of the curses tputs function; the
curses one can't really be wrapped in a sane manner.
I have the strong suspicion that this is complexity that
will never do anyone any good."""
# using .get() means that things will blow up
# only if the bps is actually needed (which I'm
        # betting is pretty unlikely)
bps = ratedict.get(self.__svtermstate.ospeed)
while 1:
m = prog.search(fmt)
if not m:
os.write(self.output_fd, fmt)
break
x, y = m.span()
os.write(self.output_fd, fmt[:x])
fmt = fmt[y:]
delay = int(m.group(1))
if '*' in m.group(2):
delay *= self.height
if self._pad:
                nchars = (bps*delay)//1000
os.write(self.output_fd, self._pad*nchars)
else:
time.sleep(float(delay)/1000.0)
def finish(self):
y = len(self.screen) - 1
while y >= 0 and not self.screen[y]:
y -= 1
self.__move(0, min(y, self.height + self.__offset - 1))
self.__write("\n\r")
self.flushoutput()
def beep(self):
self.__maybe_write_code(self._bel)
self.flushoutput()
if FIONREAD:
def getpending(self):
e = Event('key', '', '')
while not self.event_queue.empty():
e2 = self.event_queue.get()
e.data += e2.data
                e.raw += e2.raw
amount = struct.unpack(
"i", ioctl(self.input_fd, FIONREAD, "\0\0\0\0"))[0]
raw = unicode(os.read(self.input_fd, amount), self.encoding, 'replace')
e.data += raw
e.raw += raw
return e
else:
def getpending(self):
e = Event('key', '', '')
while not self.event_queue.empty():
e2 = self.event_queue.get()
e.data += e2.data
                e.raw += e2.raw
amount = 10000
raw = unicode(os.read(self.input_fd, amount), self.encoding, 'replace')
e.data += raw
e.raw += raw
return e
def clear(self):
self.__write_code(self._clear)
self.__gone_tall = 1
self.__move = self.__move_tall
self.__posxy = 0, 0
self.screen = []
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
"""
Allows editing of a line plot.
Left-dragging a point will move its position.
Right-drag pans the plot.
Mousewheel up and down zooms the plot in and out.
Pressing "z" brings up the Zoom Box, and you can click-drag a rectangular region to
zoom. If you use a sequence of zoom boxes, pressing alt-left-arrow and
alt-right-arrow moves you forwards and backwards through the "zoom history".
"""
# Major library imports
from numpy import linspace
from scipy.special import jn
from enthought.enable.example_support import DemoFrame, demo_main
from enthought.chaco.example_support import COLOR_PALETTE
# Enthought library imports
from enthought.enable.tools.api import DragTool
from enthought.enable.api import Component, ComponentEditor, Window
from enthought.traits.api import HasTraits, Instance, Int, Tuple
from enthought.traits.ui.api import Item, Group, View
# Chaco imports
from enthought.chaco.api import add_default_axes, add_default_grids, \
OverlayPlotContainer, PlotLabel, ScatterPlot, create_line_plot
from enthought.chaco.tools.api import PanTool, ZoomTool
class PointDraggingTool(DragTool):
component = Instance(Component)
# The pixel distance from a point that the cursor is still considered
# to be 'on' the point
threshold = Int(5)
# The index of the point being dragged
_drag_index = Int(-1)
# The original dataspace values of the index and value datasources
# corresponding to _drag_index
_orig_value = Tuple
def is_draggable(self, x, y):
# Check to see if (x, y) are over one of the points in self.component
if self._lookup_point(x, y) is not None:
return True
else:
return False
def normal_mouse_move(self, event):
plot = self.component
ndx = plot.map_index((event.x, event.y), self.threshold)
if ndx is None:
if plot.index.metadata.has_key('selections'):
del plot.index.metadata['selections']
else:
plot.index.metadata['selections'] = [ndx]
plot.invalidate_draw()
plot.request_redraw()
def drag_start(self, event):
plot = self.component
ndx = plot.map_index((event.x, event.y), self.threshold)
if ndx is None:
return
self._drag_index = ndx
self._orig_value = (plot.index.get_data()[ndx], plot.value.get_data()[ndx])
def dragging(self, event):
plot = self.component
(data_x, data_y) = plot.map_data((event.x, event.y))
plot.index._data[self._drag_index] = data_x
plot.value._data[self._drag_index] = data_y
plot.index.data_changed = True
plot.value.data_changed = True
plot.request_redraw()
def drag_cancel(self, event):
plot = self.component
plot.index._data[self._drag_index] = self._orig_value[0]
plot.value._data[self._drag_index] = self._orig_value[1]
plot.index.data_changed = True
plot.value.data_changed = True
plot.request_redraw()
def drag_end(self, event):
plot = self.component
if plot.index.metadata.has_key('selections'):
del plot.index.metadata['selections']
plot.invalidate_draw()
plot.request_redraw()
def _lookup_point(self, x, y):
""" Finds the point closest to a screen point if it is within self.threshold
Parameters
==========
x : float
screen x-coordinate
y : float
screen y-coordinate
Returns
=======
(screen_x, screen_y, distance) of datapoint nearest to the input *(x, y)*.
If no data points are within *self.threshold* of *(x, y)*, returns None.
"""
if hasattr(self.component, 'get_closest_point'):
# This is on BaseXYPlots
return self.component.get_closest_point((x, y), threshold=self.threshold)
return None
# ===============================================================================
## Create the Chaco plot.
# ===============================================================================
def _create_plot_component():
container = OverlayPlotContainer(padding=50, fill_padding=True,
bgcolor='lightgray', use_backbuffer=True)
# Create the initial X-series of data
numpoints = 30
low = -5
high = 15.0
x = linspace(low, high, numpoints)
y = jn(0, x)
lineplot = create_line_plot((x, y), color=tuple(COLOR_PALETTE[0]),
width=2.0)
lineplot.selected_color = 'none'
scatter = ScatterPlot(
index=lineplot.index,
value=lineplot.value,
index_mapper=lineplot.index_mapper,
value_mapper=lineplot.value_mapper,
color=tuple(COLOR_PALETTE[0]),
marker_size=5,
)
scatter.index.sort_order = 'ascending'
scatter.bgcolor = 'white'
scatter.border_visible = True
add_default_grids(scatter)
add_default_axes(scatter)
scatter.tools.append(PanTool(scatter, drag_button='right'))
# The ZoomTool tool is stateful and allows drawing a zoom
# box to select a zoom region.
zoom = ZoomTool(scatter, tool_mode='box', always_on=False,
drag_button=None)
scatter.overlays.append(zoom)
scatter.tools.append(PointDraggingTool(scatter))
container.add(lineplot)
container.add(scatter)
# Add the title at the top
container.overlays.append(PlotLabel('Line Editor',
component=container, font='swiss 16',
overlay_position='top'))
return container
# ===============================================================================
# Attributes to use for the plot view.
size = (800, 700)
title = 'Simple line plot'
# ===============================================================================
## Demo class that is used by the demo.py application.
# ===============================================================================
class Demo(HasTraits):
plot = Instance(Component)
traits_view = View(Group(Item('plot',
editor=ComponentEditor(size=size),
show_label=False), orientation='vertical'),
resizable=True, title=title)
def _plot_default(self):
return _create_plot_component()
demo = Demo()
# ===============================================================================
# Stand-alone frame to display the plot.
# ===============================================================================
class PlotFrame(DemoFrame):
def _create_window(self):
# Return a window containing our plots
return Window(self, -1, component=_create_plot_component())
if __name__ == '__main__':
demo_main(PlotFrame, size=size, title=title)
# --EOF---
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django import http
from django import shortcuts
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import tables
from horizon import test
class FakeObject(object):
def __init__(self, id, name, value, status, optional=None, excluded=None):
self.id = id
self.name = name
self.value = value
self.status = status
self.optional = optional
self.excluded = excluded
self.extra = "extra"
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.name)
TEST_DATA = (
FakeObject('1', 'object_1', 'value_1', 'up', 'optional_1', 'excluded_1'),
FakeObject('2', 'object_2', '<strong>evil</strong>', 'down', 'optional_2'),
FakeObject('3', 'object_3', 'value_3', 'up'),
)
TEST_DATA_2 = (
FakeObject('1', 'object_1', 'value_1', 'down', 'optional_1', 'excluded_1'),
)
TEST_DATA_3 = (
FakeObject('1', 'object_1', 'value_1', 'up', 'optional_1', 'excluded_1'),
)
class MyLinkAction(tables.LinkAction):
name = "login"
verbose_name = "Log In"
url = "horizon:auth_login"
attrs = {
"class": "ajax-modal",
}
def get_link_url(self, datum=None, *args, **kwargs):
return reverse(self.url)
class MyAction(tables.Action):
name = "delete"
verbose_name = "Delete Me"
verbose_name_plural = "Delete Them"
def allowed(self, request, obj=None):
return getattr(obj, 'status', None) != 'down'
def handle(self, data_table, request, object_ids):
return shortcuts.redirect('http://example.com/%s' % len(object_ids))
class MyUpdateAction(tables.UpdateAction):
def get_data(self, request, obj_id):
return TEST_DATA_2[0]
class MyBatchAction(tables.BatchAction):
name = "toggle"
action_present = ("Down", "Up")
action_past = ("Downed", "Upped")
data_type_singular = _("Item")
data_type_plural = _("Items")
def allowed(self, request, obj=None):
if not obj:
return False
self.down = getattr(obj, 'status', None) == 'down'
if self.down:
self.current_present_action = 1
return self.down or getattr(obj, 'status', None) == 'up'
def action(self, request, object_ids):
if self.down:
#up it
self.current_past_action = 1
class MyFilterAction(tables.FilterAction):
def filter(self, table, objs, filter_string):
q = filter_string.lower()
def comp(obj):
if q in obj.name.lower():
return True
return False
return filter(comp, objs)
def get_name(obj):
return "custom %s" % obj.name
def get_link(obj):
return reverse('horizon:auth_login')
class MyTable(tables.DataTable):
id = tables.Column('id', hidden=True)
name = tables.Column(get_name, verbose_name="Verbose Name", sortable=True)
value = tables.Column('value',
sortable=True,
link='http://example.com/',
attrs={'classes': ('green', 'blue')})
status = tables.Column('status', link=get_link)
optional = tables.Column('optional', empty_value='N/A')
excluded = tables.Column('excluded')
class Meta:
name = "my_table"
verbose_name = "My Table"
status_column = "status"
columns = ('id', 'name', 'value', 'optional', 'status')
table_actions = (MyFilterAction, MyAction,)
row_actions = (MyAction, MyLinkAction, MyUpdateAction, MyBatchAction,)
class DataTableTests(test.TestCase):
def test_table_instantiation(self):
""" Tests everything that happens when the table is instantiated. """
self.table = MyTable(self.request, TEST_DATA)
# Properties defined on the table
self.assertEqual(self.table.data, TEST_DATA)
self.assertEqual(self.table.name, "my_table")
# Verify calculated options that weren't specified explicitly
self.assertTrue(self.table._meta.actions_column)
self.assertTrue(self.table._meta.multi_select)
# Test for verbose_name
self.assertEqual(unicode(self.table), u"My Table")
# Column ordering and exclusion.
# This should include auto-columns for multi_select and actions,
# but should not contain the excluded column.
self.assertQuerysetEqual(self.table.columns.values(),
['<Column: multi_select>',
'<Column: id>',
'<Column: name>',
'<Column: value>',
'<Column: optional>',
'<Column: status>',
'<Column: actions>'])
# Actions (these also test ordering)
self.assertQuerysetEqual(self.table.base_actions.values(),
['<MyAction: delete>',
'<MyFilterAction: filter>',
'<MyLinkAction: login>',
'<MyBatchAction: toggle>',
'<MyUpdateAction: update>'])
self.assertQuerysetEqual(self.table.get_table_actions(),
['<MyFilterAction: filter>',
'<MyAction: delete>'])
self.assertQuerysetEqual(self.table.get_row_actions(TEST_DATA[0]),
['<MyAction: delete>',
'<MyLinkAction: login>',
'<MyUpdateAction: update>',
'<MyBatchAction: toggle>'])
# Auto-generated columns
multi_select = self.table.columns['multi_select']
self.assertEqual(multi_select.auto, "multi_select")
self.assertEqual(multi_select.get_classes(), "multi_select_column")
actions = self.table.columns['actions']
self.assertEqual(actions.auto, "actions")
self.assertEqual(actions.get_classes(), "actions_column")
def test_table_force_no_multiselect(self):
class TempTable(MyTable):
class Meta:
columns = ('id',)
table_actions = (MyFilterAction, MyAction,)
row_actions = (MyAction, MyLinkAction,)
multi_select = False
self.table = TempTable(self.request, TEST_DATA)
self.assertQuerysetEqual(self.table.columns.values(),
['<Column: id>',
'<Column: actions>'])
def test_table_force_no_actions_column(self):
class TempTable(MyTable):
class Meta:
columns = ('id',)
table_actions = (MyFilterAction, MyAction,)
row_actions = (MyAction, MyLinkAction,)
actions_column = False
self.table = TempTable(self.request, TEST_DATA)
self.assertQuerysetEqual(self.table.columns.values(),
['<Column: multi_select>',
'<Column: id>'])
def test_table_natural_no_actions_column(self):
class TempTable(MyTable):
class Meta:
columns = ('id',)
table_actions = (MyFilterAction, MyAction,)
self.table = TempTable(self.request, TEST_DATA)
self.assertQuerysetEqual(self.table.columns.values(),
['<Column: multi_select>',
'<Column: id>'])
def test_table_natural_no_multiselect(self):
class TempTable(MyTable):
class Meta:
columns = ('id',)
row_actions = (MyAction, MyLinkAction,)
self.table = TempTable(self.request, TEST_DATA)
self.assertQuerysetEqual(self.table.columns.values(),
['<Column: id>',
'<Column: actions>'])
def test_table_column_inheritance(self):
class TempTable(MyTable):
extra = tables.Column('extra')
class Meta:
name = "temp_table"
table_actions = (MyFilterAction, MyAction,)
row_actions = (MyAction, MyLinkAction,)
self.table = TempTable(self.request, TEST_DATA)
self.assertQuerysetEqual(self.table.columns.values(),
['<Column: multi_select>',
'<Column: id>',
'<Column: name>',
'<Column: value>',
'<Column: status>',
'<Column: optional>',
'<Column: excluded>',
'<Column: extra>',
'<Column: actions>'])
def test_table_construction(self):
self.table = MyTable(self.request, TEST_DATA)
# Verify we retrieve the right columns for headers
columns = self.table.get_columns()
self.assertQuerysetEqual(columns, ['<Column: multi_select>',
'<Column: id>',
'<Column: name>',
'<Column: value>',
'<Column: optional>',
'<Column: status>',
'<Column: actions>'])
# Verify we retrieve the right rows from our data
rows = self.table.get_rows()
self.assertQuerysetEqual(rows, ['<Row: my_table__row__1>',
'<Row: my_table__row__2>',
'<Row: my_table__row__3>'])
# Verify each row contains the right cells
self.assertQuerysetEqual(rows[0].get_cells(),
['<Cell: multi_select, my_table__row__1>',
'<Cell: id, my_table__row__1>',
'<Cell: name, my_table__row__1>',
'<Cell: value, my_table__row__1>',
'<Cell: optional, my_table__row__1>',
'<Cell: status, my_table__row__1>',
'<Cell: actions, my_table__row__1>'])
def test_table_column(self):
self.table = MyTable(self.request, TEST_DATA)
row = self.table.get_rows()[0]
row3 = self.table.get_rows()[2]
id_col = self.table.base_columns['id']
name_col = self.table.base_columns['name']
value_col = self.table.base_columns['value']
# transform
self.assertEqual(row.cells['id'].data, '1') # Standard attr access
self.assertEqual(row.cells['name'].data, 'custom object_1') # Callable
# name and verbose_name
self.assertEqual(unicode(id_col), "Id")
self.assertEqual(unicode(name_col), "Verbose Name")
# sortable
self.assertEqual(id_col.sortable, False)
self.assertNotIn("sortable", id_col.get_classes())
self.assertEqual(name_col.sortable, True)
self.assertIn("sortable", name_col.get_classes())
# hidden
self.assertEqual(id_col.hidden, True)
self.assertIn("hide", id_col.get_classes())
self.assertEqual(name_col.hidden, False)
self.assertNotIn("hide", name_col.get_classes())
# link and get_link_url
self.assertIn('href="http://example.com/"', row.cells['value'].value)
self.assertIn('href="/auth/login/"', row.cells['status'].value)
# empty_value
self.assertEqual(row3.cells['optional'].value, "N/A")
# get_classes
self.assertEqual(value_col.get_classes(), "green blue sortable")
# status
cell_status = row.cells['status'].status
self.assertEqual(cell_status, True)
self.assertEqual(row.cells['status'].get_status_class(cell_status),
'status_up')
# status_choices
id_col.status = True
id_col.status_choices = (('1', False), ('2', True), ('3', None))
cell_status = row.cells['id'].status
self.assertEqual(cell_status, False)
self.assertEqual(row.cells['id'].get_status_class(cell_status),
'status_down')
cell_status = row3.cells['id'].status
self.assertEqual(cell_status, None)
self.assertEqual(row.cells['id'].get_status_class(cell_status),
'status_unknown')
# Ensure data is not cached on the column across table instances
self.table = MyTable(self.request, TEST_DATA_2)
row = self.table.get_rows()[0]
self.assertTrue("down" in row.cells['status'].value)
def test_table_row(self):
self.table = MyTable(self.request, TEST_DATA)
row = self.table.get_rows()[0]
self.assertEqual(row.table, self.table)
self.assertEqual(row.datum, TEST_DATA[0])
self.assertEqual(row.id, 'my_table__row__1')
# Verify row status works even if status isn't set on the column
self.assertEqual(row.status, True)
self.assertEqual(row.status_class, 'status_up')
# Check the cells as well
cell_status = row.cells['status'].status
self.assertEqual(cell_status, True)
self.assertEqual(row.cells['status'].get_status_class(cell_status),
'status_up')
def test_table_rendering(self):
self.table = MyTable(self.request, TEST_DATA)
# Table actions
table_actions = self.table.render_table_actions()
resp = http.HttpResponse(table_actions)
self.assertContains(resp, "table_search", 1)
self.assertContains(resp, "my_table__filter__q", 1)
self.assertContains(resp, "my_table__delete", 1)
# Row actions
row_actions = self.table.render_row_actions(TEST_DATA[0])
resp = http.HttpResponse(row_actions)
self.assertContains(resp, "<li", 3)
self.assertContains(resp, "my_table__delete__1", 1)
self.assertContains(resp,
"action=update&table=my_table&obj_id=1", 1)
self.assertContains(resp, "data-update-interval", 1)
self.assertContains(resp, "my_table__toggle__1", 1)
self.assertContains(resp, "/auth/login/", 1)
self.assertContains(resp, "ajax-modal", 1)
# Whole table
resp = http.HttpResponse(self.table.render())
self.assertContains(resp, '<table id="my_table"', 1)
self.assertContains(resp, '<th ', 7)
self.assertContains(resp, '<tr id="my_table__row__1"', 1)
self.assertContains(resp, '<tr id="my_table__row__2"', 1)
self.assertContains(resp, '<tr id="my_table__row__3"', 1)
# Verify our XSS protection
self.assertContains(resp, '<a href="http://example.com/">'
'<strong>evil</strong></a>', 1)
# Filter = False hides the search box
self.table._meta.filter = False
table_actions = self.table.render_table_actions()
resp = http.HttpResponse(table_actions)
self.assertContains(resp, "table_search", 0)
def test_table_actions(self):
# Single object action
action_string = "my_table__delete__1"
req = self.factory.post('/my_url/', {'action': action_string})
self.table = MyTable(req, TEST_DATA)
self.assertEqual(self.table.parse_action(action_string),
('my_table', 'delete', '1'))
handled = self.table.maybe_handle()
self.assertEqual(handled.status_code, 302)
self.assertEqual(handled["location"], "http://example.com/1")
# Single object batch action
# GET page - 'up' to 'down'
req = self.factory.get('/my_url/')
self.table = MyTable(req, TEST_DATA_3)
self.assertEqual(len(self.table.get_row_actions(TEST_DATA_3[0])), 4)
toggle_action = self.table.get_row_actions(TEST_DATA_3[0])[3]
self.assertEqual(unicode(toggle_action.verbose_name), "Down Item")
# Toggle from status 'up' to 'down'
# POST page
action_string = "my_table__toggle__1"
req = self.factory.post('/my_url/', {'action': action_string})
self.table = MyTable(req, TEST_DATA)
self.assertEqual(self.table.parse_action(action_string),
('my_table', 'toggle', '1'))
handled = self.table.maybe_handle()
self.assertEqual(handled.status_code, 302)
self.assertEqual(handled["location"], "/my_url/")
self.assertEqual(list(req._messages)[0].message,
u"Downed Item: object_1")
# Toggle from status 'down' to 'up'
# GET page - 'down' to 'up'
req = self.factory.get('/my_url/')
self.table = MyTable(req, TEST_DATA_2)
self.assertEqual(len(self.table.get_row_actions(TEST_DATA_2[0])), 3)
toggle_action = self.table.get_row_actions(TEST_DATA_2[0])[2]
self.assertEqual(unicode(toggle_action.verbose_name), "Up Item")
# POST page
action_string = "my_table__toggle__2"
req = self.factory.post('/my_url/', {'action': action_string})
self.table = MyTable(req, TEST_DATA)
self.assertEqual(self.table.parse_action(action_string),
('my_table', 'toggle', '2'))
handled = self.table.maybe_handle()
self.assertEqual(handled.status_code, 302)
self.assertEqual(handled["location"], "/my_url/")
self.assertEqual(list(req._messages)[0].message,
u"Upped Item: object_2")
# Multiple object action
action_string = "my_table__delete"
req = self.factory.post('/my_url/', {'action': action_string,
'object_ids': [1, 2]})
self.table = MyTable(req, TEST_DATA)
self.assertEqual(self.table.parse_action(action_string),
('my_table', 'delete', None))
handled = self.table.maybe_handle()
self.assertEqual(handled.status_code, 302)
self.assertEqual(handled["location"], "http://example.com/2")
# Action with nothing selected
req = self.factory.post('/my_url/', {'action': action_string})
self.table = MyTable(req, TEST_DATA)
self.assertEqual(self.table.parse_action(action_string),
('my_table', 'delete', None))
handled = self.table.maybe_handle()
self.assertEqual(handled, None)
self.assertEqual(list(req._messages)[0].message,
"Please select a row before taking that action.")
# Filtering
action_string = "my_table__filter__q"
req = self.factory.post('/my_url/', {action_string: '2'})
self.table = MyTable(req, TEST_DATA)
handled = self.table.maybe_handle()
self.assertEqual(handled, None)
self.assertQuerysetEqual(self.table.filtered_data,
['<FakeObject: object_2>'])
# Updating and preemptive actions
params = {"table": "my_table", "action": "update", "obj_id": "1"}
req = self.factory.get('/my_url/',
params,
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.table = MyTable(req)
resp = self.table.maybe_preempt()
self.assertEqual(resp.status_code, 200)
# Make sure the data returned differs from the original
self.assertContains(resp, "my_table__row__1")
self.assertContains(resp, "status_down")
# Verify that we don't get a response for a valid action with the
# wrong method.
params = {"table": "my_table", "action": "delete", "obj_id": "1"}
req = self.factory.get('/my_url/', params)
self.table = MyTable(req)
resp = self.table.maybe_preempt()
self.assertEqual(resp, None)
resp = self.table.maybe_handle()
self.assertEqual(resp, None)
# Verbose names
table_actions = self.table.get_table_actions()
self.assertEqual(unicode(table_actions[0].verbose_name), "filter")
self.assertEqual(unicode(table_actions[1].verbose_name), "Delete Me")
row_actions = self.table.get_row_actions(TEST_DATA[0])
self.assertEqual(unicode(row_actions[0].verbose_name), "Delete Me")
self.assertEqual(unicode(row_actions[1].verbose_name), "Log In")
|
|
from random import choice
import json
import sys
MAX_WAY = 7.8
def way_cost(way_length):
if way_length <= 1:
return way_length * 100
elif way_length < MAX_WAY:
return (way_length * 100) ** 2
return sys.float_info.max
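# Illustrative values (not part of the original script): way_cost(0.5) == 50.0,
# way_cost(2.0) == 40000.0, and any way of MAX_WAY or longer is effectively
# forbidden because it costs sys.float_info.max.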
print "read data...."
pref = ""
if len(sys.argv) > 1:
pref = "_%s" % sys.argv[1]
with open("teams%s.json" % pref, "r") as f:
team_data = json.load(f)
with open("distances%s.json" % pref, "r") as f:
distance_data = json.load(f)
print "map teams...."
team_map = dict()
team_map_reverse = dict()
for (idx, team) in enumerate(team_data):
team_map[team["id"]] = idx
team_map_reverse[idx] = team["id"]
print "build distance matrix..."
distance_matrix = {0: {}, 1: {}}
for idx in distance_matrix:
for src in distance_data[idx]:
src_idx = team_map[int(src)]
if src_idx not in distance_matrix[idx]:
distance_matrix[idx][src_idx] = {}
for dst in distance_data[idx][src]:
dst_idx = team_map[int(dst)]
distance_matrix[idx][src_idx][dst_idx] = way_cost(distance_data[idx][src][dst])
print "calculate best routes...."
cnt = len(team_data)
cnt_hosts = cnt / 3
teams = range(cnt)
seen = dict([(idx, set()) for idx in teams])
def round_hosts(round_num=0):
begin = cnt_hosts * round_num
end = begin + cnt_hosts
return teams[begin:end]
def round_guests(hosts):
"""
:param hosts: list of int
"""
return [team for team in teams if team not in hosts]
def get_guest_tuple(all_guests, used):
"""
:param all_guests: list of int
:param used: list of int
"""
tmp_guests = [g for g in all_guests if g not in used]
for (idx, guest1) in enumerate(tmp_guests):
guest2_idx = idx + 1
for guest2 in tmp_guests[guest2_idx:]:
yield (guest1, guest2)
def not_seen(host, guests, seen_table):
host_seen = seen_table[host]
guest1_seen = seen_table[guests[0]]
return guests[0] not in host_seen and guests[1] not in host_seen and guests[1] not in guest1_seen
def add_meeting(host, guests, seen_table, meetings, stations):
tbl = seen_table.copy()
meeting = {host}
meeting.update(guests)
meetings.append(meeting)
for team in [host, guests[0], guests[1]]:
seen = tbl[team].copy()
seen.update(meeting)
tbl[team] = seen
stations[team] = host
# seen_table[team].update(meeting)
return tbl
def distance(current_distance, table_idx, prev_stations, teams, host):
for team in teams:
prev_host = prev_stations[team]
dist = distance_matrix[table_idx][prev_host][host]
current_distance += dist
return current_distance
second_hosts_list = round_hosts(1)
second_hosts_set = set(second_hosts_list)
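# dummy_distance provides a rough first-round cost estimate: if one of the guests will
# host the second round, use the real distance to that host; otherwise assume twice the
# distance to a randomly chosen second-round host.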
def dummy_distance(host, guests):
for guest in guests:
if guest in second_hosts_set:
return distance_matrix[0][host][guest]
return distance_matrix[0][host][choice(second_hosts_list)] * 2
i = 0
best_distance = sys.float_info.max
best_plan = None
def guestsort(a, b):
if a[0] < b[0]:
return -1
elif a[0] == b[0]:
return 0
else:
return 1
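# deploy_host performs a depth-first search over host/guest assignments, one host and
# one round at a time, pruning any branch whose accumulated travel cost already meets
# or exceeds the best complete plan found so far (a simple branch and bound).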
def deploy_host(host_idx, current_hosts, current_round, current_guests, used_guests, seen_table, meetings_list,
stations_list, current_distance):
"""
:param seen_table: dict of set
"""
if host_idx == len(current_hosts):
if current_round < 2:
new_round = current_round + 1
new_hosts = round_hosts(new_round)
new_guests = round_guests(new_hosts)
return deploy_host(0, new_hosts, new_round, new_guests, set(), seen_table, meetings_list, stations_list,
current_distance)
else:
global i, best_distance, best_plan
i += 1
#print "ENDPOINT", meetings_list
if current_distance < best_distance:
print "new best (%i)" % i, current_distance, meetings_list
best_distance = current_distance
best_plan = meetings_list
return
tests = cnt_hosts * 3
actual_host = current_hosts[host_idx]
if current_round > 0:
current_distance = distance(current_distance, current_round - 1, stations_list[current_round - 1],
[actual_host], actual_host)
if current_distance >= best_distance:
#print "skip combination in round", current_round, "for host dist", current_distance, "best", best_distance
return
possible_guests = []
for guests in get_guest_tuple(current_guests, used_guests):
if not_seen(actual_host, guests, seen_table):
if current_round > 0:
actual_distance = distance(current_distance, current_round - 1, stations_list[current_round - 1],
guests, actual_host)
if actual_distance >= best_distance:
#print "skip combination in round", current_round, "for guests dist", actual_distance, "best", best_distance
continue
else:
actual_distance = dummy_distance(actual_host, guests)
possible_guests.append((actual_distance, guests))
possible_guests.sort(guestsort)
if current_round > 0:
tests = cnt_hosts / 3
for (actual_distance, guests) in possible_guests[:tests]:
if current_round == 0:
actual_distance = current_distance
#actual_seen = dict([(k, v.copy()) for (k, v) in seen_table.iteritems()])
actual_meetings = list(meetings_list) # XXX copy.deepcopy(meetings)
meetings = list(actual_meetings[current_round])
actual_meetings[current_round] = meetings
actual_stations = list(stations_list)
stations = actual_stations[current_round].copy()
actual_stations[current_round] = stations
actual_seen = add_meeting(actual_host, guests, seen_table, meetings, stations)
actual_used = used_guests.copy()
actual_used.update([guests[0], guests[1]])
deploy_host(host_idx + 1, current_hosts, current_round, current_guests, actual_used, actual_seen,
actual_meetings, actual_stations, actual_distance)
def test():
current_hosts = round_hosts(0)
current_guests = round_guests(current_hosts)
deploy_host(0, current_hosts, 0, current_guests, set(), seen, [[], [], []], [{}, {}, {}], 0)
print ""
print "======best plan======"
print "1st round:", best_plan[0]
print "2nd round:", best_plan[1]
print "3rd round:", best_plan[2]
test()
print ""
print "teams:", cnt
print "solutions that where calculated:", i
|
|
"""Make plots for displaying results of BEST test.
This module produces plots similar to those in
Kruschke, J. (2012) Bayesian estimation supersedes the t
test. Journal of Experimental Psychology: General.
"""
from __future__ import division
import numpy as np
from best import calculate_sample_statistics
import matplotlib.pyplot as plt
from matplotlib.transforms import blended_transform_factory
import matplotlib.lines as mpllines
import matplotlib.ticker as mticker
from pymc.distributions import noncentral_t_like
pretty_blue = '#89d1ea'
def plot_posterior( sample_vec, bins=None, ax=None, title=None, stat='mode',
label='', draw_zero=False ):
stats = calculate_sample_statistics( sample_vec )
stat_val = stats[stat]
hdi_min = stats['hdi_min']
hdi_max = stats['hdi_max']
if ax is not None:
if bins is not None:
kwargs = {'bins':bins}
else:
kwargs = {}
ax.hist( sample_vec, rwidth=0.8,
facecolor=pretty_blue, edgecolor='none', **kwargs )
if title is not None:
ax.set_title( title )
trans = blended_transform_factory(ax.transData, ax.transAxes)
ax.text( stat_val, 0.99, '%s = %.3g'%(stat,stat_val),
transform=trans,
horizontalalignment='center',
verticalalignment='top',
)
if draw_zero:
ax.axvline(0,linestyle=':')
# plot HDI
hdi_line, = ax.plot( [hdi_min, hdi_max], [0,0],
lw=5.0, color='k')
hdi_line.set_clip_on(False)
ax.text( hdi_min, 0.04, '%.3g'%hdi_min,
transform=trans,
horizontalalignment='center',
verticalalignment='bottom',
)
ax.text( hdi_max, 0.04, '%.3g'%hdi_max,
transform=trans,
horizontalalignment='center',
verticalalignment='bottom',
)
ax.text( (hdi_min+hdi_max)/2, 0.14, '95% HDI',
transform=trans,
horizontalalignment='center',
verticalalignment='bottom',
)
# make it pretty
ax.spines['bottom'].set_position(('outward',2))
for loc in ['left','top','right']:
ax.spines[loc].set_color('none') # don't draw
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks([]) # don't draw
ax.xaxis.set_major_locator( mticker.MaxNLocator(nbins=4) )
for line in ax.get_xticklines():
line.set_marker(mpllines.TICKDOWN)
ax.set_xlabel(label)
def plot_data_and_prediction( data, means, stds, numos, ax=None, bins=None,
n_curves=50, group='x'):
assert ax is not None
ax.hist( data, bins=bins, rwidth=0.5,
facecolor='r', edgecolor='none', normed=True )
    if bins is not None and hasattr(bins, '__len__'):
        xmin = bins[0]
        xmax = bins[-1]
    else:
        xmin = np.min(data)
        xmax = np.max(data)
n_samps = len(means)
    # Use floor rather than round so an index can never equal n_samps (IndexError).
    idxs = (np.random.uniform(size=n_curves) * n_samps).astype(int)
x = np.linspace(xmin, xmax, 100)
ax.set_xlabel('y')
ax.set_ylabel('p(y)')
for i in idxs:
m = means[i]
s = stds[i]
lam = 1/s**2
numo = numos[i]
nu = numo+1
v = np.exp([noncentral_t_like(xi,m,lam,nu) for xi in x])
ax.plot(x,v, color=pretty_blue, zorder=-10)
ax.text(0.8,0.95,'$\mathrm{N}_{%s}=%d$'%( group, len(data), ),
transform=ax.transAxes,
horizontalalignment='left',
verticalalignment='top'
)
ax.xaxis.set_major_locator( mticker.MaxNLocator(nbins=4) )
ax.yaxis.set_major_locator( mticker.MaxNLocator(nbins=4) )
ax.set_title('Data Group %s w. Post. Pred.'%(group,))
def make_figure(M):
# plotting stuff
n_bins = 30
posterior_mean1 = M.trace('group1_mean')[:]
posterior_mean2 = M.trace('group2_mean')[:]
diff_means = posterior_mean1 - posterior_mean2
posterior_means = np.concatenate( (posterior_mean1,posterior_mean2) )
_, bin_edges_means = np.histogram( posterior_means, bins=n_bins )
posterior_std1 = M.trace('group1_std')[:]
posterior_std2 = M.trace('group2_std')[:]
diff_stds = posterior_std1 - posterior_std2
posterior_stds = np.concatenate( (posterior_std1,posterior_std2) )
_, bin_edges_stds = np.histogram( posterior_stds, bins=n_bins )
effect_size = diff_means / np.sqrt((posterior_std1**2+posterior_std2**2)/2)
post_nu_minus_one = M.trace('nu_minus_one')[:]
lognup = np.log10(post_nu_minus_one+1)
f = plt.figure(figsize=(8.2,11),facecolor='white')
ax1 = f.add_subplot(5,2,1,axisbg='none')
plot_posterior( posterior_mean1, bins=bin_edges_means, ax=ax1,
title='Group 1 Mean', stat='mean',
label=r'$\mu_1$')
ax3 = f.add_subplot(5,2,3,axisbg='none')
plot_posterior( posterior_mean2, bins=bin_edges_means, ax=ax3,
title='Group 2 Mean', stat='mean',
label=r'$\mu_2$')
ax5 = f.add_subplot(5,2,5,axisbg='none')
plot_posterior( posterior_std1, bins=bin_edges_stds, ax=ax5,
title='Group 1 Std. Dev.',
label=r'$\sigma_1$')
ax7 = f.add_subplot(5,2,7,axisbg='none')
plot_posterior( posterior_std2, bins=bin_edges_stds, ax=ax7,
title='Group 2 Std. Dev.',
label=r'$\sigma_2$')
ax9 = f.add_subplot(5,2,9,axisbg='none')
plot_posterior( lognup, bins=n_bins, ax=ax9,
title='Normality',
label=r'$\mathrm{log10}(\nu)$')
ax6 = f.add_subplot(5,2,6,axisbg='none')
plot_posterior( diff_means, bins=n_bins, ax=ax6,
title='Difference of Means',
stat='mean',
draw_zero=True,
label=r'$\mu_1 - \mu_2$')
ax8 = f.add_subplot(5,2,8,axisbg='none')
plot_posterior( diff_stds, bins=n_bins, ax=ax8,
title='Difference of Std. Dev.s',
draw_zero=True,
label=r'$\sigma_1 - \sigma_2$')
ax10 = f.add_subplot(5,2,10,axisbg='none')
plot_posterior( effect_size, bins=n_bins, ax=ax10,
title='Effect Size',
draw_zero=True,
                    label=r'$(\mu_1 - \mu_2)/\sqrt{(\sigma_1^2 + \sigma_2^2)/2}$')
orig_vals = np.concatenate( (M.group1.value, M.group2.value) )
bin_edges = np.linspace( np.min(orig_vals), np.max(orig_vals), 30 )
ax2 = f.add_subplot(5,2,2,axisbg='none')
plot_data_and_prediction(M.group1.value, posterior_mean1, posterior_std1,
post_nu_minus_one, ax=ax2, bins=bin_edges, group=1)
ax4 = f.add_subplot(5,2,4,axisbg='none',sharex=ax2,sharey=ax2)
plot_data_and_prediction(M.group2.value, posterior_mean2, posterior_std2,
post_nu_minus_one, ax=ax4, bins=bin_edges, group=2)
f.subplots_adjust(hspace=0.82,top=0.97,bottom=0.06,
left=0.09, right=0.95, wspace=0.45)
return f
|
|
import os, stat, socket
import re
import sys
import tempfile
tempprefix = 'hg-hghave-'
def matchoutput(cmd, regexp, ignorestatus=False):
"""Return True if cmd executes successfully and its output
is matched by the supplied regular expression.
"""
r = re.compile(regexp)
fh = os.popen(cmd)
s = fh.read()
try:
ret = fh.close()
except IOError:
        # Happens in the Windows test environment
ret = 1
return (ignorestatus or ret is None) and r.search(s)
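# Illustrative usage (not part of the original helpers): matchoutput('hg --version 2>&1',
# r'Mercurial Distributed SCM') is expected to be True whenever an hg executable is on
# the PATH and prints its usual version banner.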
def has_baz():
return matchoutput('baz --version 2>&1', r'baz Bazaar version')
def has_bzr():
try:
import bzrlib
return bzrlib.__doc__ is not None
except ImportError:
return False
def has_bzr114():
try:
import bzrlib
return (bzrlib.__doc__ is not None
and bzrlib.version_info[:2] >= (1, 14))
except ImportError:
return False
def has_cvs():
re = r'Concurrent Versions System.*?server'
return matchoutput('cvs --version 2>&1', re) and not has_msys()
def has_cvs112():
re = r'Concurrent Versions System \(CVS\) 1.12.*?server'
return matchoutput('cvs --version 2>&1', re) and not has_msys()
def has_darcs():
return matchoutput('darcs --version', r'2\.[2-9]', True)
def has_mtn():
return matchoutput('mtn --version', r'monotone', True) and not matchoutput(
'mtn --version', r'monotone 0\.', True)
def has_eol_in_paths():
try:
fd, path = tempfile.mkstemp(dir='.', prefix=tempprefix, suffix='\n\r')
os.close(fd)
os.remove(path)
return True
except (IOError, OSError):
return False
def has_executablebit():
try:
EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
fh, fn = tempfile.mkstemp(dir='.', prefix=tempprefix)
try:
os.close(fh)
m = os.stat(fn).st_mode & 0777
new_file_has_exec = m & EXECFLAGS
os.chmod(fn, m ^ EXECFLAGS)
exec_flags_cannot_flip = ((os.stat(fn).st_mode & 0777) == m)
finally:
os.unlink(fn)
except (IOError, OSError):
# we don't care, the user probably won't be able to commit anyway
return False
return not (new_file_has_exec or exec_flags_cannot_flip)
def has_icasefs():
# Stolen from mercurial.util
fd, path = tempfile.mkstemp(dir='.', prefix=tempprefix)
os.close(fd)
try:
s1 = os.stat(path)
d, b = os.path.split(path)
p2 = os.path.join(d, b.upper())
if path == p2:
p2 = os.path.join(d, b.lower())
try:
s2 = os.stat(p2)
return s2 == s1
except OSError:
return False
finally:
os.remove(path)
def has_inotify():
try:
import hgext.inotify.linux.watcher
except ImportError:
return False
name = tempfile.mktemp(dir='.', prefix=tempprefix)
sock = socket.socket(socket.AF_UNIX)
try:
sock.bind(name)
except socket.error, err:
return False
sock.close()
os.unlink(name)
return True
def has_fifo():
if getattr(os, "mkfifo", None) is None:
return False
name = tempfile.mktemp(dir='.', prefix=tempprefix)
try:
os.mkfifo(name)
os.unlink(name)
return True
except OSError:
return False
def has_killdaemons():
return True
def has_cacheable_fs():
from mercurial import util
fd, path = tempfile.mkstemp(dir='.', prefix=tempprefix)
os.close(fd)
try:
return util.cachestat(path).cacheable()
finally:
os.remove(path)
def has_lsprof():
try:
import _lsprof
return True
except ImportError:
return False
def has_gettext():
return matchoutput('msgfmt --version', 'GNU gettext-tools')
def has_git():
return matchoutput('git --version 2>&1', r'^git version')
def has_docutils():
try:
from docutils.core import publish_cmdline
return True
except ImportError:
return False
def getsvnversion():
m = matchoutput('svn --version --quiet 2>&1', r'^(\d+)\.(\d+)')
if not m:
return (0, 0)
return (int(m.group(1)), int(m.group(2)))
def has_svn15():
return getsvnversion() >= (1, 5)
def has_svn13():
return getsvnversion() >= (1, 3)
def has_svn():
return matchoutput('svn --version 2>&1', r'^svn, version') and \
matchoutput('svnadmin --version 2>&1', r'^svnadmin, version')
def has_svn_bindings():
try:
import svn.core
version = svn.core.SVN_VER_MAJOR, svn.core.SVN_VER_MINOR
if version < (1, 4):
return False
return True
except ImportError:
return False
def has_p4():
return (matchoutput('p4 -V', r'Rev\. P4/') and
matchoutput('p4d -V', r'Rev\. P4D/'))
def has_symlink():
if getattr(os, "symlink", None) is None:
return False
name = tempfile.mktemp(dir='.', prefix=tempprefix)
try:
os.symlink(".", name)
os.unlink(name)
return True
except (OSError, AttributeError):
return False
def has_hardlink():
from mercurial import util
fh, fn = tempfile.mkstemp(dir='.', prefix=tempprefix)
os.close(fh)
name = tempfile.mktemp(dir='.', prefix=tempprefix)
try:
try:
util.oslink(fn, name)
os.unlink(name)
return True
except OSError:
return False
finally:
os.unlink(fn)
def has_tla():
return matchoutput('tla --version 2>&1', r'The GNU Arch Revision')
def has_gpg():
return matchoutput('gpg --version 2>&1', r'GnuPG')
def has_unix_permissions():
d = tempfile.mkdtemp(dir='.', prefix=tempprefix)
try:
fname = os.path.join(d, 'foo')
for umask in (077, 007, 022):
os.umask(umask)
f = open(fname, 'w')
f.close()
mode = os.stat(fname).st_mode
os.unlink(fname)
if mode & 0777 != ~umask & 0666:
return False
return True
finally:
os.rmdir(d)
def has_pyflakes():
return matchoutput("sh -c \"echo 'import re' 2>&1 | pyflakes\"",
r"<stdin>:1: 're' imported but unused",
True)
def has_pygments():
try:
import pygments
return True
except ImportError:
return False
def has_outer_repo():
# failing for reasons other than 'no repo' implies that there is a repo
return not matchoutput('hg root 2>&1',
r'abort: no repository found', True)
def has_ssl():
try:
import ssl
import OpenSSL
OpenSSL.SSL.Context
return True
except ImportError:
return False
def has_windows():
return os.name == 'nt'
def has_system_sh():
return os.name != 'nt'
def has_serve():
return os.name != 'nt' # gross approximation
def has_tic():
return matchoutput('test -x "`which tic`"', '')
def has_msys():
return os.getenv('MSYSTEM')
def has_aix():
return sys.platform.startswith("aix")
checks = {
"true": (lambda: True, "yak shaving"),
"false": (lambda: False, "nail clipper"),
"baz": (has_baz, "GNU Arch baz client"),
"bzr": (has_bzr, "Canonical's Bazaar client"),
"bzr114": (has_bzr114, "Canonical's Bazaar client >= 1.14"),
"cacheable": (has_cacheable_fs, "cacheable filesystem"),
"cvs": (has_cvs, "cvs client/server"),
"cvs112": (has_cvs112, "cvs client/server >= 1.12"),
"darcs": (has_darcs, "darcs client"),
"docutils": (has_docutils, "Docutils text processing library"),
"eol-in-paths": (has_eol_in_paths, "end-of-lines in paths"),
"execbit": (has_executablebit, "executable bit"),
"fifo": (has_fifo, "named pipes"),
"gettext": (has_gettext, "GNU Gettext (msgfmt)"),
"git": (has_git, "git command line client"),
"gpg": (has_gpg, "gpg client"),
"hardlink": (has_hardlink, "hardlinks"),
"icasefs": (has_icasefs, "case insensitive file system"),
"inotify": (has_inotify, "inotify extension support"),
"killdaemons": (has_killdaemons, 'killdaemons.py support'),
"lsprof": (has_lsprof, "python lsprof module"),
"mtn": (has_mtn, "monotone client (>= 1.0)"),
"outer-repo": (has_outer_repo, "outer repo"),
"p4": (has_p4, "Perforce server and client"),
"pyflakes": (has_pyflakes, "Pyflakes python linter"),
"pygments": (has_pygments, "Pygments source highlighting library"),
"serve": (has_serve, "platform and python can manage 'hg serve -d'"),
"ssl": (has_ssl, "python >= 2.6 ssl module and python OpenSSL"),
"svn": (has_svn, "subversion client and admin tools"),
"svn13": (has_svn13, "subversion client and admin tools >= 1.3"),
"svn15": (has_svn15, "subversion client and admin tools >= 1.5"),
"svn-bindings": (has_svn_bindings, "subversion python bindings"),
"symlink": (has_symlink, "symbolic links"),
"system-sh": (has_system_sh, "system() uses sh"),
"tic": (has_tic, "terminfo compiler"),
"tla": (has_tla, "GNU Arch tla client"),
"unix-permissions": (has_unix_permissions, "unix-style permissions"),
"windows": (has_windows, "Windows"),
"msys": (has_msys, "Windows with MSYS"),
"aix": (has_aix, "AIX"),
}
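# Illustrative sketch only -- the real driver lives in hghave's command-line
# entry point, which is not part of this excerpt.  Given a list of required
# feature names, look each one up in the `checks` table above and collect the
# descriptions of any feature that is unavailable.
def _example_missing_features(required):
    missing = []
    for name in required:
        if name not in checks:
            missing.append('unknown feature: %s' % name)
            continue
        check, desc = checks[name]
        if not check():
            missing.append(desc)
    return missing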
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for debugger functionalities in tf.Session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import glob
import os
import shutil
import tempfile
import threading
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
class SessionDebugTestBase(test_util.TensorFlowTestCase):
"""Base class for unit tests of tfdbg running with tf.Session."""
@classmethod
def setUpClass(cls):
if test.is_gpu_available():
cls._expected_partition_graph_count = 2
cls._expected_num_devices = 2
cls._main_device = "/job:localhost/replica:0/task:0/gpu:0"
else:
cls._expected_partition_graph_count = 1
cls._expected_num_devices = 1
cls._main_device = "/job:localhost/replica:0/task:0/cpu:0"
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
self._dump_root = tempfile.mkdtemp()
def tearDown(self):
ops.reset_default_graph()
# Tear down temporary dump directory.
if os.path.isdir(self._dump_root):
shutil.rmtree(self._dump_root)
def _debug_urls(self, run_number=None):
raise NotImplementedError(
"_debug_urls() method is not implemented in the base test class.")
def _debug_dump_dir(self, run_number=None):
raise NotImplementedError(
"_debug_dump_dir() method is not implemented in the base test class.")
def _generate_dump_from_simple_addition_graph(self):
with session.Session() as sess:
u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
v_init_val = np.array([[2.0], [-1.0]])
# Use node names with overlapping namespace (i.e., parent directory) to
# test concurrent, non-racing directory creation.
u_name = "u"
v_name = "v"
w_name = "w"
u_init = constant_op.constant(u_init_val, shape=[2, 2])
u = variables.Variable(u_init, name=u_name)
v_init = constant_op.constant(v_init_val, shape=[2, 1])
v = variables.Variable(v_init, name=v_name)
w = math_ops.matmul(u, v, name=w_name)
u.initializer.run()
v.initializer.run()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_urls = "file://%s" % self._dump_root
# Add debug tensor watch for u.
debug_utils.add_debug_tensor_watch(
run_options, "%s/read" % u_name, 0, debug_urls=debug_urls)
# Add debug tensor watch for v.
debug_utils.add_debug_tensor_watch(
run_options, "%s/read" % v_name, 0, debug_urls=debug_urls)
run_metadata = config_pb2.RunMetadata()
# Invoke Session.run().
sess.run(w, options=run_options, run_metadata=run_metadata)
self.assertEqual(self._expected_partition_graph_count,
len(run_metadata.partition_graphs))
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
simple_add_results = collections.namedtuple("SimpleAddResults", [
"u_init_val", "v_init_val", "u", "v", "w", "u_name", "v_name", "w_name",
"dump"
])
return simple_add_results(u_init_val, v_init_val, u, v, w, u_name, v_name,
w_name, dump)
def testConcurrentDumpingToPathsWithOverlappingParentDirsWorks(self):
results = self._generate_dump_from_simple_addition_graph()
self.assertTrue(results.dump.loaded_partition_graphs())
# Since global_step is not explicitly specified, it should take its default
# value: -1.
self.assertEqual(-1, results.dump.core_metadata.global_step)
self.assertGreaterEqual(results.dump.core_metadata.session_run_count, 0)
self.assertGreaterEqual(results.dump.core_metadata.executor_step_count, 0)
self.assertEqual([], results.dump.core_metadata.input_names)
self.assertEqual([results.w.name], results.dump.core_metadata.output_names)
self.assertEqual([], results.dump.core_metadata.target_nodes)
# Verify the dumped tensor values for u and v.
self.assertEqual(2, results.dump.size)
self.assertAllClose([results.u_init_val],
results.dump.get_tensors("%s/read" % results.u_name, 0,
"DebugIdentity"))
self.assertAllClose([results.v_init_val],
results.dump.get_tensors("%s/read" % results.v_name, 0,
"DebugIdentity"))
self.assertGreaterEqual(
results.dump.get_rel_timestamps("%s/read" % results.u_name, 0,
"DebugIdentity")[0], 0)
self.assertGreaterEqual(
results.dump.get_rel_timestamps("%s/read" % results.v_name, 0,
"DebugIdentity")[0], 0)
self.assertGreater(
results.dump.get_dump_sizes_bytes("%s/read" % results.u_name, 0,
"DebugIdentity")[0], 0)
self.assertGreater(
results.dump.get_dump_sizes_bytes("%s/read" % results.v_name, 0,
"DebugIdentity")[0], 0)
def testGetOpTypeWorks(self):
results = self._generate_dump_from_simple_addition_graph()
self.assertEqual(results.u.op.type,
results.dump.node_op_type(results.u_name))
self.assertIn(results.v.op.type, results.dump.node_op_type(results.v_name))
self.assertIn(results.w.op.type, results.dump.node_op_type(results.w_name))
with self.assertRaisesRegexp(
ValueError, "Node 'foo_bar' does not exist in partition graphs."):
results.dump.node_op_type("foo_bar")
def testDumpStringTensorsWorks(self):
with session.Session() as sess:
str1_init_val = np.array(b"abc")
str2_init_val = np.array(b"def")
str1_init = constant_op.constant(str1_init_val)
str2_init = constant_op.constant(str2_init_val)
str1_name = "str1"
str2_name = "str2"
str1 = variables.Variable(str1_init, name=str1_name)
str2 = variables.Variable(str2_init, name=str2_name)
# Concatenate str1 and str2
str_concat = math_ops.add(str1, str2, name="str_concat")
str1.initializer.run()
str2.initializer.run()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_urls = self._debug_urls()
# Add debug tensor watch for str1.
debug_utils.add_debug_tensor_watch(
run_options, "%s/read" % str1_name, 0, debug_urls=debug_urls)
# Add debug tensor watch for str2.
debug_utils.add_debug_tensor_watch(
run_options, "%s/read" % str2_name, 0, debug_urls=debug_urls)
run_metadata = config_pb2.RunMetadata()
sess.run(str_concat, options=run_options, run_metadata=run_metadata)
# String ops are located on CPU.
self.assertEqual(1, len(run_metadata.partition_graphs))
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
self.assertIn(str1_name, dump.nodes())
self.assertIn(str2_name, dump.nodes())
self.assertEqual(2, dump.size)
self.assertEqual([str1_init_val],
dump.get_tensors("%s/read" % str1_name, 0,
"DebugIdentity"))
self.assertEqual([str2_init_val],
dump.get_tensors("%s/read" % str2_name, 0,
"DebugIdentity"))
self.assertGreaterEqual(
dump.get_rel_timestamps("%s/read" % str1_name, 0, "DebugIdentity")[0],
0)
self.assertGreaterEqual(
dump.get_rel_timestamps("%s/read" % str2_name, 0, "DebugIdentity")[0],
0)
self.assertGreater(
dump.get_dump_sizes_bytes("%s/read" % str1_name, 0,
"DebugIdentity")[0], 0)
self.assertGreater(
dump.get_dump_sizes_bytes("%s/read" % str2_name, 0,
"DebugIdentity")[0], 0)
def testDumpUninitializedVariable(self):
op_namespace = "testDumpUninitializedVariable"
with session.Session() as sess:
u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
s_init_val = b"str1"
u_name = "%s/u" % op_namespace
s_name = "%s/s" % op_namespace
u_init = constant_op.constant(u_init_val, shape=[2, 2])
u = variables.Variable(u_init, name=u_name)
s_init = constant_op.constant(s_init_val)
s = variables.Variable(s_init, name=s_name)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_urls = self._debug_urls()
# Add debug tensor watch for u.
debug_utils.add_debug_tensor_watch(
run_options, "%s" % u_name, 0, debug_urls=debug_urls)
debug_utils.add_debug_tensor_watch(
run_options, "%s" % s_name, 0, debug_urls=debug_urls)
run_metadata = config_pb2.RunMetadata()
# Initialize u and s.
sess.run(variables.global_variables_initializer(),
options=run_options,
run_metadata=run_metadata)
# Verify the dump file for the uninitialized value of u.
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
self.assertEqual(2, dump.size)
self.assertEqual(self._expected_partition_graph_count,
len(run_metadata.partition_graphs))
# Verify that the variable is properly initialized by the run() call.
u_vals = dump.get_tensors(u_name, 0, "DebugIdentity")
s_vals = dump.get_tensors(s_name, 0, "DebugIdentity")
self.assertEqual(1, len(u_vals))
self.assertIsNone(u_vals[0])
self.assertEqual(1, len(s_vals))
self.assertIsNone(s_vals[0])
# Call run() again, to check that u is initialized properly.
self.assertAllClose(u_init_val, sess.run(u))
self.assertEqual(s_init_val, sess.run(s))
def testDebugWhileLoopGeneratesMultipleDumps(self):
with session.Session() as sess:
num_iter = 10
# "u" is the Variable being updated in the loop.
u_name = "testDumpToFileWhileLoop/u"
u_namespace = u_name.split("/")[0]
u_init_val = np.array(11.0)
u_init = constant_op.constant(u_init_val)
u = variables.Variable(u_init, name=u_name)
# "v" is the increment.
v_name = "testDumpToFileWhileLoop/v"
v_namespace = v_name.split("/")[0]
v_init_val = np.array(2.0)
v_init = constant_op.constant(v_init_val)
v = variables.Variable(v_init, name=v_name)
u.initializer.run()
v.initializer.run()
i = constant_op.constant(0, name="testDumpToFileWhileLoop/i")
def cond(i):
return math_ops.less(i, num_iter)
def body(i):
new_u = state_ops.assign_add(u, v)
new_i = math_ops.add(i, 1)
op = control_flow_ops.group(new_u)
new_i = control_flow_ops.with_dependencies([op], new_i)
return [new_i]
loop = control_flow_ops.while_loop(cond, body, [i], parallel_iterations=1)
# Create RunOptions for debug-watching tensors
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_urls = self._debug_urls()
# Add debug tensor watch for u.
debug_utils.add_debug_tensor_watch(
run_options, u_name, 0, debug_urls=debug_urls)
# Add debug tensor watch for v.
debug_utils.add_debug_tensor_watch(
run_options, "%s/read" % v_name, 0, debug_urls=debug_urls)
# Add debug tensor watch for while/Identity.
debug_utils.add_debug_tensor_watch(
run_options, "while/Identity", 0, debug_urls=debug_urls)
# Add debug tensor watch for while/Add/y.
debug_utils.add_debug_tensor_watch(
run_options, "while/Add/y", 0, debug_urls=debug_urls)
run_metadata = config_pb2.RunMetadata()
r = sess.run(loop, options=run_options, run_metadata=run_metadata)
self.assertEqual(self._expected_partition_graph_count,
len(run_metadata.partition_graphs))
self.assertEqual(num_iter, r)
u_val_final = sess.run(u)
self.assertAllClose(u_init_val + num_iter * v_init_val, u_val_final)
# Verify dump files
self.assertTrue(os.path.isdir(self._dump_root))
self.assertTrue(os.path.isdir(os.path.join(self._dump_root, u_namespace)))
self.assertTrue(
os.path.isdir(os.path.join(self._dump_root, v_namespace, "v")))
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
# Expected dumped tensors: u, v/read, 10 iterations of while/Identity,
# and 10 iterations of while/Add/y.
self.assertEqual(1 + 1 + num_iter + num_iter, dump.size)
# Verify tensor values.
self.assertAllClose([u_init_val],
dump.get_tensors(u_name, 0, "DebugIdentity"))
self.assertAllClose([v_init_val],
dump.get_tensors("%s/read" % v_name, 0,
"DebugIdentity"))
while_id_tensors = dump.get_tensors("while/Identity", 0, "DebugIdentity")
self.assertEqual(10, len(while_id_tensors))
for k in xrange(len(while_id_tensors)):
self.assertAllClose(np.array(k), while_id_tensors[k])
# Verify ascending timestamps from the while loops.
while_id_rel_timestamps = dump.get_rel_timestamps("while/Identity", 0,
"DebugIdentity")
while_id_dump_sizes_bytes = dump.get_dump_sizes_bytes("while/Identity", 0,
"DebugIdentity")
self.assertEqual(10, len(while_id_rel_timestamps))
prev_rel_time = 0
prev_dump_size_bytes = while_id_dump_sizes_bytes[0]
for rel_time, dump_size_bytes in zip(while_id_rel_timestamps,
while_id_dump_sizes_bytes):
self.assertGreaterEqual(rel_time, prev_rel_time)
self.assertEqual(dump_size_bytes, prev_dump_size_bytes)
prev_rel_time = rel_time
prev_dump_size_bytes = dump_size_bytes
# Test querying debug watch keys from node name.
watch_keys = dump.debug_watch_keys("while/Identity")
self.assertEqual(["while/Identity:0:DebugIdentity"], watch_keys)
# Test querying debug datum instances from debug watch key.
self.assertEqual(10, len(dump.watch_key_to_data(watch_keys[0])))
self.assertEqual([], dump.watch_key_to_data("foo"))
def testDebugWhileLoopWatchingWholeGraphWorks(self):
with session.Session() as sess:
loop_body = lambda i: math_ops.add(i, 2)
loop_cond = lambda i: math_ops.less(i, 16)
i = constant_op.constant(10, name="i")
loop = control_flow_ops.while_loop(loop_cond, loop_body, [i])
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(run_options,
sess.graph,
debug_urls=self._debug_urls())
run_metadata = config_pb2.RunMetadata()
self.assertEqual(
16, sess.run(loop, options=run_options, run_metadata=run_metadata))
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
self.assertEqual(
[[10]], dump.get_tensors("while/Enter", 0, "DebugIdentity"))
self.assertEqual(
[[12], [14], [16]],
dump.get_tensors("while/NextIteration", 0, "DebugIdentity"))
def testDebugCondWatchingWholeGraphWorks(self):
with session.Session() as sess:
x = variables.Variable(10.0, name="x")
y = variables.Variable(20.0, name="y")
cond = control_flow_ops.cond(
x > y, lambda: math_ops.add(x, 1), lambda: math_ops.add(y, 1))
sess.run(variables.global_variables_initializer())
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(run_options,
sess.graph,
debug_urls=self._debug_urls())
run_metadata = config_pb2.RunMetadata()
self.assertEqual(
21, sess.run(cond, options=run_options, run_metadata=run_metadata))
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
self.assertAllClose(
[21.0], dump.get_tensors("cond/Merge", 0, "DebugIdentity"))
def testFindNodesWithBadTensorValues(self):
with session.Session() as sess:
u_name = "testFindNodesWithBadTensorValues/u"
v_name = "testFindNodesWithBadTensorValues/v"
w_name = "testFindNodesWithBadTensorValues/w"
x_name = "testFindNodesWithBadTensorValues/x"
y_name = "testFindNodesWithBadTensorValues/y"
z_name = "testFindNodesWithBadTensorValues/z"
u_init = constant_op.constant([2.0, 4.0])
u = variables.Variable(u_init, name=u_name)
v_init = constant_op.constant([2.0, 1.0])
v = variables.Variable(v_init, name=v_name)
# Expected output: [0.0, 3.0]
w = math_ops.subtract(u, v, name=w_name)
# Expected output: [inf, 1.3333]
x = math_ops.div(u, w, name=x_name)
# Expected output: [nan, 4.0]
y = math_ops.multiply(w, x, name=y_name)
z = math_ops.multiply(y, y, name=z_name)
u.initializer.run()
v.initializer.run()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugIdentity"],
debug_urls=self._debug_urls())
run_metadata = config_pb2.RunMetadata()
sess.run(z, options=run_options, run_metadata=run_metadata)
self.assertEqual(self._expected_partition_graph_count,
len(run_metadata.partition_graphs))
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
def has_bad_value(_, tensor):
return np.any(np.isnan(tensor)) or np.any(np.isinf(tensor))
# Find all "offending tensors".
bad_data = dump.find(has_bad_value)
# Verify that the nodes with bad values are caught through running find
# on the debug dump.
self.assertEqual(3, len(bad_data))
self.assertEqual(x_name, bad_data[0].node_name)
self.assertEqual(y_name, bad_data[1].node_name)
self.assertEqual(z_name, bad_data[2].node_name)
# Test first_n kwarg of find(): Find the first offending tensor.
first_bad_datum = dump.find(has_bad_value, first_n=1)
self.assertEqual(1, len(first_bad_datum))
self.assertEqual(x_name, first_bad_datum[0].node_name)
def _session_run_for_graph_structure_lookup(self):
with session.Session() as sess:
u_name = "testDumpGraphStructureLookup/u"
v_name = "testDumpGraphStructureLookup/v"
w_name = "testDumpGraphStructureLookup/w"
u_init = constant_op.constant([2.0, 4.0])
u = variables.Variable(u_init, name=u_name)
v = math_ops.add(u, u, name=v_name)
w = math_ops.add(v, v, name=w_name)
u.initializer.run()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugIdentity"],
debug_urls=self._debug_urls())
run_metadata = config_pb2.RunMetadata()
sess.run(w, options=run_options, run_metadata=run_metadata)
self.assertEqual(self._expected_partition_graph_count,
len(run_metadata.partition_graphs))
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
return u_name, v_name, w_name, dump
def testGraphStructureLookupGivesDevicesAndNodesInfo(self):
u_name, _, _, dump = self._session_run_for_graph_structure_lookup()
# Test num_devices().
self.assertEqual(self._expected_num_devices, len(dump.devices()))
# Test node_device().
self.assertEqual(self._main_device, dump.node_device(u_name))
with self.assertRaisesRegexp(ValueError,
"does not exist in partition graphs"):
dump.node_device(u_name + "foo")
# Test node_exists().
self.assertTrue(dump.node_exists(u_name))
self.assertTrue(dump.node_exists(u_name + "/read"))
self.assertFalse(dump.node_exists(u_name + "/read" + "/foo"))
def testGraphStructureLookupGivesNodesAndAttributes(self):
u_name, _, _, dump = self._session_run_for_graph_structure_lookup()
u_read_name = u_name + "/read"
# Test node name list lookup of the DebugDumpDir object.
node_names = dump.nodes()
self.assertTrue(u_name in node_names)
self.assertTrue(u_read_name in node_names)
# Test querying node attributes.
u_attr = dump.node_attributes(u_name)
self.assertEqual(dtypes.float32, u_attr["dtype"].type)
self.assertEqual(1, len(u_attr["shape"].shape.dim))
self.assertEqual(2, u_attr["shape"].shape.dim[0].size)
with self.assertRaisesRegexp(ValueError, "No node named \"foo\" exists"):
dump.node_attributes("foo")
def testGraphStructureLookupGivesDebugWatchKeys(self):
u_name, v_name, w_name, dump = (
self._session_run_for_graph_structure_lookup())
# Test querying the debug watch keys with node names.
self.assertEqual(["%s:0:DebugIdentity" % u_name],
dump.debug_watch_keys(u_name))
self.assertEqual(["%s:0:DebugIdentity" % v_name],
dump.debug_watch_keys(v_name))
self.assertEqual(["%s:0:DebugIdentity" % w_name],
dump.debug_watch_keys(w_name))
self.assertEqual([], dump.debug_watch_keys("foo"))
# Test querying debug datum instances from debug watch.
u_data = dump.watch_key_to_data(dump.debug_watch_keys(u_name)[0])
self.assertEqual(1, len(u_data))
self.assertEqual(u_name, u_data[0].node_name)
self.assertEqual(0, u_data[0].output_slot)
self.assertEqual("DebugIdentity", u_data[0].debug_op)
self.assertGreaterEqual(u_data[0].timestamp, 0)
self.assertEqual([], dump.watch_key_to_data("foo"))
def testGraphStructureLookupGivesNodeInputsAndRecipients(self):
u_name, v_name, w_name, dump = (
self._session_run_for_graph_structure_lookup())
u_read_name = u_name + "/read"
# Test the inputs lookup of the DebugDumpDir object.
self.assertEqual([], dump.node_inputs(u_name))
self.assertEqual([u_name], dump.node_inputs(u_read_name))
self.assertEqual([u_read_name] * 2, dump.node_inputs(v_name))
self.assertEqual([v_name] * 2, dump.node_inputs(w_name))
self.assertEqual([], dump.node_inputs(u_name, is_control=True))
self.assertEqual([], dump.node_inputs(u_read_name, is_control=True))
self.assertEqual([], dump.node_inputs(v_name, is_control=True))
self.assertEqual([], dump.node_inputs(w_name, is_control=True))
# Test the outputs recipient lookup of the DebugDumpDir object.
self.assertTrue(u_read_name in dump.node_recipients(u_name))
self.assertEqual(2, dump.node_recipients(u_read_name).count(v_name))
self.assertEqual(2, dump.node_recipients(v_name).count(w_name))
self.assertEqual([], dump.node_recipients(u_name, is_control=True))
self.assertEqual([], dump.node_recipients(u_read_name, is_control=True))
self.assertEqual([], dump.node_recipients(v_name, is_control=True))
self.assertEqual([], dump.node_recipients(w_name, is_control=True))
# Test errors raised on invalid node names.
with self.assertRaisesRegexp(ValueError,
"does not exist in partition graphs"):
dump.node_inputs(u_name + "foo")
with self.assertRaisesRegexp(ValueError,
"does not exist in partition graphs"):
dump.node_recipients(u_name + "foo")
# Test transitive_inputs().
self.assertEqual([], dump.transitive_inputs(u_name))
self.assertEqual([u_name], dump.transitive_inputs(u_read_name))
self.assertEqual(
set([u_name, u_read_name]), set(dump.transitive_inputs(v_name)))
self.assertEqual(
set([u_name, u_read_name, v_name]), set(dump.transitive_inputs(w_name)))
with self.assertRaisesRegexp(ValueError,
"does not exist in partition graphs"):
dump.transitive_inputs(u_name + "foo")
def testGraphStructureLookupWithoutPartitionGraphsDoesNotErrorOut(self):
_, _, _, dump = self._session_run_for_graph_structure_lookup()
# Now load the dump again, without the partition graphs, so we can check
# errors are not raised because the partition graphs are loaded from the
# dump directory.
dump = debug_data.DebugDumpDir(self._dump_root, validate=False)
self.assertTrue(dump.loaded_partition_graphs())
def testCausalityCheckOnDumpsDetectsWrongTemporalOrder(self):
with session.Session() as sess:
u_name = "testDumpCausalityCheck/u"
v_name = "testDumpCausalityCheck/v"
w_name = "testDumpCausalityCheck/w"
u_init = constant_op.constant([2.0, 4.0])
u = variables.Variable(u_init, name=u_name)
v = math_ops.add(u, u, name=v_name)
w = math_ops.add(v, v, name=w_name)
u.initializer.run()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugIdentity"],
debug_urls=self._debug_urls())
run_metadata = config_pb2.RunMetadata()
sess.run(w, options=run_options, run_metadata=run_metadata)
self.assertEqual(self._expected_partition_graph_count,
len(run_metadata.partition_graphs))
# First, loading the original dump without supplying the
# partition_graphs should not cause a LookupError, validation occurs
# only with partition_graphs loaded.
debug_data.DebugDumpDir(self._dump_root)
# Now, loading the original dump with partition graphs supplied should
# succeed. The validation should pass quietly.
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
# Get the dump file names and compute their timestamps.
self.assertEqual(
1, len(dump.get_tensor_file_paths(u_name, 0, "DebugIdentity")))
u_file_path = dump.get_tensor_file_paths(u_name, 0, "DebugIdentity")[0]
self.assertEqual(
1, len(dump.get_tensor_file_paths(v_name, 0, "DebugIdentity")))
v_file_path = dump.get_tensor_file_paths(v_name, 0, "DebugIdentity")[0]
u_timestamp = int(u_file_path[u_file_path.rindex("_") + 1:])
v_timestamp = int(v_file_path[v_file_path.rindex("_") + 1:])
# Swap the time stamps
new_u_file_path = u_file_path[:u_file_path.rindex(
"_")] + "_%d" % v_timestamp
new_v_file_path = v_file_path[:v_file_path.rindex(
"_")] + "_%d" % u_timestamp
os.rename(u_file_path, new_u_file_path)
os.rename(v_file_path, new_v_file_path)
# Load the dump directory again. Now a ValueError is expected to be
# raised due to the timestamp swap.
with self.assertRaisesRegexp(ValueError, "Causality violated"):
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
# Loading the dump directory with kwarg "validate" set explicitly to
# False should get rid of the error.
dump = debug_data.DebugDumpDir(
self._dump_root,
partition_graphs=run_metadata.partition_graphs,
validate=False)
def testWatchingOnlyOneOfTwoOutputSlotsDoesNotLeadToCausalityFailure(self):
with session.Session() as sess:
x_name = "oneOfTwoSlots/x"
u_name = "oneOfTwoSlots/u"
v_name = "oneOfTwoSlots/v"
w_name = "oneOfTwoSlots/w"
y_name = "oneOfTwoSlots/y"
x = variables.Variable([1, 3, 3, 7], dtype=dtypes.int32, name=x_name)
sess.run(x.initializer)
unique_x, indices, _ = array_ops.unique_with_counts(x, name=u_name)
v = math_ops.add(unique_x, unique_x, name=v_name)
w = math_ops.add(indices, indices, name=w_name)
y = math_ops.add(w, w, name=y_name)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
# Watch only the first output slot of u, even though it has two output
# slots.
debug_utils.add_debug_tensor_watch(
run_options, u_name, 0, debug_urls=self._debug_urls())
debug_utils.add_debug_tensor_watch(
run_options, w_name, 0, debug_urls=self._debug_urls())
debug_utils.add_debug_tensor_watch(
run_options, y_name, 0, debug_urls=self._debug_urls())
run_metadata = config_pb2.RunMetadata()
sess.run([v, y], options=run_options, run_metadata=run_metadata)
dump = debug_data.DebugDumpDir(
self._dump_root,
partition_graphs=run_metadata.partition_graphs,
validate=True)
self.assertAllClose([1, 3, 7],
dump.get_tensors(u_name, 0, "DebugIdentity")[0])
def testOutputSlotWithoutOutgoingEdgeCanBeWatched(self):
"""Test watching output slots not attached to any outgoing edges."""
with session.Session() as sess:
u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
u = constant_op.constant(u_init_val, shape=[2, 2], name="u")
# Create a control edge from a node with an output: From u to z.
# Node u will get executed only because of the control edge. The output
# tensor u:0 is not attached to any outgoing edge in the graph. This test
# checks that the debugger can watch such a tensor.
with ops.control_dependencies([u]):
z = control_flow_ops.no_op(name="z")
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugIdentity"],
debug_urls=self._debug_urls())
run_metadata = config_pb2.RunMetadata()
sess.run(z, options=run_options, run_metadata=run_metadata)
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
# Assert that the DebugIdentity watch on u works properly.
self.assertEqual(1, len(dump.dumped_tensor_data))
datum = dump.dumped_tensor_data[0]
self.assertEqual("u", datum.node_name)
self.assertEqual(0, datum.output_slot)
self.assertEqual("DebugIdentity", datum.debug_op)
self.assertAllClose([[5.0, 3.0], [-1.0, 0.0]], datum.get_tensor())
def testWatchingVariableUpdateOpsSeesUpdatedValues(self):
"""Watch output slots on Variable-updating ops, with no emitted edges."""
with session.Session() as sess:
u_init = constant_op.constant(10.0)
u = variables.Variable(u_init, name="gdo/u")
v_init = constant_op.constant(20.0)
v = variables.Variable(v_init, name="gdo/v")
w = math_ops.multiply(u, v, name="gdo/w")
# gdo stands for GradientDescentOptimizer.
train_op = gradient_descent.GradientDescentOptimizer(
learning_rate=0.1).minimize(
w, name="gdo/train")
u.initializer.run()
v.initializer.run()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugIdentity"],
debug_urls=self._debug_urls())
run_metadata = config_pb2.RunMetadata()
sess.run(train_op, options=run_options, run_metadata=run_metadata)
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
update_u_data = dump.watch_key_to_data(
"gdo/train/update_gdo/u/ApplyGradientDescent:0:DebugIdentity")
self.assertEqual(1, len(update_u_data))
# Gradient descent on u: w = u * v, so dw / du = v.
# Updated value of u should be:
# 10.0 - learning_rate * v = 10.0 - 0.1 * 20.0 = 8.0
self.assertAllClose(8.0, update_u_data[0].get_tensor())
update_v_data = dump.watch_key_to_data(
"gdo/train/update_gdo/v/ApplyGradientDescent:0:DebugIdentity")
self.assertEqual(1, len(update_v_data))
# Gradient descent on v: w = u * v, so dw / dv = u.
# Updated value of v should be:
# 20.0 - learning_rate * u = 20.0 - 0.1 * 10.0 = 19.0
self.assertAllClose(19.0, update_v_data[0].get_tensor())
# Verify that the Variables u and v are updated properly.
self.assertAllClose(8.0, sess.run(u))
self.assertAllClose(19.0, sess.run(v))
def testAllowsWatchingUnconnectedOutputTensor(self):
"""Watch an output slot not emitting any edges.
(Not even control edges from the node.)
"""
with session.Session() as sess:
x_init = constant_op.constant([2, 2, 3, 5, 5])
x = variables.Variable(x_init, name="unconnected/x")
# The UniqueOp (tf.unique) has two output slots. Use only slot 0 in the
# graph. Let the debugger watch the unused slot 1.
unique_x, _ = array_ops.unique(x, name="unconnected/unique_x")
y = math_ops.add(unique_x, [0, 1, 2], name="unconnected/y")
x.initializer.run()
# Verify that only slot 0 of unique_x has recipients, while slot 1 of the
# same node does not have recipients.
unique_x_slot_0_recipients = []
unique_x_slot_1_recipients = []
for op in sess.graph.get_operations():
for inp in op.inputs:
if inp.name == "unconnected/unique_x:0":
unique_x_slot_0_recipients.append(op.name)
elif inp.name == "unconnected/unique_x:1":
unique_x_slot_1_recipients.append(op.name)
self.assertEqual(["unconnected/y"], unique_x_slot_0_recipients)
self.assertEqual([], unique_x_slot_1_recipients)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugIdentity"],
debug_urls=self._debug_urls())
run_metadata = config_pb2.RunMetadata()
result = sess.run(y, options=run_options, run_metadata=run_metadata)
self.assertAllClose([2, 4, 7], result)
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
# Assert that the connected slot (slot 0) is dumped properly.
unique_x_slot_0_dumps = dump.watch_key_to_data(
"unconnected/unique_x:0:DebugIdentity")
self.assertEqual(1, len(unique_x_slot_0_dumps))
self.assertEqual("unconnected/unique_x",
unique_x_slot_0_dumps[0].node_name)
self.assertEqual(0, unique_x_slot_0_dumps[0].output_slot)
self.assertAllClose([2, 3, 5], unique_x_slot_0_dumps[0].get_tensor())
# Assert that the unconnected slot (slot 1) is dumped properly.
unique_x_slot_1_dumps = dump.watch_key_to_data(
"unconnected/unique_x:1:DebugIdentity")
self.assertEqual(1, len(unique_x_slot_1_dumps))
self.assertEqual("unconnected/unique_x",
unique_x_slot_1_dumps[0].node_name)
self.assertEqual(1, unique_x_slot_1_dumps[0].output_slot)
self.assertAllClose([0, 0, 1, 2, 2],
unique_x_slot_1_dumps[0].get_tensor())
def testSuccessiveDebuggingRunsIncreasesCounters(self):
"""Test repeated Session.run() calls with debugger increments counters."""
with session.Session() as sess:
ph = array_ops.placeholder(dtypes.float32, name="successive/ph")
x = array_ops.transpose(ph, name="mismatch/x")
y = array_ops.squeeze(ph, name="mismatch/y")
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options, sess.graph, debug_urls=self._debug_urls(), global_step=1)
sess.run(x, feed_dict={ph: np.array([[7.0, 8.0]])}, options=run_options)
dump1 = debug_data.DebugDumpDir(self._dump_root)
self.assertEqual(1, dump1.core_metadata.global_step)
self.assertGreaterEqual(dump1.core_metadata.session_run_count, 0)
self.assertEqual(0, dump1.core_metadata.executor_step_count)
self.assertEqual([ph.name], dump1.core_metadata.input_names)
self.assertEqual([x.name], dump1.core_metadata.output_names)
self.assertEqual([], dump1.core_metadata.target_nodes)
shutil.rmtree(self._dump_root)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options, sess.graph, debug_urls=self._debug_urls(), global_step=2)
# Calling run() with the same feed, same output and same debug watch
# options should increment both session_run_count and
# executor_step_count.
sess.run(x, feed_dict={ph: np.array([[7.0, 8.0]])}, options=run_options)
dump2 = debug_data.DebugDumpDir(self._dump_root)
self.assertEqual(2, dump2.core_metadata.global_step)
self.assertEqual(dump1.core_metadata.session_run_count + 1,
dump2.core_metadata.session_run_count)
self.assertEqual(dump1.core_metadata.executor_step_count + 1,
dump2.core_metadata.executor_step_count)
self.assertEqual([ph.name], dump2.core_metadata.input_names)
self.assertEqual([x.name], dump2.core_metadata.output_names)
self.assertEqual([], dump2.core_metadata.target_nodes)
shutil.rmtree(self._dump_root)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options, sess.graph, debug_urls=self._debug_urls(), global_step=3)
# Calling run() with a different output should increment
# session_run_count, but not executor_step_count.
sess.run(y, feed_dict={ph: np.array([[7.0, 8.0]])}, options=run_options)
dump3 = debug_data.DebugDumpDir(self._dump_root)
self.assertEqual(3, dump3.core_metadata.global_step)
self.assertEqual(dump2.core_metadata.session_run_count + 1,
dump3.core_metadata.session_run_count)
self.assertEqual(0, dump3.core_metadata.executor_step_count)
self.assertEqual([ph.name], dump3.core_metadata.input_names)
self.assertEqual([y.name], dump3.core_metadata.output_names)
self.assertEqual([], dump3.core_metadata.target_nodes)
def testDebuggingDuringOpError(self):
"""Test the debug tensor dumping when error occurs in graph runtime."""
with session.Session() as sess:
ph = array_ops.placeholder(dtypes.float32, name="mismatch/ph")
x = array_ops.transpose(ph, name="mismatch/x")
m = constant_op.constant(
np.array(
[[1.0, 2.0]], dtype=np.float32), name="mismatch/m")
y = math_ops.matmul(m, x, name="mismatch/y")
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugIdentity"],
debug_urls=self._debug_urls())
with self.assertRaises(errors.OpError):
sess.run(y,
options=run_options,
feed_dict={ph: np.array([[-3.0], [0.0]])})
dump = debug_data.DebugDumpDir(self._dump_root)
self.assertGreaterEqual(dump.core_metadata.session_run_count, 0)
self.assertGreaterEqual(dump.core_metadata.executor_step_count, 0)
self.assertEqual([ph.name], dump.core_metadata.input_names)
self.assertEqual([y.name], dump.core_metadata.output_names)
self.assertEqual([], dump.core_metadata.target_nodes)
# Despite the fact that the run() call errored out and partition_graphs
# are not available via run_metadata, the partition graphs should still
# have been loaded from the dump directory.
self.assertTrue(dump.loaded_partition_graphs())
m_dumps = dump.watch_key_to_data("mismatch/m:0:DebugIdentity")
self.assertEqual(1, len(m_dumps))
self.assertAllClose(np.array([[1.0, 2.0]]), m_dumps[0].get_tensor())
x_dumps = dump.watch_key_to_data("mismatch/x:0:DebugIdentity")
self.assertEqual(1, len(x_dumps))
self.assertAllClose(np.array([[-3.0, 0.0]]), x_dumps[0].get_tensor())
def testDebugNumericSummaryOnInitializedTensorGivesCorrectResult(self):
with session.Session() as sess:
a = variables.Variable(
[
np.nan, np.nan, 0.0, 0.0, 0.0, -1.0, -3.0, 3.0, 7.0, -np.inf,
-np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.nan, np.nan
],
dtype=np.float32,
name="numeric_summary/a")
b = variables.Variable(
[0.0] * 18, dtype=np.float32, name="numeric_summary/b")
c = math_ops.add(a, b, name="numeric_summary/c")
sess.run(variables.global_variables_initializer())
run_metadata = config_pb2.RunMetadata()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugNumericSummary"],
debug_urls=self._debug_urls())
sess.run(c, options=run_options, run_metadata=run_metadata)
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
self.assertTrue(dump.loaded_partition_graphs())
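# The 12 summary elements asserted below can be read off the 18-element
# tensor `a` defined above: is-initialized flag, element count, NaN count,
# -inf count, negative-finite count, zero count, positive-finite count,
# +inf count, then min, max, mean and variance of the finite elements.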
self.assertAllClose([[
1.0, 18.0, 4.0, 2.0, 2.0, 3.0, 2.0, 5.0, -3.0, 7.0, 0.85714286,
8.97959184
]], dump.get_tensors("numeric_summary/a/read", 0, "DebugNumericSummary"))
def testDebugNumericSummaryOnUninitializedTensorGivesCorrectResult(self):
with session.Session() as sess:
a = variables.Variable(
[42], dtype=np.float32, name="numeric_summary_uninit/a")
run_metadata = config_pb2.RunMetadata()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugNumericSummary"],
debug_urls=self._debug_urls())
sess.run(a.initializer, options=run_options, run_metadata=run_metadata)
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
self.assertTrue(dump.loaded_partition_graphs())
# DebugNumericSummary output should reflect the uninitialized state of
# the watched tensor.
numeric_summary = dump.get_tensors("numeric_summary_uninit/a", 0,
"DebugNumericSummary")[0]
self.assertAllClose([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
numeric_summary[0:8])
self.assertTrue(np.isinf(numeric_summary[8]))
self.assertGreater(numeric_summary[8], 0.0)
self.assertTrue(np.isinf(numeric_summary[9]))
self.assertLess(numeric_summary[9], 0.0)
self.assertTrue(np.isnan(numeric_summary[10]))
self.assertTrue(np.isnan(numeric_summary[11]))
def testDebugNumericSummaryFailureIsToleratedWhenOrdered(self):
with session.Session() as sess:
a = variables.Variable("1", name="a")
b = variables.Variable("3", name="b")
c = variables.Variable("2", name="c")
d = math_ops.add(a, b, name="d")
e = math_ops.add(d, c, name="e")
n = parsing_ops.string_to_number(e, name="n")
m = math_ops.add(n, n, name="m")
sess.run(variables.global_variables_initializer())
# Using DebugNumericSummary on sess.run(m) with the default
# tolerate_debug_op_creation_failures=False should error out due to the
# presence of string-dtype Tensors in the graph.
run_metadata = config_pb2.RunMetadata()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugNumericSummary"],
debug_urls=self._debug_urls())
with self.assertRaises(errors.FailedPreconditionError):
sess.run(m, options=run_options, run_metadata=run_metadata)
# Using tolerate_debug_op_creation_failures=True should get rid of the
# error.
new_run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
new_run_options,
sess.graph,
debug_ops=["DebugNumericSummary"],
debug_urls=self._debug_urls(),
tolerate_debug_op_creation_failures=True)
self.assertEqual(264,
sess.run(
m,
options=new_run_options,
run_metadata=run_metadata))
# The numeric (non-string) Tensors in the graph should have been dumped
# properly.
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
self.assertIn("n:0:DebugNumericSummary", dump.debug_watch_keys("n"))
self.assertIn("m:0:DebugNumericSummary", dump.debug_watch_keys("m"))
def testDebugQueueOpsDoesNotErrorOut(self):
with session.Session() as sess:
q = data_flow_ops.FIFOQueue(3, "float", name="fifo_queue")
q_init = q.enqueue_many(([101.0, 202.0, 303.0],), name="enqueue_many")
run_metadata = config_pb2.RunMetadata()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_urls=self._debug_urls())
sess.run(q_init, options=run_options, run_metadata=run_metadata)
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
self.assertTrue(dump.loaded_partition_graphs())
self.assertIsNone(dump.get_tensors("fifo_queue", 0, "DebugIdentity")[0])
self.assertAllClose(
[101.0, 202.0, 303.0],
dump.get_tensors("enqueue_many/component_0", 0, "DebugIdentity")[0])
def testLookUpNodePythonTracebackWorks(self):
with session.Session() as sess:
u_init = constant_op.constant(10.0)
u = variables.Variable(u_init, name="traceback/u")
v_init = constant_op.constant(20.0)
v = variables.Variable(v_init, name="traceback/v")
w = math_ops.multiply(u, v, name="traceback/w")
sess.run(variables.global_variables_initializer())
run_metadata = config_pb2.RunMetadata()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options, sess.graph, debug_urls=self._debug_urls())
sess.run(w, options=run_options, run_metadata=run_metadata)
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
# Prior to setting the Python graph, attempts to do traceback lookup
# should lead to exceptions.
with self.assertRaisesRegexp(
LookupError, "Python graph is not available for traceback lookup"):
dump.node_traceback("traceback/w")
dump.set_python_graph(sess.graph)
# After setting the Python graph, attempts to look up nonexistent nodes
# should lead to exceptions.
with self.assertRaisesRegexp(KeyError,
r"Cannot find node \"foo\" in Python graph"):
dump.node_traceback("foo")
# Lookup should work with node name input.
traceback = dump.node_traceback("traceback/w")
self.assertIsInstance(traceback, list)
self.assertGreater(len(traceback), 0)
for trace in traceback:
self.assertIsInstance(trace, tuple)
# Lookup should also work with tensor name input.
traceback = dump.node_traceback("traceback/w:0")
self.assertIsInstance(traceback, list)
self.assertGreater(len(traceback), 0)
for trace in traceback:
self.assertIsInstance(trace, tuple)
class DebugConcurrentRunCallsTest(test_util.TensorFlowTestCase):
"""Test for debugging concurrent Session.run() calls."""
def _get_concurrent_debug_urls(self):
"""Abstract method to generate debug URLs for concurrent debugged runs."""
raise NotImplementedError(
"_get_concurrent_debug_urls is not implemented in the base test class")
def testDebugConcurrentVariableUpdates(self):
if test.is_gpu_available():
self.skipTest("No testing concurrent runs on a single GPU.")
with session.Session() as sess:
v = variables.Variable(30.0, name="v")
constants = []
for i in xrange(self._num_concurrent_runs):
constants.append(constant_op.constant(1.0, name="c%d" % i))
incs = [
state_ops.assign_add(
v, c, use_locking=True, name=("inc%d" % i))
for (i, c) in enumerate(constants)
]
sess.run(v.initializer)
concurrent_debug_urls = self._get_concurrent_debug_urls()
def inc_job(index):
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options, sess.graph, debug_urls=concurrent_debug_urls[index])
for _ in xrange(100):
sess.run(incs[index], options=run_options)
inc_threads = []
for index in xrange(self._num_concurrent_runs):
inc_thread = threading.Thread(target=functools.partial(inc_job, index))
inc_thread.start()
inc_threads.append(inc_thread)
for inc_thread in inc_threads:
inc_thread.join()
self.assertAllClose(30.0 + 1.0 * self._num_concurrent_runs * 100,
sess.run(v))
all_session_run_counts = []
for index in xrange(self._num_concurrent_runs):
dump = debug_data.DebugDumpDir(self._dump_roots[index])
self.assertTrue(dump.loaded_partition_graphs())
v_data = dump.get_tensors("v", 0, "DebugIdentity")
self.assertEqual(100, len(v_data))
# Examine all the core metadata files
core_metadata_files = glob.glob(
os.path.join(self._dump_roots[index], "_tfdbg_core*"))
timestamps = []
session_run_counts = []
executor_step_counts = []
for core_metadata_file in core_metadata_files:
with open(core_metadata_file, "rb") as f:
event = event_pb2.Event()
event.ParseFromString(f.read())
core_metadata = (
debug_data.extract_core_metadata_from_event_proto(event))
timestamps.append(event.wall_time)
session_run_counts.append(core_metadata.session_run_count)
executor_step_counts.append(core_metadata.executor_step_count)
all_session_run_counts.extend(session_run_counts)
# Assert that executor_step_count increases by one at a time.
executor_step_counts = zip(timestamps, executor_step_counts)
executor_step_counts = sorted(executor_step_counts, key=lambda x: x[0])
for i in xrange(len(executor_step_counts) - 1):
self.assertEqual(executor_step_counts[i][1] + 1,
executor_step_counts[i + 1][1])
# Assert that session_run_count increases monotonically.
session_run_counts = zip(timestamps, session_run_counts)
session_run_counts = sorted(session_run_counts, key=lambda x: x[0])
for i in xrange(len(session_run_counts) - 1):
self.assertGreater(session_run_counts[i + 1][1],
session_run_counts[i][1])
# Assert that the session_run_counts from the concurrent run() calls are
# all unique.
self.assertEqual(len(all_session_run_counts),
len(set(all_session_run_counts)))
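# A minimal sketch, not part of the original test file: the abstract hooks
# _debug_urls() and _debug_dump_dir() on SessionDebugTestBase are meant to be
# filled in by a concrete subclass.  The file-URL layout below is an
# assumption for illustration; the real concrete test classes live elsewhere.
class _ExampleSessionDebugFileTest(SessionDebugTestBase):
    def _debug_dump_dir(self, run_number=None):
        if run_number is None:
            return self._dump_root
        return os.path.join(self._dump_root, "run_%d" % run_number)
    def _debug_urls(self, run_number=None):
        return ["file://%s" % self._debug_dump_dir(run_number=run_number)]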
if __name__ == "__main__":
googletest.main()
|
|
from .constraint import Constraint
import copy
class Solver(object):
def __order_domain_values(self, item, csp, possible_bags):
"""least constraining value (MRV) heuristic"""
# Number of constraints
bags_constraints = []
# Dictionary of possible bags
item_possible_bags = possible_bags[item.name]
# Generate list of number of constraints for each bag
for bag in item_possible_bags:
# Count number of neighbor's inconsistent values
count = 0
for constraint in item.constraints:
# When the constraint involves a neighbor (binary constraint)
cond = (constraint.constraint_type >=
Constraint.BINARY_CONSTRAINT_EQUALITY)
if cond:
# Get neighbor from constraint
neighbor = constraint.get_neighbor(item)
# Tentatively put the item in the bag
item.bag = item_possible_bags[bag]
item_possible_bags[bag].items.append(item)
# Get number of inconsistent bags for current neighbor
num_bag_invalid = self.__num_invalid_bag(neighbor, possible_bags[neighbor.name])
# Restore contaminated bag
item_possible_bags[bag].items.remove(item)
item.bag = None
# Add neighbor's inconsistent bags
count += num_bag_invalid
# Keep the bag and its number of corresponding ruled-out
# neighbor bags in the list
bags_constraints.append([csp.bags[bag], count])
# Sort so the candidate bag that rules out the fewest choices
# for the neighboring variables in the constraint graph comes first
bags_constraints.sort(key=lambda bag: bag[1])
# Preserve only values in value-count pairs
bags = [bag[0] for bag in bags_constraints]
# return sorted values
return bags
def __select_unassigned_variable(self, assignment, csp, possible_bags):
"""Select unassigned variable with with fewest legal values"""
# Initialize legal values for items
unassigned_items = {item_name: csp.items[
item_name] for item_name in csp.items if item_name not in assignment}
# Get all unassigned item names
unassigned_item_names = list(unassigned_items.keys())
min_item_name = unassigned_item_names[0]
# Get number of constraints of each unassigned item
num_constraints = self.__get_num_constraints(csp, unassigned_items)
# Find the item with the fewest possible bags
for item_name in unassigned_item_names[1:]:
# Number of possible bags remaining for this item
num_remaining_bag = len(possible_bags[item_name])
num_min_item = len(possible_bags[min_item_name])
# Prefer the item with fewer possible bags
if num_remaining_bag < num_min_item:
min_item_name = item_name
elif num_remaining_bag == num_min_item:
# Tie-break: same number of possible bags
if num_constraints[item_name] >= num_constraints[min_item_name]:
# Select the one with the most constraints on other unassigned items
min_item_name = item_name
# print min_item_name
return csp.items[min_item_name]
def __num_invalid_bag(self, item, possible_bags):
"""Get number of ruled out values"""
# Counter
count = 0
# Loop through all possible values
for bag in possible_bags:
# Add item into bag
item.bag = possible_bags[bag]
possible_bags[bag].items.append(item)
# Validate value
for constraint in item.constraints:
if not constraint.validate():
# Invalid value
count += 1
break
# Clean up bag
possible_bags[bag].items.remove(item)
item.bag = None
# return count
return count
def __get_num_constraints(self, csp, unassigned_item_names):
"""Get number of constraints a variable on others"""
# Number of constraints
num_constraints = {}
# For all unassigned items
for item_name in unassigned_item_names:
# No constraints on other unassigned variables yet
count = 0
# Iterate through constraints
for constraint in csp.items[item_name].constraints:
# Binary constraints
cond = (constraint.constraint_type >=
Constraint.BINARY_CONSTRAINT_EQUALITY)
if cond:
# All items involved in constraint
for item in constraint.items:
# The other unassigned item
if item.name != item_name and item.name in unassigned_item_names:
# Increment number of constraints on other
# unassigned items
count += 1
# Save number of constraints for this item
num_constraints[item_name] = count
# return dictionary
return num_constraints
def __inference(self, csp, item, bag, assignment, possible_bags):
"""Make inference on neighbor"""
# Perform forward checking on neighbors
return self.__forward_checking(csp, item, bag, assignment, possible_bags)
def __is_complete(self, assignment, csp):
# Assignment is complete once every item has been assigned
return len(assignment) == len(csp.items)
def __all_bags_limit_full_test(self, csp):
"""Check whether is bag is 90% full and it's lower limit"""
for bag in csp.bags:
cond1 = not csp.bags[bag].is_ninety_percent_full()
cond2 = not csp.bags[bag].fit_lower_limit()
if cond1 or cond2:
# Invalid assignment
return False
# Good assignment
return True
def backtrack(self, assignment, csp, possible_bags):
# Stop when assignment completed
if self.__is_complete(assignment, csp):
if self.__all_bags_limit_full_test(csp):
# Valid assignment
return assignment
else:
# Fail
return None
# Copy possible bags so changes made here do not leak back to the caller
new_possible_bags = possible_bags.copy()
# Select an unassigned item based on minimum remaining value heuristic
item = self.__select_unassigned_variable(assignment, csp, new_possible_bags)
# Order possible bags based on the least constraining value heuristic
for bag in self.__order_domain_values(item, csp, new_possible_bags):
# Check whether the value is consistent with its constraints
if self.__is_consistent(bag, item, assignment, csp):
# Initialize assignment dictionary
if item.name not in assignment:
assignment[item.name] = []
# Assign item with bag
assignment[item.name].append(bag.name)
bag.items.append(item)
# Propagate checking through arcs, rule out inconsistent neighbor values
inferences = self.__inference(csp, item, bag, assignment, new_possible_bags)
if inferences is not None:
# Successfully made inference
# Try to assign the next unassigned variable
result = self.backtrack(assignment, csp, new_possible_bags)
if result is not None:
# Assignment succeeded
bag.items.remove(item)
# print result
return result
# Assignment failed
# Restore the neighbor candidate values removed during inference
for inference in inferences:
for value in inferences[inference]:
new_possible_bags[inference][value] = inferences[inference][value]
# Clean up bag
bag.items.remove(item)
assignment[item.name].remove(bag.name)
# Remove the item's entry when no bag remains assigned to it
if len(assignment[item.name]) == 0:
assignment.pop(item.name)
# Assignment failed
return None
def __is_consistent(self, bag, item, assignment, csp):
"""Checkout whether the value is consistent with constraints"""
if not bag.in_capacity(item):
return False
assigned_item_names = assignment.keys()
for constraint in item.constraints:
if Constraint.UNARY_CONSTRAINT_IN_BAGS <= constraint.constraint_type <= Constraint. \
UNARY_CONSTRAINT_NOT_IN_BAGS:
item.bag = bag
bag.items.append(item)
if not constraint.validate():
bag.items.remove(item)
item.bag = None
return False
item.bag = None
bag.items.remove(item)
elif Constraint.BINARY_CONSTRAINT_EQUALITY <= constraint.constraint_type \
<= Constraint.BINARY_CONSTRAINT_INCLUSIVITY:
neighbor = constraint.get_neighbor(item)
if neighbor.name in assigned_item_names:
item.bag = bag
bag.items.append(item)
for neighbor_bag_name in assignment[neighbor.name]:
neighbor.bag = csp.bags[neighbor_bag_name]
neighbor.bag.items.append(neighbor)
if not constraint.validate():
neighbor.bag.items.remove(neighbor)
bag.items.remove(item)
neighbor.bag = None
item.bag = None
return False
neighbor.bag.items.remove(neighbor)
neighbor.bag = None
bag.items.remove(item)
item.bag = None
return True
def __forward_checking(self, csp, item, bag, assignment, possible_bags):
unassigned_items = {item_name: csp.items[item_name] for item_name in csp.items if item_name not in assignment}
inferences = {}
for constraint in item.constraints:
if Constraint.BINARY_CONSTRAINT_EQUALITY <= constraint.constraint_type \
< Constraint.BINARY_CONSTRAINT_INCLUSIVITY:
# Get neighbor of item
neighbor = constraint.get_neighbor(item)
# When the neighbor is unassigned
if neighbor.name in unassigned_items:
# Delete bags in the neighbor's domain that are inconsistent with the bag of the currently assigned item
# Add item into bag
bag.items.append(item)
item.bag = bag
invalid_bags = self.__clean_up_neighbor(constraint, neighbor, possible_bags)
bag.items.remove(item)
item.bag = None
# When the neighbor has no possible bags left
if invalid_bags is None:
return None
# Map invalid bags to neighbor
inferences[neighbor.name] = invalid_bags
return inferences
def __clean_up_neighbor(self, constraint, item, possible_bags):
"""Remove inconsistent bags"""
# Keep track of the bags removed as invalid
invalid_bags = {}
loop = possible_bags[item.name].copy()
# Loop through all possible bags
for bag in loop:
item.bag = possible_bags[item.name][bag]
if not constraint.validate():
invalid_bags[bag] = possible_bags[item.name].pop(bag)
item.bag = None
if len(possible_bags[item.name]) == 0:
return None
return invalid_bags
def solve(self, csp):
possible_bags = {}
for item in csp.items:
possible_bags[item] = csp.bags.copy()
bt = self.backtrack({}, csp, possible_bags)
result = {}
for bag in csp.bags:
result[bag] = []
if bt:
for item in bt:
for bag in bt[item]:
result[bag].append(item)
else:
return None
return result
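# Illustrative usage sketch (commented out). It assumes the CSP, Bag and Item
# classes defined elsewhere in this module and an instance of this solver class
# named `solver`; the constructor arguments shown here are hypothetical.
#
#     csp = CSP()
#     csp.bags['b1'] = Bag('b1', capacity=100)
#     csp.items['i1'] = Item('i1', weight=60)
#     csp.items['i2'] = Item('i2', weight=35)
#     result = solver.solve(csp)
#     # result maps each bag name to the item names placed in it,
#     # e.g. {'b1': ['i1', 'i2']}, or is None when no valid assignment exists.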
|
|
#!/usr/bin/env python
"""Memory Map File Analyser for ARM mbed"""
import sys
import os
import re
import csv
import json
import argparse
from copy import deepcopy
from prettytable import PrettyTable
from utils import argparse_filestring_type, \
argparse_lowercase_hyphen_type, argparse_uppercase_type
RE_ARMCC = re.compile(
r'^\s+0x(\w{8})\s+0x(\w{8})\s+(\w+)\s+(\w+)\s+(\d+)\s+[*]?.+\s+(.+)$')
RE_IAR = re.compile(
r'^\s+(.+)\s+(zero|const|ro code|inited|uninit)\s'
r'+0x(\w{8})\s+0x(\w+)\s+(.+)\s.+$')
RE_CMDLINE_FILE_IAR = re.compile(r'^#\s+(.+\.o)')
RE_LIBRARY_IAR = re.compile(r'^(.+\.a)\:.+$')
RE_OBJECT_LIBRARY_IAR = re.compile(r'^\s+(.+\.o)\s.*')
RE_OBJECT_FILE_GCC = re.compile(r'^(.+\/.+\.o)$')
RE_LIBRARY_OBJECT_GCC = re.compile(r'^.+\/lib(.+\.a)\((.+\.o)\)$')
RE_STD_SECTION_GCC = re.compile(r'^\s+.*0x(\w{8,16})\s+0x(\w+)\s(.+)$')
RE_FILL_SECTION_GCC = re.compile(r'^\s*\*fill\*\s+0x(\w{8,16})\s+0x(\w+).*$')
RE_OBJECT_ARMCC = re.compile(r'(.+\.(l|ar))\((.+\.o)\)')
class MemapParser(object):
"""An object that represents parsed results, parses the memory map files,
and writes out different file types of memory results
"""
print_sections = ('.text', '.data', '.bss')
misc_flash_sections = ('.interrupts', '.flash_config')
other_sections = ('.interrupts_ram', '.init', '.ARM.extab',
'.ARM.exidx', '.ARM.attributes', '.eh_frame',
'.init_array', '.fini_array', '.jcr', '.stab',
'.stabstr', '.ARM.exidx', '.ARM')
# sections to print info (generic for all toolchains)
sections = ('.text', '.data', '.bss', '.heap', '.stack')
def __init__(self):
""" General initialization
"""
# list of all modules and their sections
self.modules = dict() # full list - doesn't change with depth
self.short_modules = dict() # short version with specific depth
# sections must be defined in this order so the irrelevant ones are filtered out first
self.all_sections = self.sections + self.other_sections + \
self.misc_flash_sections + ('unknown', 'OUTPUT')
# Memory report (sections + summary)
self.mem_report = []
# Just the memory summary section
self.mem_summary = dict()
self.subtotal = dict()
self.misc_flash_mem = 0
# Modules passed to the linker on the command line
# this is a dict because modules are looked up by their basename
self.cmd_modules = {}
def module_add(self, object_name, size, section):
""" Adds a module / section to the list
Positional arguments:
object_name - name of the entry to add
size - the size of the module being added
section - the section the module contributes to
"""
if not object_name or not size or not section:
return
if object_name in self.modules:
self.modules[object_name].setdefault(section, 0)
self.modules[object_name][section] += size
return
obj_split = os.sep + os.path.basename(object_name)
for module_path, contents in self.modules.items():
if module_path.endswith(obj_split) or module_path == object_name:
contents.setdefault(section, 0)
contents[section] += size
return
new_module = {section: size}
self.modules[object_name] = new_module
def module_replace(self, old_object, new_object):
""" Replaces an object name with a new one
"""
if old_object in self.modules:
self.modules[new_object] = self.modules[old_object]
del self.modules[old_object]
def check_new_section_gcc(self, line):
""" Check whether a new section in a map file has been detected (only
applies to gcc)
Positional arguments:
line - the line to check for a new section
"""
for i in self.all_sections:
if line.startswith(i):
# return the name of the section (assuming it's a known one)
return i
if line.startswith('.'):
return 'unknown'  # all others are classified as unknown
else:
return False # everything else, means no change in section
def parse_object_name_gcc(self, line):
""" Parse a path to object file
Positional arguments:
line - the path to parse the object and module name from
"""
line = line.replace('\\', '/')
test_re_mbed_os_name = re.match(RE_OBJECT_FILE_GCC, line)
if test_re_mbed_os_name:
object_name = test_re_mbed_os_name.group(1)
# corner case: certain objects are provided by the GCC toolchain
if 'arm-none-eabi' in line:
return '[lib]/misc/' + object_name
return object_name
else:
test_re_obj_name = re.match(RE_LIBRARY_OBJECT_GCC, line)
if test_re_obj_name:
object_name = test_re_obj_name.group(1) + '/' + \
test_re_obj_name.group(2)
return '[lib]/' + object_name
else:
print "Unknown object name found in GCC map file: %s" % line
return '[misc]'
def parse_section_gcc(self, line):
""" Parse data from a section of gcc map file
examples:
0x00004308 0x7c ./BUILD/K64F/GCC_ARM/mbed-os/hal/targets/hal/TARGET_Freescale/TARGET_KPSDK_MCUS/spi_api.o
.text 0x00000608 0x198 ./BUILD/K64F/GCC_ARM/mbed-os/core/mbed-rtos/rtx/TARGET_CORTEX_M/TARGET_RTOS_M4_M7/TOOLCHAIN_GCC/HAL_CM4.o
Positional arguments:
line - the line to parse a section from
"""
is_fill = re.match(RE_FILL_SECTION_GCC, line)
if is_fill:
o_name = '[fill]'
o_size = int(is_fill.group(2), 16)
return [o_name, o_size]
is_section = re.match(RE_STD_SECTION_GCC, line)
if is_section:
o_size = int(is_section.group(2), 16)
if o_size:
o_name = self.parse_object_name_gcc(is_section.group(3))
return [o_name, o_size]
return ["", 0]
def parse_map_file_gcc(self, file_desc):
""" Main logic to decode gcc map files
Positional arguments:
file_desc - a stream object to parse as a gcc map file
"""
current_section = 'unknown'
with file_desc as infile:
for line in infile:
if line.startswith('Linker script and memory map'):
current_section = "unknown"
break
for line in infile:
next_section = self.check_new_section_gcc(line)
if next_section == "OUTPUT":
break
elif next_section:
current_section = next_section
object_name, object_size = self.parse_section_gcc(line)
self.module_add(object_name, object_size, current_section)
common_prefix = os.path.dirname(os.path.commonprefix([
o for o in self.modules.keys() if (o.endswith(".o") and not o.startswith("[lib]"))]))
new_modules = {}
for name, stats in self.modules.items():
if name.startswith("[lib]"):
new_modules[name] = stats
elif name.endswith(".o"):
new_modules[os.path.relpath(name, common_prefix)] = stats
else:
new_modules[name] = stats
self.modules = new_modules
def parse_object_name_armcc(self, line):
""" Parse object file
Positional arguments:
line - the line containing the object or library
"""
# simple object (not library)
if line[-2] == '.' and line[-1] == 'o':
return line
else:
is_obj = re.match(RE_OBJECT_ARMCC, line)
if is_obj:
object_name = os.path.basename(is_obj.group(1)) + '/' + is_obj.group(3)
return '[lib]/' + object_name
else:
print "Malformed input found when parsing ARMCC map: %s" % line
return '[misc]'
def parse_section_armcc(self, line):
""" Parse data from an armcc map file
Examples of armcc map file:
Base_Addr Size Type Attr Idx E Section Name Object
0x00000000 0x00000400 Data RO 11222 RESET startup_MK64F12.o
0x00000410 0x00000008 Code RO 49364 * !!!main c_w.l(__main.o)
Positional arguments:
line - the line to parse the section data from
"""
test_re_armcc = re.match(RE_ARMCC, line)
if test_re_armcc:
size = int(test_re_armcc.group(2), 16)
if test_re_armcc.group(4) == 'RO':
section = '.text'
else:
if test_re_armcc.group(3) == 'Data':
section = '.data'
elif test_re_armcc.group(3) == 'Zero':
section = '.bss'
else:
print "Malformed input found when parsing armcc map: %s" %\
line
# fall back so 'section' is always defined for the return below
section = 'unknown'
# check name of object or library
object_name = self.parse_object_name_armcc(\
test_re_armcc.group(6))
return [object_name, size, section]
else:
return ["", 0, ""]
def parse_object_name_iar(self, object_name):
""" Parse object file
Positional arguments:
line - the line containing the object or library
"""
# simple object (not library)
if object_name.endswith(".o"):
try:
return self.cmd_modules[object_name]
except KeyError:
return object_name
else:
return '[misc]'
def parse_section_iar(self, line):
""" Parse data from an IAR map file
Examples of IAR map file:
Section Kind Address Size Object
.intvec ro code 0x00000000 0x198 startup_MK64F12.o [15]
.rodata const 0x00000198 0x0 zero_init3.o [133]
.iar.init_table const 0x00008384 0x2c - Linker created -
Initializer bytes const 0x00000198 0xb2 <for P3 s0>
.data inited 0x20000000 0xd4 driverAtmelRFInterface.o [70]
.bss zero 0x20000598 0x318 RTX_Conf_CM.o [4]
.iar.dynexit uninit 0x20001448 0x204 <Block tail>
HEAP uninit 0x20001650 0x10000 <Block tail>
Positional arguments:
line - the line to parse section data from
"""
test_re_iar = re.match(RE_IAR, line)
if test_re_iar:
size = int(test_re_iar.group(4), 16)
if (test_re_iar.group(2) == 'const' or
test_re_iar.group(2) == 'ro code'):
section = '.text'
elif (test_re_iar.group(2) == 'zero' or
test_re_iar.group(2) == 'uninit'):
if test_re_iar.group(1)[0:4] == 'HEAP':
section = '.heap'
elif test_re_iar.group(1)[0:6] == 'CSTACK':
section = '.stack'
else:
section = '.bss' # default section
elif test_re_iar.group(2) == 'inited':
section = '.data'
else:
print "Malformed input found when parsing IAR map: %s" % line
# fall back so 'section' is always defined for the return below
section = 'unknown'
# lookup object in dictionary and return module name
object_name = self.parse_object_name_iar(test_re_iar.group(5))
return [object_name, size, section]
else:
return ["", 0, ""] # no valid entry
def parse_map_file_armcc(self, file_desc):
""" Main logic to decode armc5 map files
Positional arguments:
file_desc - a file like object to parse as an armc5 map file
"""
with file_desc as infile:
# Search area to parse
for line in infile:
if line.startswith(' Base Addr Size'):
break
# Start decoding the map file
for line in infile:
self.module_add(*self.parse_section_armcc(line))
common_prefix = os.path.dirname(os.path.commonprefix([
o for o in self.modules.keys() if (o.endswith(".o") and o != "anon$$obj.o" and not o.startswith("[lib]"))]))
new_modules = {}
for name, stats in self.modules.items():
if name == "anon$$obj.o" or name.startswith("[lib]"):
new_modules[name] = stats
elif name.endswith(".o"):
new_modules[os.path.relpath(name, common_prefix)] = stats
else:
new_modules[name] = stats
self.modules = new_modules
def check_new_library_iar(self, line):
"""
Searches for libraries and returns name. Example:
m7M_tls.a: [43]
"""
test_address_line = re.match(RE_LIBRARY_IAR, line)
if test_address_line:
return test_address_line.group(1)
else:
return ""
def check_new_object_lib_iar(self, line):
"""
Searches for objects within a library section and returns name. Example:
rt7M_tl.a: [44]
ABImemclr4.o 6
ABImemcpy_unaligned.o 118
ABImemset48.o 50
I64DivMod.o 238
I64DivZer.o 2
"""
test_address_line = re.match(RE_OBJECT_LIBRARY_IAR, line)
if test_address_line:
return test_address_line.group(1)
else:
return ""
def parse_iar_command_line(self, lines):
"""Parse the files passed on the command line to the iar linker
Positional arguments:
lines -- an iterator over the lines within a file
"""
for line in lines:
if line.startswith("*"):
break
is_cmdline_file = RE_CMDLINE_FILE_IAR.match(line)
if is_cmdline_file:
full_path = is_cmdline_file.group(1)
self.cmd_modules[os.path.basename(full_path)] = full_path
common_prefix = os.path.dirname(os.path.commonprefix(self.cmd_modules.values()))
self.cmd_modules = {s: os.path.relpath(f, common_prefix)
for s, f in self.cmd_modules.items()}
def parse_map_file_iar(self, file_desc):
""" Main logic to decode IAR map files
Positional arguments:
file_desc - a file like object to parse as an IAR map file
"""
with file_desc as infile:
self.parse_iar_command_line(infile)
for line in infile:
if line.startswith(' Section '):
break
for line in infile:
self.module_add(*self.parse_section_iar(line))
if line.startswith('*** MODULE SUMMARY'): # finish section
break
current_library = ""
for line in infile:
library = self.check_new_library_iar(line)
if library:
current_library = library
object_name = self.check_new_object_lib_iar(line)
if object_name and current_library:
print("Replacing module", object_name, current_library)
temp = '[lib]' + '/'+ current_library + '/'+ object_name
self.module_replace(object_name, temp)
def reduce_depth(self, depth):
"""
populates the short_modules attribute with a truncated module list
(1) depth = 1:
main.o
mbed-os
(2) depth = 2:
main.o
mbed-os/test.o
mbed-os/drivers
"""
if depth == 0 or depth == None:
self.short_modules = deepcopy(self.modules)
else:
self.short_modules = dict()
for module_name, v in self.modules.items():
split_name = module_name.split('/')
if split_name[0] == '':
split_name = split_name[1:]
new_name = "/".join(split_name[:depth])
self.short_modules.setdefault(new_name, {})
for section_idx, value in v.items():
self.short_modules[new_name].setdefault(section_idx, 0)
self.short_modules[new_name][section_idx] += self.modules[module_name][section_idx]
export_formats = ["json", "csv-ci", "table"]
def generate_output(self, export_format, depth, file_output=None):
""" Generates summary of memory map data
Positional arguments:
export_format - the format to dump
Keyword arguments:
file_desc - descriptor (either stdout or file)
depth - directory depth on report
Returns: generated string for the 'table' format, otherwise None
"""
self.reduce_depth(depth)
self.compute_report()
try:
if file_output:
file_desc = open(file_output, 'wb')
else:
file_desc = sys.stdout
except IOError as error:
print "I/O error({0}): {1}".format(error.errno, error.strerror)
return False
to_call = {'json': self.generate_json,
'csv-ci': self.generate_csv,
'table': self.generate_table}[export_format]
output = to_call(file_desc)
if file_desc is not sys.stdout:
file_desc.close()
return output
def generate_json(self, file_desc):
"""Generate a json file from a memory map
Positional arguments:
file_desc - the file to write out the final report to
"""
file_desc.write(json.dumps(self.mem_report, indent=4))
file_desc.write('\n')
return None
def generate_csv(self, file_desc):
"""Generate a CSV file from a memoy map
Positional arguments:
file_desc - the file to write out the final report to
"""
csv_writer = csv.writer(file_desc, delimiter=',',
quoting=csv.QUOTE_MINIMAL)
csv_module_section = []
csv_sizes = []
for i in sorted(self.short_modules):
for k in self.print_sections:
csv_module_section += [i+k]
csv_sizes += [self.short_modules[i][k]]
csv_module_section += ['static_ram']
csv_sizes += [self.mem_summary['static_ram']]
csv_module_section += ['total_flash']
csv_sizes += [self.mem_summary['total_flash']]
csv_writer.writerow(csv_module_section)
csv_writer.writerow(csv_sizes)
return None
def generate_table(self, file_desc):
"""Generate a table from a memoy map
Returns: string of the generated table
"""
# Create table
columns = ['Module']
columns.extend(self.print_sections)
table = PrettyTable(columns)
table.align["Module"] = "l"
for col in self.print_sections:
table.align[col] = 'r'
for i in sorted(self.short_modules):
row = [i]
for k in self.print_sections:
row.append(self.short_modules[i][k])
table.add_row(row)
subtotal_row = ['Subtotals']
for k in self.print_sections:
subtotal_row.append(self.subtotal[k])
table.add_row(subtotal_row)
output = table.get_string()
output += '\n'
output += "Total Static RAM memory (data + bss): %s bytes\n" % \
str(self.mem_summary['static_ram'])
output += "Total Flash memory (text + data): %s bytes\n" % \
str(self.mem_summary['total_flash'])
return output
toolchains = ["ARM", "ARM_STD", "ARM_MICRO", "GCC_ARM", "GCC_CR", "IAR"]
def compute_report(self):
""" Generates summary of memory usage for main areas
"""
for k in self.sections:
self.subtotal[k] = 0
for i in self.short_modules:
for k in self.sections:
self.short_modules[i].setdefault(k, 0)
self.subtotal[k] += self.short_modules[i][k]
self.mem_summary = {
'static_ram': (self.subtotal['.data'] + self.subtotal['.bss']),
'total_flash': (self.subtotal['.text'] + self.subtotal['.data']),
}
self.mem_report = []
for i in sorted(self.short_modules):
self.mem_report.append({
"module":i,
"size":{
k: self.short_modules[i][k] for k in self.print_sections
}
})
self.mem_report.append({
'summary': self.mem_summary
})
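# Illustrative shape of the generated report (commented out); the module names
# and sizes below are made-up example values:
#
#     [
#         {"module": "main.o", "size": {".text": 1024, ".data": 8, ".bss": 64}},
#         {"module": "mbed-os", "size": {".text": 4096, ".data": 16, ".bss": 128}},
#         {"summary": {"static_ram": 216, "total_flash": 5144}}
#     ]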
def parse(self, mapfile, toolchain):
""" Parse and decode map file depending on the toolchain
Positional arguments:
mapfile - the file name of the memory map file
toolchain - the toolchain used to create the file
"""
result = True
try:
with open(mapfile, 'r') as file_input:
if toolchain in ("ARM", "ARM_STD", "ARM_MICRO", "ARMC6"):
self.parse_map_file_armcc(file_input)
elif toolchain == "GCC_ARM" or toolchain == "GCC_CR":
self.parse_map_file_gcc(file_input)
elif toolchain == "IAR":
self.parse_map_file_iar(file_input)
else:
result = False
except IOError as error:
print "I/O error({0}): {1}".format(error.errno, error.strerror)
result = False
return result
def main():
"""Entry Point"""
version = '0.4.0'
# Parser handling
parser = argparse.ArgumentParser(
description="Memory Map File Analyser for ARM mbed\nversion %s" %
version)
parser.add_argument(
'file', type=argparse_filestring_type, help='memory map file')
parser.add_argument(
'-t', '--toolchain', dest='toolchain',
help='select a toolchain used to build the memory map file (%s)' %
", ".join(MemapParser.toolchains),
required=True,
type=argparse_uppercase_type(MemapParser.toolchains, "toolchain"))
parser.add_argument(
'-d', '--depth', dest='depth', type=int,
help='specify directory depth level to display report', required=False)
parser.add_argument(
'-o', '--output', help='output file name', required=False)
parser.add_argument(
'-e', '--export', dest='export', required=False, default='table',
type=argparse_lowercase_hyphen_type(MemapParser.export_formats,
'export format'),
help="export format (examples: %s: default)" %
", ".join(MemapParser.export_formats))
parser.add_argument('-v', '--version', action='version', version=version)
# Parse/run command
if len(sys.argv) <= 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
# Create memap object
memap = MemapParser()
# Parse and decode a map file
if args.file and args.toolchain:
if memap.parse(args.file, args.toolchain) is False:
sys.exit(0)
if args.depth is None:
depth = 2 # default depth level
else:
depth = args.depth
returned_string = None
# Write output in file
if args.output != None:
returned_string = memap.generate_output(args.export, \
depth, args.output)
else: # Write output in screen
returned_string = memap.generate_output(args.export, depth)
if args.export == 'table' and returned_string:
print returned_string
sys.exit(0)
if __name__ == "__main__":
main()
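# Example invocations (commented out), assuming this file is saved as memap.py
# and that a map file exists at the path shown:
#
#     python memap.py BUILD/K64F/GCC_ARM/app.map -t GCC_ARM -d 2
#     python memap.py BUILD/K64F/GCC_ARM/app.map -t GCC_ARM -e json -o memap.json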
|
|
#-*- coding: utf-8 -*-
'''
Created on 2017. 02. 13
Updated on 2017. 02. 13
'''
from __future__ import print_function
import os
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
class ExpBase(object):
def __init__(self):
pass
def get_order(self, _dataX):
order = []
for group, projects in _dataX.iteritems():
for project, data in projects.iteritems():
order.append((group, project))
return order
def get_order_items(self, _dataX):
order = []
for group, projects in _dataX.iteritems():
for project, items in projects.iteritems():
for itemID in items:
order.append((group, project, itemID))
return order
def get_array(self, _dataX, _colX, _orders=None):
arrayX = []
labels = list()
if _orders is None:
for group, projects in _dataX.iteritems():
for project, item in projects.iteritems():
arrayX.append(item[_colX])
labels.append(project)
else:
for group, project in _orders:
arrayX.append(_dataX[group][project][_colX])
labels.append(project)
return arrayX, labels
def get_array_items(self, _dataX, _colX, _orders=None):
arrayX = []
labels = list()
if _orders is None:
for group, projects in _dataX.iteritems():
for project, items in projects.iteritems():
for itemID, item in items.iteritems():
arrayX.append(item[_colX])
labels.append(itemID)
else:
for group, project, itemID in _orders:
if itemID not in _dataX[group][project]: continue
arrayX.append(_dataX[group][project][itemID][_colX])
labels.append(itemID)
return arrayX, labels #list(labels)
def load_results(self, _filename, _types):
f = open(_filename, 'r')
lines = f.readlines()
f.close()
titles = lines[0].strip().split(u'\t')[2:]
if len(titles)> len(_types)-2:
titles = titles[:len(_types)-2]
for x in range(len(titles)): titles[x] = titles[x].strip()
data = {}
for line in lines[1:]:
cols = line.strip().split(u'\t')
group = cols[0]
project = cols[1]
if group not in data: data[group] = {} # group
data[group][project] = [] #project
for x in range(2, len(cols) if len(_types) > len(cols) else len(_types)):
data[group][project].append(self.get_value(cols[x], _types[x]))
# if _types[x] == 'int': data[group][project].append(int(cols[x]))
# elif _types[x] == 'float': data[group][project].append(float(cols[x]))
# elif _types[x] == 'str': data[group][project].append(str(cols[x]))
# else: data[group][project].append(str(cols[x]))
return titles, data
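# Illustrative input for load_results (commented out): a tab-separated file
# whose first two columns are group and project, followed by the columns named
# in the header; the column names and values below are made up.
#
#     Group<TAB>Project<TAB>Files<TAB>Methods<TAB>Precision
#     apache<TAB>commons-io<TAB>120<TAB>1034<TAB>0.85
#
# With _types = ['str', 'str', 'int', 'int', 'float'] this yields
#     titles == ['Files', 'Methods', 'Precision']
#     data == {'apache': {'commons-io': [120, 1034, 0.85]}}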
def get_value(self, _value, _type):
if _value == '':
return 0
if _type == 'int': return int(_value)
elif _type == 'float': return float(_value)
elif _type == 'str': return str(_value)
else: return str(_value)
def load_results_items(self, _filename, _types):
f = open(_filename, 'r')
lines = f.readlines()
f.close()
titles = lines[0].strip().split(u'\t')[3:]
if len(titles) > len(_types) - 3:
titles = titles[:len(_types) - 3]
for x in range(len(titles)): titles[x] = titles[x].strip()
data = {}
for line in lines[1:]:
cols = line.strip().split(u'\t')
group = cols[0]
project = cols[1]
itemID = cols[2]
if group not in data: data[group] = {} # group
if project not in data[group]: data[group][project] = {} # project
data[group][project][itemID] = [] # itemID
for x in range(3, 3+len(titles)):
data[group][project][itemID].append(self.get_value(cols[x], _types[x]))
return titles, data
def load_dict_data(self, _filename):
f = open(_filename, 'r')
text = f.read()
f.close()
parts = eval(text)
return parts
def draw_lineargress(self, _x, _y, _title, _xlabel, _ylabel, _filename):
plt.plot(_x, _y, 'ro')
#plt.axis([0, 6, 0, 20])
plt.title(_title)
plt.xlabel(_xlabel)
plt.ylabel(_ylabel)
filepath = os.path.join(self.OUTPUT, _filename)
plt.savefig(filepath)
plt.clf() # Clear figure
#plt.show()
def draw_scatter(self, _title, _Dx, _Dy, _xlabels, _ylabels, _datalabels, _filename):
plt.figure(num=None, figsize=(16, 12), dpi=100, facecolor='w', edgecolor='k')
plt.rc('font', **{'size':22})
plt.rc('xtick', labelsize=20)
plt.rc('ytick', labelsize=20)
plt.plot(_Dx, _Dy, 'ro')
plt.title(_title)
plt.xlabel(_xlabels)
plt.ylabel(_ylabels)
for label, x, y in zip(_datalabels, _Dx, _Dy):
#if x <= 0.5 or y >= 0.4: continue
plt.annotate(label,
xy=(x, y), xytext=(0, 0),
textcoords='offset points', ha='left', va='bottom',
#bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),
#arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'),
fontsize = 15
)
filepath = os.path.join(self.OUTPUT, _filename)
plt.savefig(filepath)
plt.clf() # Clear figure
plt.close()
#plt.show()
def draw_scatter2(self, _title, _Dx, _Dy, _xlabel, _ylabel, _filepath):
#N = 50
x = np.asarray(_Dx)
y = np.asarray(_Dy)
plt.figure(num=None, figsize=(16, 12), dpi=100, facecolor='w', edgecolor='k')
plt.rc('font', **{'size': 22})
plt.rc('xtick', labelsize=20)
plt.rc('ytick', labelsize=20)
plt.title(_title)
plt.xlabel(_xlabel)
plt.ylabel(_ylabel)
area = np.pi * (5 * np.random.rand(len(_Dx))) ** 2 # 0 to 15 point radii
plt.scatter(x, y, s=area, alpha=0.5) #c=colors,
plt.savefig(_filepath)
plt.clf() # Clear figure
plt.close()
#plt.show()
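# Illustrative usage sketch (commented out). ExpBase is meant to be subclassed;
# the OUTPUT attribute used by draw_lineargress/draw_scatter is expected to be
# provided by the subclass. File names and column types below are hypothetical.
#
#     class MyExp(ExpBase):
#         OUTPUT = '/tmp/exp_output'
#
#     exp = MyExp()
#     titles, data = exp.load_results('results.txt', ['str', 'str', 'int', 'float'])
#     xs, _ = exp.get_array(data, 0)
#     ys, _ = exp.get_array(data, 1)
#     exp.draw_scatter2('Files vs Precision', xs, ys, titles[0], titles[1],
#                       os.path.join(MyExp.OUTPUT, 'scatter.png'))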
###############################################################################################################
###############################################################################################################
if __name__ == "__main__":
pass
|
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo.config import cfg
import webob
from cinder.api import extensions
from cinder.api.v2 import snapshot_metadata
from cinder.api.v2 import snapshots
import cinder.db
from cinder import exception
from cinder.openstack.common import jsonutils
from cinder import test
from cinder.tests.api import fakes
CONF = cfg.CONF
def return_create_snapshot_metadata_max(context,
snapshot_id,
metadata,
delete):
return stub_max_snapshot_metadata()
def return_create_snapshot_metadata(context, snapshot_id, metadata, delete):
return stub_snapshot_metadata()
def return_create_snapshot_metadata_insensitive(context, snapshot_id,
metadata, delete):
return stub_snapshot_metadata_insensitive()
def return_new_snapshot_metadata(context, snapshot_id, metadata, delete):
return stub_new_snapshot_metadata()
def return_snapshot_metadata(context, snapshot_id):
if not isinstance(snapshot_id, str) or not len(snapshot_id) == 36:
msg = 'id %s must be a uuid in return snapshot metadata' % snapshot_id
raise Exception(msg)
return stub_snapshot_metadata()
def return_empty_snapshot_metadata(context, snapshot_id):
return {}
def return_empty_container_metadata(context, snapshot_id, metadata, delete):
return {}
def delete_snapshot_metadata(context, snapshot_id, key):
pass
def stub_snapshot_metadata():
metadata = {
"key1": "value1",
"key2": "value2",
"key3": "value3",
}
return metadata
def stub_snapshot_metadata_insensitive():
metadata = {
"key1": "value1",
"key2": "value2",
"key3": "value3",
"KEY4": "value4",
}
return metadata
def stub_new_snapshot_metadata():
metadata = {
'key10': 'value10',
'key99': 'value99',
'KEY20': 'value20',
}
return metadata
def stub_max_snapshot_metadata():
metadata = {"metadata": {}}
for num in range(CONF.quota_metadata_items):
metadata['metadata']['key%i' % num] = "blah"
return metadata
def return_snapshot(context, snapshot_id):
return {'id': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
'name': 'fake',
'status': 'available',
'metadata': {}}
def return_volume(context, volume_id):
return {'id': 'fake-vol-id',
'size': 100,
'name': 'fake',
'host': 'fake-host',
'status': 'available',
'encryption_key_id': None,
'volume_type_id': None,
'migration_status': None,
'metadata': {},
'project_id': context.project_id}
def return_snapshot_nonexistent(context, snapshot_id):
raise exception.SnapshotNotFound('bogus test message')
def fake_update_snapshot_metadata(self, context, snapshot, diff):
pass
class SnapshotMetaDataTest(test.TestCase):
def setUp(self):
super(SnapshotMetaDataTest, self).setUp()
self.volume_api = cinder.volume.api.API()
self.stubs.Set(cinder.db, 'volume_get', return_volume)
self.stubs.Set(cinder.db, 'snapshot_get', return_snapshot)
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_snapshot_metadata)
self.stubs.Set(self.volume_api, 'update_snapshot_metadata',
fake_update_snapshot_metadata)
self.ext_mgr = extensions.ExtensionManager()
self.ext_mgr.extensions = {}
self.snapshot_controller = snapshots.SnapshotsController(self.ext_mgr)
self.controller = snapshot_metadata.Controller()
self.req_id = str(uuid.uuid4())
self.url = '/v2/fake/snapshots/%s/metadata' % self.req_id
snap = {"volume_size": 100,
"volume_id": "fake-vol-id",
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "zone1:host1",
"host": "fake-host",
"metadata": {}}
body = {"snapshot": snap}
req = fakes.HTTPRequest.blank('/v2/snapshots')
self.snapshot_controller.create(req, body)
def test_index(self):
req = fakes.HTTPRequest.blank(self.url)
res_dict = self.controller.index(req, self.req_id)
expected = {
'metadata': {
'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
},
}
self.assertEqual(expected, res_dict)
def test_index_nonexistent_snapshot(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_snapshot_nonexistent)
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.index, req, self.url)
def test_index_no_data(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_empty_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
res_dict = self.controller.index(req, self.req_id)
expected = {'metadata': {}}
self.assertEqual(expected, res_dict)
def test_show(self):
req = fakes.HTTPRequest.blank(self.url + '/key2')
res_dict = self.controller.show(req, self.req_id, 'key2')
expected = {'meta': {'key2': 'value2'}}
self.assertEqual(expected, res_dict)
def test_show_nonexistent_snapshot(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_snapshot_nonexistent)
req = fakes.HTTPRequest.blank(self.url + '/key2')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, self.req_id, 'key2')
def test_show_meta_not_found(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_empty_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key6')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, self.req_id, 'key6')
def test_delete(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_snapshot_metadata)
self.stubs.Set(cinder.db, 'snapshot_metadata_delete',
delete_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key2')
req.method = 'DELETE'
res = self.controller.delete(req, self.req_id, 'key2')
self.assertEqual(200, res.status_int)
def test_delete_nonexistent_snapshot(self):
self.stubs.Set(cinder.db, 'snapshot_get',
return_snapshot_nonexistent)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, self.req_id, 'key1')
def test_delete_meta_not_found(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_empty_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key6')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, self.req_id, 'key6')
def test_create(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_empty_snapshot_metadata)
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank('/v2/snapshot_metadata')
req.method = 'POST'
req.content_type = "application/json"
body = {"metadata": {"key1": "value1",
"key2": "value2",
"key3": "value3"}}
req.body = jsonutils.dumps(body)
res_dict = self.controller.create(req, self.req_id, body)
self.assertEqual(body, res_dict)
def test_create_with_keys_in_uppercase_and_lowercase(self):
# if keys are passed in both uppercase and lowercase, the response should
# contain only the keys the server actually added
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_empty_snapshot_metadata)
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata_insensitive)
req = fakes.HTTPRequest.blank('/v2/snapshot_metadata')
req.method = 'POST'
req.content_type = "application/json"
body = {"metadata": {"key1": "value1",
"KEY1": "value1",
"key2": "value2",
"KEY2": "value2",
"key3": "value3",
"KEY4": "value4"}}
expected = {"metadata": {"key1": "value1",
"key2": "value2",
"key3": "value3",
"KEY4": "value4"}}
req.body = jsonutils.dumps(body)
res_dict = self.controller.create(req, self.req_id, body)
self.assertEqual(expected, res_dict)
def test_create_empty_body(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'POST'
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, self.req_id, None)
def test_create_item_empty_key(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, self.req_id, body)
def test_create_item_key_too_long(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {("a" * 260): "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
req, self.req_id, body)
def test_create_nonexistent_snapshot(self):
self.stubs.Set(cinder.db, 'snapshot_get',
return_snapshot_nonexistent)
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_snapshot_metadata)
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank('/v2/snapshot_metadata')
req.method = 'POST'
req.content_type = "application/json"
body = {"metadata": {"key9": "value9"}}
req.body = jsonutils.dumps(body)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.create, req, self.req_id, body)
def test_update_all(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_new_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {
'metadata': {
'key10': 'value10',
'key99': 'value99',
'KEY20': 'value20',
},
}
req.body = jsonutils.dumps(expected)
res_dict = self.controller.update_all(req, self.req_id, expected)
self.assertEqual(expected, res_dict)
def test_update_all_with_keys_in_uppercase_and_lowercase(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_get',
return_create_snapshot_metadata)
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_new_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
body = {
'metadata': {
'key10': 'value10',
'KEY10': 'value10',
'key99': 'value99',
'KEY20': 'value20',
},
}
expected = {
'metadata': {
'key10': 'value10',
'key99': 'value99',
'KEY20': 'value20',
},
}
req.body = jsonutils.dumps(expected)
res_dict = self.controller.update_all(req, self.req_id, body)
self.assertEqual(expected, res_dict)
def test_update_all_empty_container(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_empty_container_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {'metadata': {}}
req.body = jsonutils.dumps(expected)
res_dict = self.controller.update_all(req, self.req_id, expected)
self.assertEqual(expected, res_dict)
def test_update_all_malformed_container(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {'meta': {}}
req.body = jsonutils.dumps(expected)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update_all, req, self.req_id,
expected)
def test_update_all_malformed_data(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
expected = {'metadata': ['asdf']}
req.body = jsonutils.dumps(expected)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update_all, req, self.req_id,
expected)
def test_update_all_nonexistent_snapshot(self):
self.stubs.Set(cinder.db, 'snapshot_get', return_snapshot_nonexistent)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'PUT'
req.content_type = "application/json"
body = {'metadata': {'key10': 'value10'}}
req.body = jsonutils.dumps(body)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update_all, req, '100', body)
def test_update_item(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
res_dict = self.controller.update(req, self.req_id, 'key1', body)
expected = {'meta': {'key1': 'value1'}}
self.assertEqual(expected, res_dict)
def test_update_item_nonexistent_snapshot(self):
self.stubs.Set(cinder.db, 'snapshot_get',
return_snapshot_nonexistent)
req = fakes.HTTPRequest.blank(
'/v2/fake/snapshots/asdf/metadata/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update, req, self.req_id, 'key1',
body)
def test_update_item_empty_body(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.req_id, 'key1',
None)
def test_update_item_empty_key(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.req_id, '', body)
def test_update_item_key_too_long(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {("a" * 260): "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.update,
req, self.req_id, ("a" * 260), body)
def test_update_item_value_too_long(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"key1": ("a" * 260)}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.update,
req, self.req_id, "key1", body)
def test_update_item_too_many_keys(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/key1')
req.method = 'PUT'
body = {"meta": {"key1": "value1", "key2": "value2"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.req_id, 'key1',
body)
def test_update_item_body_uri_mismatch(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url + '/bad')
req.method = 'PUT'
body = {"meta": {"key1": "value1"}}
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req, self.req_id, 'bad',
body)
def test_invalid_metadata_items_on_create(self):
self.stubs.Set(cinder.db, 'snapshot_metadata_update',
return_create_snapshot_metadata)
req = fakes.HTTPRequest.blank(self.url)
req.method = 'POST'
req.headers["content-type"] = "application/json"
#test for long key
data = {"metadata": {"a" * 260: "value1"}}
req.body = jsonutils.dumps(data)
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.create, req, self.req_id, data)
#test for long value
data = {"metadata": {"key": "v" * 260}}
req.body = jsonutils.dumps(data)
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller.create, req, self.req_id, data)
#test for empty key.
data = {"metadata": {"": "value1"}}
req.body = jsonutils.dumps(data)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, self.req_id, data)
|
|
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from neutron_lib import constants
from oslo_utils import uuidutils
from neutron.agent.common import ovs_lib
from neutron.agent.linux import bridge_lib
from neutron.agent.linux import tc_lib
from neutron.common import utils
from neutron.services.qos import qos_consts
from neutron.tests.common.agents import l2_extensions
from neutron.tests.fullstack import base
from neutron.tests.fullstack.resources import environment
from neutron.tests.fullstack.resources import machine
from neutron.tests.unit import testlib_api
from neutron.conf.plugins.ml2.drivers import linuxbridge as \
linuxbridge_agent_config
from neutron.plugins.ml2.drivers.linuxbridge.agent import \
linuxbridge_neutron_agent as linuxbridge_agent
from neutron.plugins.ml2.drivers.openvswitch.mech_driver import \
mech_openvswitch as mech_ovs
load_tests = testlib_api.module_load_tests
BANDWIDTH_BURST = 100
BANDWIDTH_LIMIT = 500
DSCP_MARK = 16
class BaseQoSRuleTestCase(base.BaseFullStackTestCase):
def setUp(self):
host_desc = [environment.HostDescription(
l3_agent=False,
l2_agent_type=self.l2_agent_type)]
env_desc = environment.EnvironmentDescription(qos=True)
env = environment.Environment(env_desc, host_desc)
super(BaseQoSRuleTestCase, self).setUp(env)
self.tenant_id = uuidutils.generate_uuid()
self.network = self.safe_client.create_network(self.tenant_id,
'network-test')
self.subnet = self.safe_client.create_subnet(
self.tenant_id, self.network['id'],
cidr='10.0.0.0/24',
gateway_ip='10.0.0.1',
name='subnet-test',
enable_dhcp=False)
def _create_qos_policy(self):
return self.safe_client.create_qos_policy(
self.tenant_id, 'fs_policy', 'Fullstack testing policy',
shared='False')
def _prepare_vm_with_qos_policy(self, rule_add_functions):
qos_policy = self._create_qos_policy()
qos_policy_id = qos_policy['id']
port = self.safe_client.create_port(
self.tenant_id, self.network['id'],
self.environment.hosts[0].hostname,
qos_policy_id)
for rule_add in rule_add_functions:
rule_add(qos_policy)
vm = self.useFixture(
machine.FakeFullstackMachine(
self.environment.hosts[0],
self.network['id'],
self.tenant_id,
self.safe_client,
neutron_port=port))
return vm, qos_policy
class TestBwLimitQoS(BaseQoSRuleTestCase):
scenarios = [
("ovs", {'l2_agent_type': constants.AGENT_TYPE_OVS}),
("linuxbridge", {'l2_agent_type': constants.AGENT_TYPE_LINUXBRIDGE})
]
def _wait_for_bw_rule_applied_ovs_agent(self, vm, limit, burst):
utils.wait_until_true(
lambda: vm.bridge.get_egress_bw_limit_for_port(
vm.port.name) == (limit, burst))
def _wait_for_bw_rule_applied_linuxbridge_agent(self, vm, limit, burst):
port_name = linuxbridge_agent.LinuxBridgeManager.get_tap_device_name(
vm.neutron_port['id'])
tc = tc_lib.TcCommand(
port_name,
linuxbridge_agent_config.DEFAULT_KERNEL_HZ_VALUE,
namespace=vm.host.host_namespace
)
utils.wait_until_true(
lambda: tc.get_filters_bw_limits() == (limit, burst))
def _wait_for_bw_rule_applied(self, vm, limit, burst):
if isinstance(vm.bridge, ovs_lib.OVSBridge):
self._wait_for_bw_rule_applied_ovs_agent(vm, limit, burst)
if isinstance(vm.bridge, bridge_lib.BridgeDevice):
self._wait_for_bw_rule_applied_linuxbridge_agent(vm, limit, burst)
def _wait_for_bw_rule_removed(self, vm):
# No values are provided when port doesn't have qos policy
self._wait_for_bw_rule_applied(vm, None, None)
def _add_bw_limit_rule(self, limit, burst, qos_policy):
qos_policy_id = qos_policy['id']
rule = self.safe_client.create_bandwidth_limit_rule(
self.tenant_id, qos_policy_id, limit, burst)
# Make it consistent with GET reply
rule['type'] = qos_consts.RULE_TYPE_BANDWIDTH_LIMIT
rule['qos_policy_id'] = qos_policy_id
qos_policy['rules'].append(rule)
def test_bw_limit_qos_policy_rule_lifecycle(self):
new_limit = BANDWIDTH_LIMIT + 100
# Create port with qos policy attached
vm, qos_policy = self._prepare_vm_with_qos_policy(
[functools.partial(self._add_bw_limit_rule,
BANDWIDTH_LIMIT, BANDWIDTH_BURST)])
bw_rule = qos_policy['rules'][0]
self._wait_for_bw_rule_applied(vm, BANDWIDTH_LIMIT, BANDWIDTH_BURST)
qos_policy_id = qos_policy['id']
self.client.delete_bandwidth_limit_rule(bw_rule['id'], qos_policy_id)
self._wait_for_bw_rule_removed(vm)
# Create new rule with no given burst value, in such case ovs and lb
# agent should apply burst value as
# bandwidth_limit * qos_consts.DEFAULT_BURST_RATE
new_expected_burst = int(
new_limit * qos_consts.DEFAULT_BURST_RATE
)
new_rule = self.safe_client.create_bandwidth_limit_rule(
self.tenant_id, qos_policy_id, new_limit)
self._wait_for_bw_rule_applied(vm, new_limit, new_expected_burst)
# Update qos policy rule id
self.client.update_bandwidth_limit_rule(
new_rule['id'], qos_policy_id,
body={'bandwidth_limit_rule': {'max_kbps': BANDWIDTH_LIMIT,
'max_burst_kbps': BANDWIDTH_BURST}})
self._wait_for_bw_rule_applied(vm, BANDWIDTH_LIMIT, BANDWIDTH_BURST)
# Remove qos policy from port
self.client.update_port(
vm.neutron_port['id'],
body={'port': {'qos_policy_id': None}})
self._wait_for_bw_rule_removed(vm)
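# Worked example for the default-burst case above: assuming
# qos_consts.DEFAULT_BURST_RATE is 0.8 (its typical value), a rule created with
# new_limit = 600 kbps and no explicit burst should show up on the agent as a
# burst of int(600 * 0.8) = 480 kbps.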
class TestDscpMarkingQoS(BaseQoSRuleTestCase):
def setUp(self):
self.l2_agent_type = constants.AGENT_TYPE_OVS
super(TestDscpMarkingQoS, self).setUp()
def _wait_for_dscp_marking_rule_applied(self, vm, dscp_mark):
l2_extensions.wait_until_dscp_marking_rule_applied(
vm.bridge, vm.port.name, dscp_mark)
def _wait_for_dscp_marking_rule_removed(self, vm):
self._wait_for_dscp_marking_rule_applied(vm, None)
def _add_dscp_rule(self, dscp_mark, qos_policy):
qos_policy_id = qos_policy['id']
rule = self.safe_client.create_dscp_marking_rule(
self.tenant_id, qos_policy_id, dscp_mark)
# Make it consistent with GET reply
rule['type'] = qos_consts.RULE_TYPE_DSCP_MARKING
rule['qos_policy_id'] = qos_policy_id
qos_policy['rules'].append(rule)
def test_dscp_qos_policy_rule_lifecycle(self):
new_dscp_mark = DSCP_MARK + 8
# Create port with qos policy attached
vm, qos_policy = self._prepare_vm_with_qos_policy(
[functools.partial(self._add_dscp_rule, DSCP_MARK)])
dscp_rule = qos_policy['rules'][0]
self._wait_for_dscp_marking_rule_applied(vm, DSCP_MARK)
qos_policy_id = qos_policy['id']
self.client.delete_dscp_marking_rule(dscp_rule['id'], qos_policy_id)
self._wait_for_dscp_marking_rule_removed(vm)
# Create new rule
new_rule = self.safe_client.create_dscp_marking_rule(
self.tenant_id, qos_policy_id, new_dscp_mark)
self._wait_for_dscp_marking_rule_applied(vm, new_dscp_mark)
# Update qos policy rule id
self.client.update_dscp_marking_rule(
new_rule['id'], qos_policy_id,
body={'dscp_marking_rule': {'dscp_mark': DSCP_MARK}})
self._wait_for_dscp_marking_rule_applied(vm, DSCP_MARK)
# Remove qos policy from port
self.client.update_port(
vm.neutron_port['id'],
body={'port': {'qos_policy_id': None}})
self._wait_for_dscp_marking_rule_removed(vm)
class TestQoSWithL2Population(base.BaseFullStackTestCase):
def setUp(self):
# We limit this test to using the openvswitch mech driver, because DSCP
# is presently not implemented for Linux Bridge. The 'rule_types' API
# call only returns rule types that are supported by all configured
# mech drivers. So in a fullstack scenario, where both the OVS and the
# Linux Bridge mech drivers are configured, the DSCP rule type will be
# unavailable since it is not implemented in Linux Bridge.
mech_driver = 'openvswitch'
host_desc = [] # No need to register agents for this test case
env_desc = environment.EnvironmentDescription(qos=True, l2_pop=True,
mech_drivers=mech_driver)
env = environment.Environment(env_desc, host_desc)
super(TestQoSWithL2Population, self).setUp(env)
def test_supported_qos_rule_types(self):
res = self.client.list_qos_rule_types()
rule_types = {t['type'] for t in res['rule_types']}
expected_rules = (
set(mech_ovs.OpenvswitchMechanismDriver.supported_qos_rule_types))
self.assertEqual(expected_rules, rule_types)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import _add_pooling
from ast import literal_eval
def _get_input_output_name(net, node, index=0):
name = node['name']
inputs = node['inputs']
if index == 'all':
input_name = [_get_node_name(net, inputs[idx][0]) for idx in range(len(inputs))]
elif type(index) == int:
input_name = _get_node_name(net, inputs[0][0])
else:
input_name = [_get_node_name(net, inputs[idx][0]) for idx in index]
return input_name, name
def _get_node_name(net, node_id):
return net['nodes'][node_id]['name']
def _get_node_shape(net, node_id):
return net['nodes'][node_id]['shape']
def _get_attrs(node):
"""get attribute dict from node
This functions keeps backward compatibility
for both attr and attrs key in the json field.
Parameters
----------
node : dict
The json graph Node
Returns
-------
attrs : dict
The attr dict, returns empty dict if
the field does not exist.
"""
if 'attrs' in node:
return node['attrs']
elif 'attr' in node:
return node['attr']
else:
return {}
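# Illustrative node examples for _get_attrs (commented out): both of the
# hypothetical json nodes below yield {'act_type': 'relu'}, while a node with
# neither key yields {}.
#
#     {"op": "Activation", "name": "relu0", "attrs": {"act_type": "relu"}, ...}
#     {"op": "Activation", "name": "relu0", "attr":  {"act_type": "relu"}, ...}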
# TODO These operators still need to be converted (listing in order of priority):
# High priority:
# mxnet.symbol.repeat -> builder.add_repeat to flatten and repeat the NDArray sequence
# mxnet.symbol.Crop -> builder.add_crop to crop image along spatial dimensions
# mxnet.symbol.Pad -> builder.add_padding putting 0's on height and width for tensor
# Low Priority:
# depthwise separable convolution support through groups in builder.add_convolution
# add_optional -> for all RNNs defining what goes in and out (to define beam search or if input is streaming)
# mx.symbol.Embedding -> add_embedding takes indices, word ids from dict that is outside coreml or
# in pipeline only if we have text mapping to indices
# FusedRNNCell -> add_bidirlstm
# add_unilstm -> reverse_input param true as second and concat on outputs
# Do vanilla (0.9 mxnet) lstm, gru, vanilla_rnn
def convert_reshape(net, node, module, builder):
"""Converts a reshape layer from mxnet to coreml.
This doesn't currently handle the deprecated parameters for the reshape layer.
Parameters
----------
network: net
An mxnet network object.
layer: node
Node to convert.
module: module
A module for MXNet
builder: NeuralNetworkBuilder
A neural network builder object.
"""
input_name, output_name = _get_input_output_name(net, node)
name = node['name']
target_shape = node['shape']
if any(item <= 0 for item in target_shape):
raise NotImplementedError('Special dimensional values less than or equal to 0 are not supported yet. '
'Feel free to file an issue here: https://github.com/dmlc/mxnet/issues.')
if 'reverse' in node and node['reverse'] == 'True':
raise NotImplementedError('"reverse" parameter is not supported by yet.'
'Feel free to file an issue here: https://github.com/dmlc/mxnet/issues.')
mode = 0 # CHANNEL_FIRST
builder.add_reshape(name, input_name, output_name, target_shape, mode)
def convert_transpose(net, node, module, builder):
"""Convert a transpose layer from mxnet to coreml.
Parameters
----------
network: net
A mxnet network object.
layer: node
Node to convert.
module: module
A module for MXNet
builder: NeuralNetworkBuilder
A neural network builder object.
"""
input_name, output_name = _get_input_output_name(net, node)
name = node['name']
param = _get_attrs(node)
axes = literal_eval(param['axes'])
builder.add_permute(name, axes, input_name, output_name)
def convert_flatten(net, node, module, builder):
"""Convert a flatten layer from mxnet to coreml.
Parameters
----------
network: net
A mxnet network object.
layer: node
Node to convert.
module: module
A module for MXNet
builder: NeuralNetworkBuilder
A neural network builder object.
"""
input_name, output_name = _get_input_output_name(net, node)
name = node['name']
mode = 0 # CHANNEL_FIRST
builder.add_flatten(name, mode, input_name, output_name)
def convert_softmax(net, node, module, builder):
"""Convert a softmax layer from mxnet to coreml.
Parameters
----------
network: net
A mxnet network object.
layer: node
Node to convert.
module: module
A module for MXNet
builder: NeuralNetworkBuilder
A neural network builder object.
"""
input_name, output_name = _get_input_output_name(net, node)
name = node['name']
builder.add_softmax(name=name,
input_name=input_name,
output_name=output_name)
def convert_activation(net, node, module, builder):
"""Convert an activation layer from mxnet to coreml.
Parameters
----------
network: net
A mxnet network object.
layer: node
Node to convert.
module: module
A module for MXNet
builder: NeuralNetworkBuilder
A neural network builder object.
"""
input_name, output_name = _get_input_output_name(net, node)
name = node['name']
mx_non_linearity = _get_attrs(node)['act_type']
#TODO add SCALED_TANH, SOFTPLUS, SOFTSIGN, SIGMOID_HARD, LEAKYRELU, PRELU, ELU, PARAMETRICSOFTPLUS, THRESHOLDEDRELU, LINEAR
if mx_non_linearity == 'relu':
non_linearity = 'RELU'
elif mx_non_linearity == 'tanh':
non_linearity = 'TANH'
elif mx_non_linearity == 'sigmoid':
non_linearity = 'SIGMOID'
else:
raise TypeError('Unknown activation type %s' % mx_non_linearity)
builder.add_activation(name = name,
non_linearity = non_linearity,
input_name = input_name,
output_name = output_name)
def convert_elementwise_add(net, node, module, builder):
"""Convert an elementwise add layer from mxnet to coreml.
Parameters
----------
network: net
A mxnet network object.
layer: node
Node to convert.
module: module
A module for MXNet
builder: NeuralNetworkBuilder
A neural network builder object.
"""
input_names, output_name = _get_input_output_name(net, node, [0, 1])
name = node['name']
builder.add_elementwise(name, input_names, output_name, 'ADD')
def convert_dense(net, node, module, builder):
"""Convert a dense layer from mxnet to coreml.
Parameters
----------
network: net
An MXNet network object.
layer: node
Node to convert.
module: module
An MXNet module.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
input_name, output_name = _get_input_output_name(net, node)
has_bias = True
name = node['name']
inputs = node['inputs']
args, _ = module.get_params()
W = args[_get_node_name(net, inputs[1][0])].asnumpy()
if has_bias:
Wb = args[_get_node_name(net, inputs[2][0])].asnumpy()
else:
Wb = None
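# MXNet stores FullyConnected weights as (num_hidden, input_dim); CoreML's
# inner product layer uses the same (output_channels, input_channels) layout,
# so the matrix can be passed through without a transpose.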
nC, nB = W.shape
builder.add_inner_product(
name=name,
W=W,
b=Wb,
input_channels=nB,
output_channels=nC,
has_bias=has_bias,
input_name=input_name,
output_name=output_name
)
def convert_convolution(net, node, module, builder):
"""Convert a convolution layer from mxnet to coreml.
Parameters
----------
network: net
An MXNet network object.
layer: node
Node to convert.
module: module
An MXNet module.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
input_name, output_name = _get_input_output_name(net, node)
name = node['name']
param = _get_attrs(node)
inputs = node['inputs']
args, _ = module.get_params()
if 'no_bias' in param.keys():
has_bias = not literal_eval(param['no_bias'])
else:
has_bias = True
if 'pad' in param.keys() and literal_eval(param['pad']) != (0, 0):
pad = literal_eval(param['pad'])
builder.add_padding(
name=name+"_pad",
left=pad[1],
right=pad[1],
top=pad[0],
bottom=pad[0],
value=0,
input_name=input_name,
output_name=name+"_pad_output")
input_name = name+"_pad_output"
border_mode = "valid"
n_filters = int(param['num_filter'])
W = args[_get_node_name(net, inputs[1][0])].asnumpy()
if has_bias:
Wb = args[_get_node_name(net, inputs[2][0])].asnumpy()
else:
Wb = None
channels = W.shape[1]
stride_height = 1
stride_width = 1
if 'stride' in param.keys():
stride_height, stride_width = literal_eval(param['stride'])
kernel_height, kernel_width = literal_eval(param['kernel'])
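# MXNet convolution weights are laid out as (num_filter, channels, kernel_h, kernel_w);
# CoreML expects (height, width, kernel_channels, output_channels), hence the transpose.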
W = W.transpose((2, 3, 1, 0))
builder.add_convolution(
name=name,
kernel_channels=channels,
output_channels=n_filters,
height=kernel_height,
width=kernel_width,
stride_height=stride_height,
stride_width=stride_width,
border_mode=border_mode,
groups=1,
W=W,
b=Wb,
has_bias=has_bias,
is_deconv=False,
output_shape=None,
input_name=input_name,
output_name=output_name)
def convert_pooling(net, node, module, builder):
"""Convert a pooling layer from mxnet to coreml.
Parameters
----------
network: net
An MXNet network object.
layer: node
Node to convert.
module: module
An MXNet module.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
input_name, output_name = _get_input_output_name(net, node)
name = node['name']
param = _get_attrs(node)
layer_type_mx = param['pool_type']
if layer_type_mx == 'max':
layer_type = 'MAX'
elif layer_type_mx == 'avg':
layer_type = 'AVERAGE'
else:
raise TypeError("Pooling type %s not supported" % layer_type_mx)
# Add padding if there is any
if 'pad' in param.keys() and literal_eval(param['pad']) != (0, 0):
pad = literal_eval(param['pad'])
builder.add_padding(
name=name+"_pad",
left=pad[1],
right=pad[1],
top=pad[0],
bottom=pad[0],
value=0,
input_name=input_name,
output_name=name+"_pad_output")
input_name = name+"_pad_output"
stride_height = 1
stride_width = 1
if 'stride' in param.keys():
stride_height, stride_width = literal_eval(param['stride'])
kernel_width, kernel_height = literal_eval(param['kernel'])
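# MXNet's pooling_convention controls how partial pooling windows are handled:
# 'valid' floors the output size (partial windows dropped) while 'full' rounds up,
# which corresponds to CoreML's INCLUDE_LAST_PIXEL padding type.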
type_map = {'valid': 'VALID', 'full': 'INCLUDE_LAST_PIXEL'}
padding_type = param['pooling_convention'] if 'pooling_convention' in param else 'valid'
if padding_type not in type_map:
raise KeyError("%s type is not supported in this converter. It is a Github issue.")
padding_type = type_map[padding_type]
if 'global_pool' in param.keys():
is_global = literal_eval(param['global_pool'])
else:
is_global = False
# For reasons why we are not using the standard builder but having our own implementation,
# see the function documentation.
_add_pooling.add_pooling_with_padding_types(
builder=builder,
name=name,
height=kernel_height,
width=kernel_width,
stride_height=stride_height,
stride_width=stride_width,
layer_type=layer_type,
padding_type=padding_type,
exclude_pad_area=False,
is_global=is_global,
input_name=input_name,
output_name=output_name
)
def convert_batchnorm(net, node, module, builder):
"""Convert a batchnorm layer from mxnet to coreml.
Parameters
----------
network: net
An MXNet network object.
layer: node
Node to convert.
module: module
An MXNet module.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
input_name, output_name = _get_input_output_name(net, node)
name = node['name']
inputs = node['inputs']
eps = 1e-3 # Default value of eps for MXNet.
use_global_stats = False # Default value of use_global_stats for MXNet.
attrs = _get_attrs(node)
if 'eps' in attrs:
eps = literal_eval(attrs['eps'])
args, aux = module.get_params()
gamma = args[_get_node_name(net, inputs[1][0])].asnumpy()
beta = args[_get_node_name(net, inputs[2][0])].asnumpy()
mean = aux[_get_node_name(net, inputs[3][0])].asnumpy()
variance = aux[_get_node_name(net, inputs[4][0])].asnumpy()
nb_channels = gamma.shape[0]
builder.add_batchnorm(
name=name,
channels=nb_channels,
gamma=gamma,
beta=beta,
mean=mean,
variance=variance,
input_name=input_name,
output_name=output_name,
epsilon=eps)
def convert_concat(net, node, module, builder):
"""Convert concat layer from mxnet to coreml.
Parameters
----------
network: net
An MXNet network object.
layer: node
Node to convert.
module: module
An MXNet module.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
# Get input and output names
input_names, output_name = _get_input_output_name(net, node, 'all')
name = node['name']
mode = 'CONCAT'
builder.add_elementwise(name=name, input_names=input_names,
output_name=output_name, mode=mode)
def convert_deconvolution(net, node, module, builder):
"""Convert a deconvolution layer from mxnet to coreml.
Parameters
----------
network: net
An MXNet network object.
layer: node
Node to convert.
module: module
An MXNet module.
builder: NeuralNetworkBuilder
A neural network builder object.
"""
input_name, output_name = _get_input_output_name(net, node)
name = node['name']
param = _get_attrs(node)
inputs = node['inputs']
args, _ = module.get_params()
if 'no_bias' in param.keys():
has_bias = not literal_eval(param['no_bias'])
else:
has_bias = False
border_mode = "valid"
n_filters = int(param['num_filter'])
output_shape = None
if 'target_shape' in param:
target_shape = literal_eval(param['target_shape'])
output_shape = (int(target_shape[0]), int(target_shape[1]))
W = args[_get_node_name(net, inputs[1][0])].asnumpy()
if has_bias:
Wb = args[_get_node_name(net, inputs[2][0])].asnumpy()
else:
Wb = None
channels = W.shape[0]
stride_height, stride_width = literal_eval(param['stride'])
kernel_height, kernel_width = literal_eval(param['kernel'])
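# For Deconvolution, W.shape[0] above is taken as the input channel count, i.e. the
# MXNet weight layout is (channels, num_filter, kernel_h, kernel_w); the transpose
# below permutes it into CoreML's (height, width, kernel_channels, output_channels).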
W = W.transpose((2, 3, 0, 1))
use_crop = False
if literal_eval(param['pad']) != (0, 0) and output_shape is None:
use_crop = True
builder.add_convolution(
name=name,
kernel_channels=channels,
output_channels=n_filters,
height=kernel_height,
width=kernel_width,
stride_height=stride_height,
stride_width=stride_width,
border_mode=border_mode,
groups=1,
W=W,
b=Wb,
has_bias=has_bias,
is_deconv=True,
output_shape=output_shape,
input_name=input_name,
output_name=output_name+'before_pad' if use_crop else output_name
)
if use_crop:
pad = literal_eval(param['pad'])
builder.add_crop(
name=name+"_pad",
left=pad[1],
right=pad[1],
top=pad[0],
bottom=pad[0],
offset=0,
input_names=[output_name+'before_pad'],
output_name=output_name
)
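# Illustrative sketch (not part of the original converter): functions like the ones
# above are normally dispatched from a table keyed by the MXNet operator name. The
# operator names and helper below are assumptions for demonstration only; the real
# converter keeps its own registry.
_EXAMPLE_MXNET_CONVERTER_REGISTRY = {
    'Reshape': convert_reshape,
    'transpose': convert_transpose,
    'Flatten': convert_flatten,
    'SoftmaxOutput': convert_softmax,
    'Activation': convert_activation,
    'elemwise_add': convert_elementwise_add,
    'FullyConnected': convert_dense,
    'Convolution': convert_convolution,
    'Pooling': convert_pooling,
    'BatchNorm': convert_batchnorm,
    'Concat': convert_concat,
    'Deconvolution': convert_deconvolution,
}


def _example_convert_node(net, node, module, builder):
    """Dispatch a single MXNet graph node through the example registry above."""
    op = node.get('op')
    if op not in _EXAMPLE_MXNET_CONVERTER_REGISTRY:
        raise TypeError('Operator %s is not supported by this sketch.' % op)
    _EXAMPLE_MXNET_CONVERTER_REGISTRY[op](net, node, module, builder)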
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import uuidutils
from sahara import conductor as cond
from sahara import context
from sahara import exceptions as exc
from sahara.i18n import _
from sahara.plugins import provisioning as base
from sahara.plugins import utils as pl_utils
from sahara.service.castellan import utils as key_manager
from sahara.utils import cluster as cl_utils
from sahara.utils import cluster_progress_ops as cpo
from sahara.utils import files
conductor = cond.API
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
POLICY_FILES_DIR = '/tmp/UnlimitedPolicy'
class KDCInstallationFailed(exc.SaharaException):
code = 'KDC_INSTALL_FAILED'
message_template = _('KDC installation failed by reason: {reason}')
def __init__(self, reason):
message = self.message_template.format(reason=reason)
super(KDCInstallationFailed, self).__init__(message)
def _config(**kwargs):
return base.Config(
applicable_target='Kerberos', priority=1,
is_optional=True, scope='cluster', **kwargs)
enable_kerberos = _config(
name='Enable Kerberos Security', config_type='bool',
default_value=False)
use_existing_kdc = _config(
name='Existing KDC', config_type='bool',
default_value=False)
kdc_server_ip = _config(
name='Server IP of KDC', config_type='string',
default_value='192.168.0.1',
description=_('Server IP of KDC server when using existing KDC'))
realm_name = _config(
name='Realm Name', config_type='string',
default_value='SAHARA-KDC',
description=_('The name of realm to be used'))
admin_principal = _config(
name='Admin principal', config_type='string',
default_value='sahara/admin',
description=_('Admin principal for existing KDC server'))
admin_password = _config(
name='Admin password', config_type='string', default_value='')
policy_url = _config(
name="JCE libraries", config_type='string',
default_value=('http://tarballs.openstack.org/sahara/dist/'
'common-artifacts/'),
description=_('Java Cryptography Extension (JCE) '
'Unlimited Strength Jurisdiction Policy Files location')
)
def get_config_list():
return [
enable_kerberos,
use_existing_kdc,
kdc_server_ip,
realm_name,
admin_principal,
admin_password,
policy_url,
]
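# Illustrative sketch (not part of this module): a provisioning plugin typically
# exposes these options by appending them to its own config list, e.g.:
#
#     from sahara.plugins import kerberos
#
#     def get_configs(self, hadoop_version):
#         return self._own_configs(hadoop_version) + kerberos.get_config_list()
#
# The method and helper names above are assumptions for demonstration only.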
def get_kdc_host(cluster, server):
if using_existing_kdc(cluster):
return "server.%s" % CONF.node_domain
return server.fqdn()
def is_kerberos_security_enabled(cluster):
return pl_utils.get_config_value_or_default(
cluster=cluster, config=enable_kerberos)
def using_existing_kdc(cluster):
return pl_utils.get_config_value_or_default(
cluster=cluster, config=use_existing_kdc)
def get_kdc_server_ip(cluster):
return pl_utils.get_config_value_or_default(
cluster=cluster, config=kdc_server_ip)
def get_realm_name(cluster):
return pl_utils.get_config_value_or_default(
cluster=cluster, config=realm_name)
def get_admin_principal(cluster):
return pl_utils.get_config_value_or_default(
cluster=cluster, config=admin_principal)
def get_admin_password(cluster):
# TODO(vgridnev): support in follow-up improved secret storage for
# configs
return pl_utils.get_config_value_or_default(
cluster=cluster, config=admin_password)
def get_policy_url(cluster):
return pl_utils.get_config_value_or_default(
cluster=cluster, config=policy_url)
def setup_clients(cluster, server=None, instances=None):
if not instances:
instances = cl_utils.get_instances(cluster)
server_ip = None
cpo.add_provisioning_step(
cluster.id, _("Setting Up Kerberos clients"), len(instances))
if not server:
server_ip = get_kdc_server_ip(cluster)
with context.ThreadGroup() as tg:
for instance in instances:
tg.spawn('setup-client-%s' % instance.instance_name,
_setup_client_node, cluster, instance,
server, server_ip)
def prepare_policy_files(cluster, instances=None):
if instances is None:
instances = pl_utils.get_instances(cluster)
remote_url = get_policy_url(cluster)
cpo.add_provisioning_step(
cluster.id, _("Preparing policy files"), len(instances))
with context.ThreadGroup() as tg:
for inst in instances:
tg.spawn(
'policy-files',
_prepare_policy_files, inst, remote_url)
def deploy_infrastructure(cluster, server=None):
if not is_kerberos_security_enabled(cluster):
LOG.debug("Kerberos security disabled for cluster")
return
if not using_existing_kdc(cluster):
deploy_kdc_server(cluster, server)
setup_clients(cluster, server)
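# Typical call order from a plugin (sketch, call-site names assumed): once the cluster
# VMs are provisioned, deploy_infrastructure(cluster, kdc_server_instance) installs or
# reuses a KDC and configures every node as a Kerberos client; prepare_policy_files(cluster)
# can then be run so the JVMs pick up the unlimited-strength JCE policy jars.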
def _execute_script(client, script):
with client.remote() as remote:
script_path = '/tmp/%s' % uuidutils.generate_uuid()[:8]
remote.write_file_to(script_path, script)
remote.execute_command('chmod +x %s' % script_path)
remote.execute_command('bash %s' % script_path)
remote.execute_command('rm -rf %s' % script_path)
def _get_kdc_config(cluster, os):
if os == "ubuntu":
data = files.get_file_text('plugins/resources/kdc_conf')
else:
data = files.get_file_text('plugins/resources/kdc_conf_redhat')
return data % {
'realm_name': get_realm_name(cluster)
}
def _get_krb5_config(cluster, server_fqdn):
data = files.get_file_text('plugins/resources/krb5_config')
return data % {
'realm_name': get_realm_name(cluster),
'server': server_fqdn,
'node_domain': CONF.node_domain,
}
def _get_short_uuid():
return "%s%s" % (uuidutils.generate_uuid()[:8],
uuidutils.generate_uuid()[:8])
def get_server_password(cluster):
if using_existing_kdc(cluster):
return get_admin_password(cluster)
ctx = context.ctx()
cluster = conductor.cluster_get(ctx, cluster)
extra = cluster.extra.to_dict() if cluster.extra else {}
passwd_key = 'admin-passwd-kdc'
if passwd_key not in extra:
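# No password stored yet: generate one, put it in the key manager (castellan)
# and record only the returned key id in the cluster's 'extra' field.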
passwd = _get_short_uuid()
key_id = key_manager.store_secret(passwd, ctx)
extra[passwd_key] = key_id
cluster = conductor.cluster_update(ctx, cluster, {'extra': extra})
passwd = key_manager.get_secret(extra.get(passwd_key), ctx)
return passwd
def _get_configs_dir(os):
if os == "ubuntu":
return "/etc/krb5kdc"
return "/var/kerberos/krb5kdc"
def _get_kdc_conf_path(os):
return "%s/kdc.conf" % _get_configs_dir(os)
def _get_realm_create_command(os):
if os == 'ubuntu':
return "krb5_newrealm"
return "kdb5_util create -s"
def _get_acl_config_path(os):
return "%s/kadm5.acl" % _get_configs_dir(os)
def _get_acl_config():
return "*/admin * "
def _get_start_command(os, version):
if os == "ubuntu":
return ("sudo service krb5-kdc restart && "
"sudo service krb5-admin-server restart")
if version.startswith('6'):
return ("sudo /etc/rc.d/init.d/krb5kdc start "
"&& sudo /etc/rc.d/init.d/kadmin start")
if version.startswith('7'):
return ("sudo systemctl start krb5kdc &&"
"sudo systemctl start kadmin")
raise ValueError(
_("Unable to get kdc server start command"))
def _get_server_installation_script(cluster, server_fqdn, os, version):
data = files.get_file_text(
'plugins/resources/mit-kdc-server-init.sh.template')
return data % {
'kdc_conf': _get_kdc_config(cluster, os),
'kdc_conf_path': _get_kdc_conf_path(os),
'acl_conf': _get_acl_config(),
'acl_conf_path': _get_acl_config_path(os),
'realm_create': _get_realm_create_command(os),
'krb5_conf': _get_krb5_config(cluster, server_fqdn),
'admin_principal': get_admin_principal(cluster),
'password': get_server_password(cluster),
'os': os,
'start_command': _get_start_command(os, version),
}
@cpo.event_wrapper(True, step=_("Deploy KDC server"), param=('cluster', 0))
def deploy_kdc_server(cluster, server):
with server.remote() as r:
os = r.get_os_distrib()
version = r.get_os_version()
script = _get_server_installation_script(
cluster, server.fqdn(), os, version)
_execute_script(server, script)
def _push_etc_hosts_entry(client, entry):
with client.remote() as r:
r.execute_command('echo %s | sudo tee -a /etc/hosts' % entry)
def _get_client_installation_script(cluster, server_fqdn, os):
data = files.get_file_text('plugins/resources/krb-client-init.sh.template')
return data % {
'os': os,
'krb5_conf': _get_krb5_config(cluster, server_fqdn),
}
@cpo.event_wrapper(True, param=('client', 1))
def _setup_client_node(cluster, client, server=None, server_ip=None):
if server:
server_fqdn = server.fqdn()
elif server_ip:
server_fqdn = "server." % CONF.node_domain
_push_etc_hosts_entry(
client, "%s %s %s" % (server_ip, server_fqdn, server))
else:
raise KDCInstallationFailed(_('Server or server IP is not provided'))
with client.remote() as r:
os = r.get_os_distrib()
script = _get_client_installation_script(cluster, server_fqdn, os)
_execute_script(client, script)
@cpo.event_wrapper(True)
def _prepare_policy_files(instance, remote_url):
with instance.remote() as r:
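# Extract the JAVA_HOME value from /etc/profile.d/99-java.sh (the part after '='
# on the first matching line) so the policy jars can be copied into its lib/security.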
cmd = 'cut -f2 -d \"=\" /etc/profile.d/99-java.sh | head -1'
exit_code, java_home = r.execute_command(cmd)
java_home = java_home.strip()
results = [
r.execute_command(
"ls %s/local_policy.jar" % POLICY_FILES_DIR,
raise_when_error=False)[0] != 0,
r.execute_command(
"ls %s/US_export_policy.jar" % POLICY_FILES_DIR,
raise_when_error=False)[0] != 0
]
# at least one exit code is non-zero
if any(results):
r.execute_command('mkdir %s' % POLICY_FILES_DIR)
r.execute_command(
"sudo curl %s/local_policy.jar -o %s/local_policy.jar" % (
remote_url, POLICY_FILES_DIR))
r.execute_command(
"sudo curl %s/US_export_policy.jar -o "
"%s/US_export_policy.jar" % (
remote_url, POLICY_FILES_DIR))
r.execute_command(
'sudo cp %s/*.jar %s/lib/security/'
% (POLICY_FILES_DIR, java_home))
def _get_script_for_user_creation(cluster, instance, user):
data = files.get_file_text(
'plugins/resources/create-principal-keytab')
cron_file = files.get_file_text('plugins/resources/cron-file')
cron_script = files.get_file_text('plugins/resources/cron-script')
data = data % {
'user': user, 'admin_principal': get_admin_principal(cluster),
'admin_password': get_server_password(cluster),
'principal': "%s/sahara-%s@%s" % (
user, instance.fqdn(), get_realm_name(cluster)),
'keytab': '%s-sahara-%s.keytab' % (user, instance.fqdn())
}
cron_script_location = '/tmp/sahara-kerberos/%s.sh' % _get_short_uuid()
cron_file = cron_file % {'refresher': cron_script_location, 'user': user}
cron_script = cron_script % {
'principal': "%s/sahara-%s@%s" % (
user, instance.fqdn(), get_realm_name(cluster)),
'keytab': '%s-sahara-%s.keytab' % (user, instance.fqdn()),
'user': user,
}
return data, cron_file, cron_script, cron_script_location
def _create_keytabs_for_user(instance, user):
script, cron, cron_script, cs_location = _get_script_for_user_creation(
instance.cluster, instance, user)
_execute_script(instance, script)
# setting up refresher
with instance.remote() as r:
tmp_location = '/tmp/%s' % _get_short_uuid()
r.write_file_to(tmp_location, cron_script, run_as_root=True)
r.execute_command(
"cat {0} | sudo tee {1} "
"&& rm -rf {0} && sudo chmod +x {1}".format(
tmp_location, cs_location))
r.execute_command(
'echo "%s" | sudo tee /etc/cron.d/%s.cron' % (
cron, _get_short_uuid()))
# executing script
r.execute_command('sudo bash %s' % cs_location)
@cpo.event_wrapper(
True, step=_('Setting up keytabs for users'), param=('cluster', 0))
def create_keytabs_for_map(cluster, mapper):
# cluster parameter is used by event log feature
with context.ThreadGroup() as tg:
for user, instances in mapper.items():
for instance in instances:
tg.spawn(
'create-keytabs', _create_keytabs_for_user,
instance, user)
|
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.prolog
~~~~~~~~~~~~~~~~~~~~~~
Lexers for Prolog and Prolog-like languages.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from ..lexer import RegexLexer, bygroups
from ..token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['PrologLexer', 'LogtalkLexer']
class PrologLexer(RegexLexer):
"""
Lexer for Prolog files.
"""
name = 'Prolog'
aliases = ['prolog']
filenames = ['*.ecl', '*.prolog', '*.pro', '*.pl']
mimetypes = ['text/x-prolog']
flags = re.UNICODE | re.MULTILINE
tokens = {
'root': [
(r'^#.*', Comment.Single),
(r'/\*', Comment.Multiline, 'nested-comment'),
(r'%.*', Comment.Single),
# character literal
(r'0\'.', String.Char),
(r'0b[01]+', Number.Bin),
(r'0o[0-7]+', Number.Oct),
(r'0x[0-9a-fA-F]+', Number.Hex),
# literal with prepended base
(r'\d\d?\'[a-zA-Z0-9]+', Number.Integer),
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'\d+', Number.Integer),
(r'[\[\](){}|.,;!]', Punctuation),
(r':-|-->', Punctuation),
(r'"(?:\\x[0-9a-fA-F]+\\|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|'
r'\\[0-7]+\\|\\["\nabcefnrstv]|[^\\"])*"', String.Double),
(r"'(?:''|[^'])*'", String.Atom), # quoted atom
# Needs to not be followed by an atom.
# (r'=(?=\s|[a-zA-Z\[])', Operator),
(r'is\b', Operator),
(r'(<|>|=<|>=|==|=:=|=|/|//|\*|\+|-)(?=\s|[a-zA-Z0-9\[])',
Operator),
(r'(mod|div|not)\b', Operator),
(r'_', Keyword), # The don't-care variable
(r'([a-z]+)(:)', bygroups(Name.Namespace, Punctuation)),
(u'([a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]'
u'[\w$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*)'
u'(\\s*)(:-|-->)',
bygroups(Name.Function, Text, Operator)), # function defn
(u'([a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]'
u'[\w$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*)'
u'(\\s*)(\\()',
bygroups(Name.Function, Text, Punctuation)),
(u'[a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]'
u'[\w$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*',
String.Atom), # atom, characters
# This one includes !
(u'[#&*+\\-./:<=>?@\\\\^~\u00a1-\u00bf\u2010-\u303f]+',
String.Atom), # atom, graphics
(r'[A-Z_]\w*', Name.Variable),
(u'\\s+|[\u2000-\u200f\ufff0-\ufffe\uffef]', Text),
],
'nested-comment': [
(r'\*/', Comment.Multiline, '#pop'),
(r'/\*', Comment.Multiline, '#push'),
(r'[^*/]+', Comment.Multiline),
(r'[*/]', Comment.Multiline),
],
}
def analyse_text(text):
return ':-' in text
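# Usage sketch (not part of the lexer module), assuming the standard pygments API:
#
#     from pygments import highlight
#     from pygments.formatters import TerminalFormatter
#
#     code = "father(X, Y) :- parent(X, Y), male(X)."
#     print(highlight(code, PrologLexer(), TerminalFormatter()))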
class LogtalkLexer(RegexLexer):
"""
For `Logtalk <http://logtalk.org/>`_ source code.
.. versionadded:: 0.10
"""
name = 'Logtalk'
aliases = ['logtalk']
filenames = ['*.lgt', '*.logtalk']
mimetypes = ['text/x-logtalk']
tokens = {
'root': [
# Directives
(r'^\s*:-\s', Punctuation, 'directive'),
# Comments
(r'%.*?\n', Comment),
(r'/\*(.|\n)*?\*/', Comment),
# Whitespace
(r'\n', Text),
(r'\s+', Text),
# Numbers
(r"0'.", Number),
(r'0b[01]+', Number.Bin),
(r'0o[0-7]+', Number.Oct),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'\d+\.?\d*((e|E)(\+|-)?\d+)?', Number),
# Variables
(r'([A-Z_]\w*)', Name.Variable),
# Event handlers
(r'(after|before)(?=[(])', Keyword),
# Message forwarding handler
(r'forward(?=[(])', Keyword),
# Execution-context methods
(r'(parameter|this|se(lf|nder))(?=[(])', Keyword),
# Reflection
(r'(current_predicate|predicate_property)(?=[(])', Keyword),
# DCGs and term expansion
(r'(expand_(goal|term)|(goal|term)_expansion|phrase)(?=[(])', Keyword),
# Entity
(r'(abolish|c(reate|urrent))_(object|protocol|category)(?=[(])', Keyword),
(r'(object|protocol|category)_property(?=[(])', Keyword),
# Entity relations
(r'co(mplements_object|nforms_to_protocol)(?=[(])', Keyword),
(r'extends_(object|protocol|category)(?=[(])', Keyword),
(r'imp(lements_protocol|orts_category)(?=[(])', Keyword),
(r'(instantiat|specializ)es_class(?=[(])', Keyword),
# Events
(r'(current_event|(abolish|define)_events)(?=[(])', Keyword),
# Flags
(r'(current|set)_logtalk_flag(?=[(])', Keyword),
# Compiling, loading, and library paths
(r'logtalk_(compile|l(ibrary_path|oad|oad_context)|make)(?=[(])', Keyword),
(r'\blogtalk_make\b', Keyword),
# Database
(r'(clause|retract(all)?)(?=[(])', Keyword),
(r'a(bolish|ssert(a|z))(?=[(])', Keyword),
# Control constructs
(r'(ca(ll|tch)|throw)(?=[(])', Keyword),
(r'(fa(il|lse)|true)\b', Keyword),
# All solutions
(r'((bag|set)of|f(ind|or)all)(?=[(])', Keyword),
# Multi-threading meta-predicates
(r'threaded(_(call|once|ignore|exit|peek|wait|notify))?(?=[(])', Keyword),
# Term unification
(r'(subsumes_term|unify_with_occurs_check)(?=[(])', Keyword),
# Term creation and decomposition
(r'(functor|arg|copy_term|numbervars|term_variables)(?=[(])', Keyword),
# Evaluable functors
(r'(rem|m(ax|in|od)|abs|sign)(?=[(])', Keyword),
(r'float(_(integer|fractional)_part)?(?=[(])', Keyword),
(r'(floor|truncate|round|ceiling)(?=[(])', Keyword),
# Other arithmetic functors
(r'(cos|a(cos|sin|tan)|exp|log|s(in|qrt))(?=[(])', Keyword),
# Term testing
(r'(var|atom(ic)?|integer|float|c(allable|ompound)|n(onvar|umber)|'
r'ground|acyclic_term)(?=[(])', Keyword),
# Term comparison
(r'compare(?=[(])', Keyword),
# Stream selection and control
(r'(curren|se)t_(in|out)put(?=[(])', Keyword),
(r'(open|close)(?=[(])', Keyword),
(r'flush_output(?=[(])', Keyword),
(r'(at_end_of_stream|flush_output)\b', Keyword),
(r'(stream_property|at_end_of_stream|set_stream_position)(?=[(])', Keyword),
# Character and byte input/output
(r'(nl|(get|peek|put)_(byte|c(har|ode)))(?=[(])', Keyword),
(r'\bnl\b', Keyword),
# Term input/output
(r'read(_term)?(?=[(])', Keyword),
(r'write(q|_(canonical|term))?(?=[(])', Keyword),
(r'(current_)?op(?=[(])', Keyword),
(r'(current_)?char_conversion(?=[(])', Keyword),
# Atomic term processing
(r'atom_(length|c(hars|o(ncat|des)))(?=[(])', Keyword),
(r'(char_code|sub_atom)(?=[(])', Keyword),
(r'number_c(har|ode)s(?=[(])', Keyword),
# Implementation defined hooks functions
(r'(se|curren)t_prolog_flag(?=[(])', Keyword),
(r'\bhalt\b', Keyword),
(r'halt(?=[(])', Keyword),
# Message sending operators
(r'(::|:|\^\^)', Operator),
# External call
(r'[{}]', Keyword),
# Logic and control
(r'(ignore|once)(?=[(])', Keyword),
(r'\brepeat\b', Keyword),
# Sorting
(r'(key)?sort(?=[(])', Keyword),
# Bitwise functors
(r'(>>|<<|/\\|\\\\|\\)', Operator),
# Predicate aliases
(r'\bas\b', Operator),
# Arithmetic evaluation
(r'\bis\b', Keyword),
# Arithmetic comparison
(r'(=:=|=\\=|<|=<|>=|>)', Operator),
# Term creation and decomposition
(r'=\.\.', Operator),
# Term unification
(r'(=|\\=)', Operator),
# Term comparison
(r'(==|\\==|@=<|@<|@>=|@>)', Operator),
# Evaluable functors
(r'(//|[-+*/])', Operator),
(r'\b(e|pi|mod|rem)\b', Operator),
# Other arithmetic functors
(r'\b\*\*\b', Operator),
# DCG rules
(r'-->', Operator),
# Control constructs
(r'([!;]|->)', Operator),
# Logic and control
(r'\\+', Operator),
# Mode operators
(r'[?@]', Operator),
# Existential quantifier
(r'\^', Operator),
# Strings
(r'"(\\\\|\\"|[^"])*"', String),
# Punctuation
(r'[()\[\],.|]', Text),
# Atoms
(r"[a-z]\w*", Text),
(r"'", String, 'quoted_atom'),
],
'quoted_atom': [
(r"''", String),
(r"'", String, '#pop'),
(r'\\([\\abfnrtv"\']|(x[a-fA-F0-9]+|[0-7]+)\\)', String.Escape),
(r"[^\\'\n]+", String),
(r'\\', String),
],
'directive': [
# Conditional compilation directives
(r'(el)?if(?=[(])', Keyword, 'root'),
(r'(e(lse|ndif))[.]', Keyword, 'root'),
# Entity directives
(r'(category|object|protocol)(?=[(])', Keyword, 'entityrelations'),
(r'(end_(category|object|protocol))[.]', Keyword, 'root'),
# Predicate scope directives
(r'(public|protected|private)(?=[(])', Keyword, 'root'),
# Other directives
(r'e(n(coding|sure_loaded)|xport)(?=[(])', Keyword, 'root'),
(r'in(clude|itialization|fo)(?=[(])', Keyword, 'root'),
(r'(built_in|dynamic|synchronized|threaded)[.]', Keyword, 'root'),
(r'(alias|d(ynamic|iscontiguous)|m(eta_(non_terminal|predicate)|ode|ultifile)|'
r's(et_(logtalk|prolog)_flag|ynchronized))(?=[(])', Keyword, 'root'),
(r'op(?=[(])', Keyword, 'root'),
(r'(c(alls|oinductive)|module|reexport|use(s|_module))(?=[(])', Keyword, 'root'),
(r'[a-z]\w*(?=[(])', Text, 'root'),
(r'[a-z]\w*[.]', Text, 'root'),
],
'entityrelations': [
(r'(complements|extends|i(nstantiates|mp(lements|orts))|specializes)(?=[(])', Keyword),
# Numbers
(r"0'.", Number),
(r'0b[01]+', Number.Bin),
(r'0o[0-7]+', Number.Oct),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'\d+\.?\d*((e|E)(\+|-)?\d+)?', Number),
# Variables
(r'([A-Z_]\w*)', Name.Variable),
# Atoms
(r"[a-z]\w*", Text),
(r"'", String, 'quoted_atom'),
# Strings
(r'"(\\\\|\\"|[^"])*"', String),
# End of entity-opening directive
(r'([)]\.)', Text, 'root'),
# Scope operator
(r'(::)', Operator),
# Punctuation
(r'[()\[\],.|]', Text),
# Comments
(r'%.*?\n', Comment),
(r'/\*(.|\n)*?\*/', Comment),
# Whitespace
(r'\n', Text),
(r'\s+', Text),
]
}
def analyse_text(text):
if ':- object(' in text:
return 1.0
elif ':- protocol(' in text:
return 1.0
elif ':- category(' in text:
return 1.0
elif re.search(r'^:-\s[a-z]', text, re.M):
return 0.9
else:
return 0.0
|