repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
pedro2d10/SickRage-FR | lib/sqlalchemy/dialects/sybase/base.py | 78 | 28800 | # sybase/base.py
# Copyright (C) 2010-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
# get_select_precolumns(), limit_clause() implementation
# copyright (C) 2007 Fisch Asset Management
# AG http://www.fam.ch, with coding by Alexander Houben
# alexander.houben@thor-solutions.ch
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: sybase
:name: Sybase
.. note::
The Sybase dialect functions on current SQLAlchemy versions
but is not regularly tested, and may have many issues and
caveats not currently handled.
"""
import operator
import re
from sqlalchemy.sql import compiler, expression, text, bindparam
from sqlalchemy.engine import default, base, reflection
from sqlalchemy import types as sqltypes
from sqlalchemy.sql import operators as sql_operators
from sqlalchemy import schema as sa_schema
from sqlalchemy import util, sql, exc
from sqlalchemy.types import CHAR, VARCHAR, TIME, NCHAR, NVARCHAR,\
TEXT, DATE, DATETIME, FLOAT, NUMERIC,\
BIGINT, INT, INTEGER, SMALLINT, BINARY,\
VARBINARY, DECIMAL, TIMESTAMP, Unicode,\
UnicodeText, REAL
# Words reserved by Sybase ASE / SQL Anywhere.  Identifiers matching any of
# these are quoted by SybaseIdentifierPreparer (see ``reserved_words`` below).
RESERVED_WORDS = set([
    "add", "all", "alter", "and",
    "any", "as", "asc", "backup",
    "begin", "between", "bigint", "binary",
    "bit", "bottom", "break", "by",
    "call", "capability", "cascade", "case",
    "cast", "char", "char_convert", "character",
    "check", "checkpoint", "close", "comment",
    "commit", "connect", "constraint", "contains",
    "continue", "convert", "create", "cross",
    "cube", "current", "current_timestamp", "current_user",
    "cursor", "date", "dbspace", "deallocate",
    "dec", "decimal", "declare", "default",
    "delete", "deleting", "desc", "distinct",
    "do", "double", "drop", "dynamic",
    "else", "elseif", "encrypted", "end",
    "endif", "escape", "except", "exception",
    "exec", "execute", "existing", "exists",
    "externlogin", "fetch", "first", "float",
    "for", "force", "foreign", "forward",
    "from", "full", "goto", "grant",
    "group", "having", "holdlock", "identified",
    "if", "in", "index", "index_lparen",
    "inner", "inout", "insensitive", "insert",
    "inserting", "install", "instead", "int",
    "integer", "integrated", "intersect", "into",
    "iq", "is", "isolation", "join",
    "key", "lateral", "left", "like",
    "lock", "login", "long", "match",
    "membership", "message", "mode", "modify",
    "natural", "new", "no", "noholdlock",
    "not", "notify", "null", "numeric",
    "of", "off", "on", "open",
    "option", "options", "or", "order",
    "others", "out", "outer", "over",
    "passthrough", "precision", "prepare", "primary",
    "print", "privileges", "proc", "procedure",
    "publication", "raiserror", "readtext", "real",
    "reference", "references", "release", "remote",
    "remove", "rename", "reorganize", "resource",
    "restore", "restrict", "return", "revoke",
    "right", "rollback", "rollup", "save",
    "savepoint", "scroll", "select", "sensitive",
    "session", "set", "setuser", "share",
    "smallint", "some", "sqlcode", "sqlstate",
    "start", "stop", "subtrans", "subtransaction",
    "synchronize", "syntax_error", "table", "temporary",
    "then", "time", "timestamp", "tinyint",
    "to", "top", "tran", "trigger",
    "truncate", "tsequal", "unbounded", "union",
    "unique", "unknown", "unsigned", "update",
    "updating", "user", "using", "validate",
    "values", "varbinary", "varchar", "variable",
    "varying", "view", "wait", "waitfor",
    "when", "where", "while", "window",
    "with", "with_cube", "with_lparen", "with_rollup",
    "within", "work", "writetext",
])
class _SybaseUnitypeMixin(object):
    """Mixin for Sybase unicode types whose DBAPI values arrive as
    buffer objects; converts them to strings on the way out."""

    def result_processor(self, dialect, coltype):
        def process(value):
            # Pass NULLs through untouched; stringify everything else.
            if value is None:
                return None
            return str(value)  # decode("ucs-2")
        return process
class UNICHAR(_SybaseUnitypeMixin, sqltypes.Unicode):
    """Sybase UNICHAR type."""
    __visit_name__ = 'UNICHAR'
class UNIVARCHAR(_SybaseUnitypeMixin, sqltypes.Unicode):
    """Sybase UNIVARCHAR type."""
    __visit_name__ = 'UNIVARCHAR'
class UNITEXT(_SybaseUnitypeMixin, sqltypes.UnicodeText):
    """Sybase UNITEXT type."""
    __visit_name__ = 'UNITEXT'
class TINYINT(sqltypes.Integer):
    """Sybase TINYINT type."""
    __visit_name__ = 'TINYINT'
class BIT(sqltypes.TypeEngine):
    """Sybase BIT type."""
    __visit_name__ = 'BIT'
class MONEY(sqltypes.TypeEngine):
    """Sybase MONEY type."""
    __visit_name__ = "MONEY"
class SMALLMONEY(sqltypes.TypeEngine):
    """Sybase SMALLMONEY type."""
    __visit_name__ = "SMALLMONEY"
class UNIQUEIDENTIFIER(sqltypes.TypeEngine):
    """Sybase UNIQUEIDENTIFIER type."""
    __visit_name__ = "UNIQUEIDENTIFIER"
class IMAGE(sqltypes.LargeBinary):
    """Sybase IMAGE type; used to render the generic LargeBinary."""
    __visit_name__ = 'IMAGE'
class SybaseTypeCompiler(compiler.GenericTypeCompiler):
    """Render Sybase-specific DDL type names."""

    def visit_large_binary(self, type_):
        # Generic LargeBinary is rendered as Sybase IMAGE.
        return self.visit_IMAGE(type_)

    def visit_boolean(self, type_):
        # Generic Boolean is rendered as BIT (no native boolean type;
        # see supports_native_boolean on the dialect).
        return self.visit_BIT(type_)

    def visit_unicode(self, type_):
        return self.visit_NVARCHAR(type_)

    def visit_UNICHAR(self, type_):
        return "UNICHAR(%d)" % (type_.length,)

    def visit_UNIVARCHAR(self, type_):
        return "UNIVARCHAR(%d)" % (type_.length,)

    def visit_UNITEXT(self, type_):
        return "UNITEXT"

    def visit_TINYINT(self, type_):
        return "TINYINT"

    def visit_IMAGE(self, type_):
        return "IMAGE"

    def visit_BIT(self, type_):
        return "BIT"

    def visit_MONEY(self, type_):
        return "MONEY"

    def visit_SMALLMONEY(self, type_):
        return "SMALLMONEY"

    def visit_UNIQUEIDENTIFIER(self, type_):
        return "UNIQUEIDENTIFIER"
# Map from Sybase type names (as reported by systypes during reflection)
# to the SQLAlchemy type classes defined above / imported from sqlalchemy.types.
ischema_names = {
    'bigint': BIGINT,
    'int': INTEGER,
    'integer': INTEGER,
    'smallint': SMALLINT,
    'tinyint': TINYINT,
    'unsigned bigint': BIGINT,  # TODO: unsigned flags
    'unsigned int': INTEGER,  # TODO: unsigned flags
    'unsigned smallint': SMALLINT,  # TODO: unsigned flags
    'numeric': NUMERIC,
    'decimal': DECIMAL,
    'dec': DECIMAL,
    'float': FLOAT,
    'double': NUMERIC,  # TODO
    'double precision': NUMERIC,  # TODO
    'real': REAL,
    'smallmoney': SMALLMONEY,
    'money': MONEY,
    'smalldatetime': DATETIME,
    'datetime': DATETIME,
    'date': DATE,
    'time': TIME,
    'char': CHAR,
    'character': CHAR,
    'varchar': VARCHAR,
    'character varying': VARCHAR,
    'char varying': VARCHAR,
    'unichar': UNICHAR,
    'unicode character': UNIVARCHAR,
    'nchar': NCHAR,
    'national char': NCHAR,
    'national character': NCHAR,
    'nvarchar': NVARCHAR,
    'nchar varying': NVARCHAR,
    'national char varying': NVARCHAR,
    'national character varying': NVARCHAR,
    'text': TEXT,
    'unitext': UNITEXT,
    'binary': BINARY,
    'varbinary': VARBINARY,
    'image': IMAGE,
    'bit': BIT,

    # not in documentation for ASE 15.7
    'long varchar': TEXT,  # TODO
    'timestamp': TIMESTAMP,
    'uniqueidentifier': UNIQUEIDENTIFIER,
}
class SybaseInspector(reflection.Inspector):
    """Reflection inspector extended with a Sybase table-id helper."""

    def __init__(self, conn):
        reflection.Inspector.__init__(self, conn)

    def get_table_id(self, table_name, schema=None):
        """Return the table id from `table_name` and `schema`."""
        return self.dialect.get_table_id(
            self.bind, table_name, schema,
            info_cache=self.info_cache)
class SybaseExecutionContext(default.DefaultExecutionContext):
    """Execution context handling IDENTITY_INSERT and DDL autocommit."""

    # Set per-statement in pre_exec when an INSERT supplies an explicit
    # value for the autoincrement column.
    _enable_identity_insert = False

    def set_ddl_autocommit(self, connection, value):
        """Must be implemented by subclasses to accommodate DDL executions.

        "connection" is the raw unwrapped DBAPI connection.  "value" is
        True or False.  When True, the connection should be configured
        such that a DDL can take place subsequently.  When False, a DDL
        has taken place and the connection should be resumed into
        non-autocommit mode.

        """
        raise NotImplementedError()

    def pre_exec(self):
        """Enable IDENTITY_INSERT for explicit-identity INSERTs and
        enforce/announce autocommit for DDL."""
        if self.isinsert:
            tbl = self.compiled.statement.table
            seq_column = tbl._autoincrement_column
            insert_has_sequence = seq_column is not None

            if insert_has_sequence:
                # Only needed when the statement actually supplies a value
                # for the identity column.
                self._enable_identity_insert = \
                    seq_column.key in self.compiled_parameters[0]
            else:
                self._enable_identity_insert = False

            if self._enable_identity_insert:
                self.cursor.execute(
                    "SET IDENTITY_INSERT %s ON" %
                    self.dialect.identifier_preparer.format_table(tbl))

        if self.isddl:
            # TODO: to enhance this, we can detect "ddl in tran" on the
            # database settings.  this error message should be improved to
            # include a note about that.
            if not self.should_autocommit:
                raise exc.InvalidRequestError(
                    "The Sybase dialect only supports "
                    "DDL in 'autocommit' mode at this time.")

            self.root_connection.engine.logger.info(
                "AUTOCOMMIT (Assuming no Sybase 'ddl in tran')")

            # Pass the raw DBAPI connection to the subclass hook.
            self.set_ddl_autocommit(
                self.root_connection.connection.connection,
                True)

    def post_exec(self):
        """Undo pre_exec side effects: restore non-autocommit after DDL
        and switch IDENTITY_INSERT back off."""
        if self.isddl:
            self.set_ddl_autocommit(self.root_connection, False)

        if self._enable_identity_insert:
            self.cursor.execute(
                "SET IDENTITY_INSERT %s OFF" %
                self.dialect.identifier_preparer.
                format_table(self.compiled.statement.table)
            )

    def get_lastrowid(self):
        # Fetch the last inserted identity value via @@identity on a
        # fresh cursor.
        cursor = self.create_cursor()
        cursor.execute("SELECT @@identity AS lastrowid")
        lastrowid = cursor.fetchone()[0]
        cursor.close()
        return lastrowid
class SybaseSQLCompiler(compiler.SQLCompiler):
    """Statement compiler for Sybase.

    Sybase places DISTINCT / TOP / START AT immediately after the SELECT
    keyword, so limit/offset are rendered in ``get_select_precolumns``
    and ``limit_clause`` is a no-op.
    """

    ansi_bind_rules = True

    extract_map = util.update_copy(
        compiler.SQLCompiler.extract_map,
        {
            'doy': 'dayofyear',
            'dow': 'weekday',
            'milliseconds': 'millisecond'
        })

    def get_select_precolumns(self, select):
        """Render DISTINCT/TOP/START AT before the column list."""
        # Idiom fix: was the legacy ``x and a or b`` conditional; the
        # conditional expression is equivalent here and clearer.
        s = "DISTINCT " if select._distinct else ""
        # TODO: don't think Sybase supports
        # bind params for FIRST / TOP
        if select._limit:
            s += "TOP %s " % (select._limit,)
        if select._offset:
            if not select._limit:
                # FIXME: sybase doesn't allow an offset without a limit
                # so use a huge value for TOP here
                s += "TOP 1000000 "
            # START AT is one-based, SQLAlchemy offsets are zero-based.
            s += "START AT %s " % (select._offset + 1,)
        return s

    def get_from_hint_text(self, table, text):
        # Hint text is emitted verbatim.
        return text

    def limit_clause(self, select):
        # Limit in sybase is after the select keyword; handled in
        # get_select_precolumns.
        return ""

    def visit_extract(self, extract, **kw):
        """Render EXTRACT as Sybase's DATEPART function."""
        field = self.extract_map.get(extract.field, extract.field)
        return 'DATEPART("%s", %s)' % (
            field, self.process(extract.expr, **kw))

    def visit_now_func(self, fn, **kw):
        return "GETDATE()"

    def for_update_clause(self, select):
        # "FOR UPDATE" is only allowed on "DECLARE CURSOR"
        # which SQLAlchemy doesn't use
        return ''

    def order_by_clause(self, select, **kw):
        """Render ORDER BY with inline literals rather than bind params."""
        kw['literal_binds'] = True
        order_by = self.process(select._order_by_clause, **kw)

        # SybaseSQL only allows ORDER BY in subqueries if there is a LIMIT
        if order_by and (not self.is_subquery() or select._limit):
            return " ORDER BY " + order_by
        else:
            return ""
class SybaseDDLCompiler(compiler.DDLCompiler):
    def get_column_specification(self, column, **kwargs):
        """Render one column's DDL: name, type, then either IDENTITY
        (for the implicit autoincrement column) or DEFAULT/nullability."""
        colspec = self.preparer.format_column(column) + " " + \
            self.dialect.type_compiler.process(column.type)

        if column.table is None:
            raise exc.CompileError(
                "The Sybase dialect requires Table-bound "
                "columns in order to generate DDL")
        seq_col = column.table._autoincrement_column

        # install a IDENTITY Sequence if we have an implicit IDENTITY column
        if seq_col is column:
            # An explicit Sequence default supplies start/increment;
            # otherwise both default to 1.
            sequence = isinstance(column.default, sa_schema.Sequence) \
                and column.default
            if sequence:
                start, increment = sequence.start or 1, \
                    sequence.increment or 1
            else:
                start, increment = 1, 1
            if (start, increment) == (1, 1):
                colspec += " IDENTITY"
            else:
                # TODO: need correct syntax for this
                colspec += " IDENTITY(%s,%s)" % (start, increment)
        else:
            default = self.get_column_default_string(column)
            if default is not None:
                colspec += " DEFAULT " + default

            if column.nullable is not None:
                # Primary key columns are always rendered NOT NULL.
                if not column.nullable or column.primary_key:
                    colspec += " NOT NULL"
                else:
                    colspec += " NULL"
        return colspec

    def visit_drop_index(self, drop):
        """DROP INDEX is qualified with the table name on Sybase."""
        index = drop.element
        return "\nDROP INDEX %s.%s" % (
            self.preparer.quote_identifier(index.table.name),
            self._prepared_index_name(drop.element,
                                      include_schema=False)
        )
class SybaseIdentifierPreparer(compiler.IdentifierPreparer):
    """Identifier preparer that quotes Sybase reserved words."""
    reserved_words = RESERVED_WORDS
class SybaseDialect(default.DefaultDialect):
    """Base dialect for Sybase; DBAPI-specific dialects subclass this."""
    name = 'sybase'
    supports_unicode_statements = False
    supports_sane_rowcount = False
    supports_sane_multi_rowcount = False

    supports_native_boolean = False
    supports_unicode_binds = False
    # Last inserted id is fetched with SELECT @@identity (get_lastrowid).
    postfetch_lastrowid = True

    colspecs = {}
    ischema_names = ischema_names

    type_compiler = SybaseTypeCompiler
    statement_compiler = SybaseSQLCompiler
    ddl_compiler = SybaseDDLCompiler
    preparer = SybaseIdentifierPreparer
    inspector = SybaseInspector

    construct_arguments = []
    def _get_default_schema_name(self, connection):
        """Return the current user name, which serves as the default schema."""
        return connection.scalar(
            text("SELECT user_name() as user_name",
                 typemap={'user_name': Unicode})
        )
    def initialize(self, connection):
        """Set version-dependent options after the base initialization."""
        super(SybaseDialect, self).initialize(connection)
        # Identifier length limit was raised to 255 in ASE 15.
        if self.server_version_info is not None and\
                self.server_version_info < (15, ):
            self.max_identifier_length = 30
        else:
            self.max_identifier_length = 255
    def get_table_id(self, connection, table_name, schema=None, **kw):
        """Fetch the id for schema.table_name.

        Several reflection methods require the table id.  The idea for
        using this method is that it can be fetched one time and cached
        for subsequent calls.

        Raises NoSuchTableError if no matching table or view exists.
        """
        table_id = None
        if schema is None:
            schema = self.default_schema_name

        # 'U' = user table, 'V' = view (matches get_table_names /
        # get_view_names below).
        TABLEID_SQL = text("""
          SELECT o.id AS id
          FROM sysobjects o JOIN sysusers u ON o.uid=u.uid
          WHERE u.name = :schema_name
              AND o.name = :table_name
              AND o.type in ('U', 'V')
        """)

        if util.py2k:
            # Under Python 2, pass identifiers to the driver as bytestrings.
            if isinstance(schema, unicode):
                schema = schema.encode("ascii")

            if isinstance(table_name, unicode):
                table_name = table_name.encode("ascii")
        result = connection.execute(TABLEID_SQL,
                                    schema_name=schema,
                                    table_name=table_name)
        table_id = result.scalar()
        if table_id is None:
            raise exc.NoSuchTableError(table_name)
        return table_id
    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        """Reflect column information for ``table_name`` from syscolumns."""
        table_id = self.get_table_id(connection, table_name, schema,
                                     info_cache=kw.get("info_cache"))

        # status bit 8 = nullable, bit 128 = identity/autoincrement;
        # default expression text lives in syscomments.
        COLUMN_SQL = text("""
          SELECT col.name AS name,
                 t.name AS type,
                 (col.status & 8) AS nullable,
                 (col.status & 128) AS autoincrement,
                 com.text AS 'default',
                 col.prec AS precision,
                 col.scale AS scale,
                 col.length AS length
          FROM systypes t, syscolumns col LEFT OUTER JOIN syscomments com ON
              col.cdefault = com.id
          WHERE col.usertype = t.usertype
              AND col.id = :table_id
          ORDER BY col.colid
        """)

        results = connection.execute(COLUMN_SQL, table_id=table_id)

        columns = []
        for (name, type_, nullable, autoincrement, default, precision, scale,
                length) in results:
            col_info = self._get_column_info(name, type_, bool(nullable),
                                             bool(autoincrement), default,
                                             precision, scale, length)
            columns.append(col_info)

        return columns
def _get_column_info(self, name, type_, nullable, autoincrement, default,
precision, scale, length):
coltype = self.ischema_names.get(type_, None)
kwargs = {}
if coltype in (NUMERIC, DECIMAL):
args = (precision, scale)
elif coltype == FLOAT:
args = (precision,)
elif coltype in (CHAR, VARCHAR, UNICHAR, UNIVARCHAR, NCHAR, NVARCHAR):
args = (length,)
else:
args = ()
if coltype:
coltype = coltype(*args, **kwargs)
#is this necessary
#if is_array:
# coltype = ARRAY(coltype)
else:
util.warn("Did not recognize type '%s' of column '%s'" %
(type_, name))
coltype = sqltypes.NULLTYPE
if default:
default = re.sub("DEFAULT", "", default).strip()
default = re.sub("^'(.*)'$", lambda m: m.group(1), default)
else:
default = None
column_info = dict(name=name, type=coltype, nullable=nullable,
default=default, autoincrement=autoincrement)
return column_info
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
table_id = self.get_table_id(connection, table_name, schema,
info_cache=kw.get("info_cache"))
table_cache = {}
column_cache = {}
foreign_keys = []
table_cache[table_id] = {"name": table_name, "schema": schema}
COLUMN_SQL = text("""
SELECT c.colid AS id, c.name AS name
FROM syscolumns c
WHERE c.id = :table_id
""")
results = connection.execute(COLUMN_SQL, table_id=table_id)
columns = {}
for col in results:
columns[col["id"]] = col["name"]
column_cache[table_id] = columns
REFCONSTRAINT_SQL = text("""
SELECT o.name AS name, r.reftabid AS reftable_id,
r.keycnt AS 'count',
r.fokey1 AS fokey1, r.fokey2 AS fokey2, r.fokey3 AS fokey3,
r.fokey4 AS fokey4, r.fokey5 AS fokey5, r.fokey6 AS fokey6,
r.fokey7 AS fokey7, r.fokey1 AS fokey8, r.fokey9 AS fokey9,
r.fokey10 AS fokey10, r.fokey11 AS fokey11, r.fokey12 AS fokey12,
r.fokey13 AS fokey13, r.fokey14 AS fokey14, r.fokey15 AS fokey15,
r.fokey16 AS fokey16,
r.refkey1 AS refkey1, r.refkey2 AS refkey2, r.refkey3 AS refkey3,
r.refkey4 AS refkey4, r.refkey5 AS refkey5, r.refkey6 AS refkey6,
r.refkey7 AS refkey7, r.refkey1 AS refkey8, r.refkey9 AS refkey9,
r.refkey10 AS refkey10, r.refkey11 AS refkey11,
r.refkey12 AS refkey12, r.refkey13 AS refkey13,
r.refkey14 AS refkey14, r.refkey15 AS refkey15,
r.refkey16 AS refkey16
FROM sysreferences r JOIN sysobjects o on r.tableid = o.id
WHERE r.tableid = :table_id
""")
referential_constraints = connection.execute(REFCONSTRAINT_SQL,
table_id=table_id)
REFTABLE_SQL = text("""
SELECT o.name AS name, u.name AS 'schema'
FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
WHERE o.id = :table_id
""")
for r in referential_constraints:
reftable_id = r["reftable_id"]
if reftable_id not in table_cache:
c = connection.execute(REFTABLE_SQL, table_id=reftable_id)
reftable = c.fetchone()
c.close()
table_info = {"name": reftable["name"], "schema": None}
if (schema is not None or
reftable["schema"] != self.default_schema_name):
table_info["schema"] = reftable["schema"]
table_cache[reftable_id] = table_info
results = connection.execute(COLUMN_SQL, table_id=reftable_id)
reftable_columns = {}
for col in results:
reftable_columns[col["id"]] = col["name"]
column_cache[reftable_id] = reftable_columns
reftable = table_cache[reftable_id]
reftable_columns = column_cache[reftable_id]
constrained_columns = []
referred_columns = []
for i in range(1, r["count"] + 1):
constrained_columns.append(columns[r["fokey%i" % i]])
referred_columns.append(reftable_columns[r["refkey%i" % i]])
fk_info = {
"constrained_columns": constrained_columns,
"referred_schema": reftable["schema"],
"referred_table": reftable["name"],
"referred_columns": referred_columns,
"name": r["name"]
}
foreign_keys.append(fk_info)
return foreign_keys
    @reflection.cache
    def get_indexes(self, connection, table_name, schema=None, **kw):
        """Reflect non-PK indexes for ``table_name`` from sysindexes."""
        table_id = self.get_table_id(connection, table_name, schema,
                                     info_cache=kw.get("info_cache"))

        # status & 0x2 marks a unique index; status & 2048 marks the PK
        # index, which is excluded here and handled by get_pk_constraint.
        INDEX_SQL = text("""
          SELECT object_name(i.id) AS table_name,
                 i.keycnt AS 'count',
                 i.name AS name,
                 (i.status & 0x2) AS 'unique',
                 index_col(object_name(i.id), i.indid, 1) AS col_1,
                 index_col(object_name(i.id), i.indid, 2) AS col_2,
                 index_col(object_name(i.id), i.indid, 3) AS col_3,
                 index_col(object_name(i.id), i.indid, 4) AS col_4,
                 index_col(object_name(i.id), i.indid, 5) AS col_5,
                 index_col(object_name(i.id), i.indid, 6) AS col_6,
                 index_col(object_name(i.id), i.indid, 7) AS col_7,
                 index_col(object_name(i.id), i.indid, 8) AS col_8,
                 index_col(object_name(i.id), i.indid, 9) AS col_9,
                 index_col(object_name(i.id), i.indid, 10) AS col_10,
                 index_col(object_name(i.id), i.indid, 11) AS col_11,
                 index_col(object_name(i.id), i.indid, 12) AS col_12,
                 index_col(object_name(i.id), i.indid, 13) AS col_13,
                 index_col(object_name(i.id), i.indid, 14) AS col_14,
                 index_col(object_name(i.id), i.indid, 15) AS col_15,
                 index_col(object_name(i.id), i.indid, 16) AS col_16
          FROM sysindexes i, sysobjects o
          WHERE o.id = i.id
            AND o.id = :table_id
            AND (i.status & 2048) = 0
            AND i.indid BETWEEN 1 AND 254
        """)

        results = connection.execute(INDEX_SQL, table_id=table_id)
        indexes = []
        for r in results:
            column_names = []
            # NOTE(review): iterates to count - 1, unlike get_pk_constraint
            # which uses count + 1; sysindexes.keycnt appears to include an
            # extra entry for these indexes -- confirm against ASE docs.
            for i in range(1, r["count"]):
                column_names.append(r["col_%i" % (i,)])
            index_info = {"name": r["name"],
                          "unique": bool(r["unique"]),
                          "column_names": column_names}
            indexes.append(index_info)

        return indexes
    @reflection.cache
    def get_pk_constraint(self, connection, table_name, schema=None, **kw):
        """Reflect the primary key constraint for ``table_name``."""
        table_id = self.get_table_id(connection, table_name, schema,
                                     info_cache=kw.get("info_cache"))

        # status & 2048 selects the index backing the primary key.
        PK_SQL = text("""
          SELECT object_name(i.id) AS table_name,
                 i.keycnt AS 'count',
                 i.name AS name,
                 index_col(object_name(i.id), i.indid, 1) AS pk_1,
                 index_col(object_name(i.id), i.indid, 2) AS pk_2,
                 index_col(object_name(i.id), i.indid, 3) AS pk_3,
                 index_col(object_name(i.id), i.indid, 4) AS pk_4,
                 index_col(object_name(i.id), i.indid, 5) AS pk_5,
                 index_col(object_name(i.id), i.indid, 6) AS pk_6,
                 index_col(object_name(i.id), i.indid, 7) AS pk_7,
                 index_col(object_name(i.id), i.indid, 8) AS pk_8,
                 index_col(object_name(i.id), i.indid, 9) AS pk_9,
                 index_col(object_name(i.id), i.indid, 10) AS pk_10,
                 index_col(object_name(i.id), i.indid, 11) AS pk_11,
                 index_col(object_name(i.id), i.indid, 12) AS pk_12,
                 index_col(object_name(i.id), i.indid, 13) AS pk_13,
                 index_col(object_name(i.id), i.indid, 14) AS pk_14,
                 index_col(object_name(i.id), i.indid, 15) AS pk_15,
                 index_col(object_name(i.id), i.indid, 16) AS pk_16
          FROM sysindexes i, sysobjects o
          WHERE o.id = i.id
            AND o.id = :table_id
            AND (i.status & 2048) = 2048
            AND i.indid BETWEEN 1 AND 254
        """)

        results = connection.execute(PK_SQL, table_id=table_id)
        pks = results.fetchone()
        results.close()

        constrained_columns = []
        for i in range(1, pks["count"] + 1):
            constrained_columns.append(pks["pk_%i" % (i,)])
        return {"constrained_columns": constrained_columns,
                "name": pks["name"]}
@reflection.cache
def get_schema_names(self, connection, **kw):
SCHEMA_SQL = text("SELECT u.name AS name FROM sysusers u")
schemas = connection.execute(SCHEMA_SQL)
return [s["name"] for s in schemas]
    @reflection.cache
    def get_table_names(self, connection, schema=None, **kw):
        """Return user table names ('U' objects) in ``schema``."""
        if schema is None:
            schema = self.default_schema_name

        TABLE_SQL = text("""
          SELECT o.name AS name
          FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
          WHERE u.name = :schema_name
            AND o.type = 'U'
        """)

        if util.py2k:
            # Under Python 2, pass the schema name as a bytestring.
            if isinstance(schema, unicode):
                schema = schema.encode("ascii")

        tables = connection.execute(TABLE_SQL, schema_name=schema)
        return [t["name"] for t in tables]
    @reflection.cache
    def get_view_definition(self, connection, view_name, schema=None, **kw):
        """Return the source text of ``view_name`` from syscomments."""
        if schema is None:
            schema = self.default_schema_name

        VIEW_DEF_SQL = text("""
          SELECT c.text
          FROM syscomments c JOIN sysobjects o ON c.id = o.id
          WHERE o.name = :view_name
            AND o.type = 'V'
        """)

        if util.py2k:
            # Under Python 2, pass the view name as a bytestring.
            if isinstance(view_name, unicode):
                view_name = view_name.encode("ascii")

        view = connection.execute(VIEW_DEF_SQL, view_name=view_name)
        return view.scalar()
    @reflection.cache
    def get_view_names(self, connection, schema=None, **kw):
        """Return view names ('V' objects) in ``schema``."""
        if schema is None:
            schema = self.default_schema_name

        VIEW_SQL = text("""
          SELECT o.name AS name
          FROM sysobjects o JOIN sysusers u ON o.uid = u.uid
          WHERE u.name = :schema_name
            AND o.type = 'V'
        """)

        if util.py2k:
            # Under Python 2, pass the schema name as a bytestring.
            if isinstance(schema, unicode):
                schema = schema.encode("ascii")

        views = connection.execute(VIEW_SQL, schema_name=schema)
        return [v["name"] for v in views]
def has_table(self, connection, table_name, schema=None):
try:
self.get_table_id(connection, table_name, schema)
except exc.NoSuchTableError:
return False
else:
return True
| gpl-3.0 |
joergdietrich/astropy | astropy/cosmology/tests/test_cosmology.py | 2 | 69922 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from io import StringIO
import numpy as np
from .. import core, funcs
from ...tests.helper import pytest, quantity_allclose as allclose
from ... import units as u
# HAS_SCIPY gates the tests that need integration machinery; it is
# consulted via @pytest.mark.skipif('not HAS_SCIPY') below.
try:
    import scipy  # pylint: disable=W0611
except ImportError:
    HAS_SCIPY = False
else:
    HAS_SCIPY = True
def test_init():
    """ Tests to make sure the code refuses inputs it is supposed to"""
    # Negative matter density
    with pytest.raises(ValueError):
        cosmo = core.FlatLambdaCDM(H0=70, Om0=-0.27)
    # Negative effective number of neutrino species
    with pytest.raises(ValueError):
        cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Neff=-1)
    # Non-scalar CMB temperature
    with pytest.raises(ValueError):
        cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27,
                                   Tcmb0=u.Quantity([0.0, 2], u.K))
    # Non-scalar Hubble constant
    with pytest.raises(ValueError):
        h0bad = u.Quantity([70, 100], u.km / u.s / u.Mpc)
        cosmo = core.FlatLambdaCDM(H0=h0bad, Om0=0.27)
    # Neutrino mass without units
    with pytest.raises(ValueError):
        cosmo = core.FlatLambdaCDM(H0=70, Om0=0.2, m_nu=0.5)
    # Negative neutrino mass
    with pytest.raises(ValueError):
        bad_mnu = u.Quantity([-0.3, 0.2, 0.1], u.eV)
        cosmo = core.FlatLambdaCDM(H0=70, Om0=0.2, m_nu=bad_mnu)
    # Wrong number of masses for the requested Neff
    with pytest.raises(ValueError):
        bad_mnu = u.Quantity([0.15, 0.2, 0.1], u.eV)
        cosmo = core.FlatLambdaCDM(H0=70, Om0=0.2, Neff=2, m_nu=bad_mnu)
    with pytest.raises(ValueError):
        bad_mnu = u.Quantity([-0.3, 0.2], u.eV)  # 2, expecting 3
        cosmo = core.FlatLambdaCDM(H0=70, Om0=0.2, m_nu=bad_mnu)
    # Baryon density: negative, or larger than the total matter density
    with pytest.raises(ValueError):
        cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Ob0=-0.04)
    with pytest.raises(ValueError):
        cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Ob0=0.4)
    # Baryon/dark-matter queries require Ob0 to have been supplied
    with pytest.raises(ValueError):
        cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27)
        cosmo.Ob(1)
    with pytest.raises(ValueError):
        cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27)
        cosmo.Odm(1)
    # default_cosmology.validate only accepts cosmology-like values
    with pytest.raises(TypeError):
        core.default_cosmology.validate(4)
def test_basic():
    """Check the density parameters and basic attributes of FlatLambdaCDM."""
    cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=2.0, Neff=3.04,
                               Ob0=0.05)
    assert allclose(cosmo.Om0, 0.27)
    assert allclose(cosmo.Ode0, 0.729975, rtol=1e-4)
    assert allclose(cosmo.Ob0, 0.05)
    assert allclose(cosmo.Odm0, 0.27 - 0.05)
    # This next test will fail if astropy.const starts returning non-mks
    # units by default; see the comment at the top of core.py
    assert allclose(cosmo.Ogamma0, 1.463285e-5, rtol=1e-4)
    assert allclose(cosmo.Onu0, 1.01026e-5, rtol=1e-4)
    assert allclose(cosmo.Ok0, 0.0)
    # Flat cosmology: densities must sum to unity, at z=0 and z=1
    assert allclose(cosmo.Om0 + cosmo.Ode0 + cosmo.Ogamma0 + cosmo.Onu0,
                    1.0, rtol=1e-6)
    assert allclose(cosmo.Om(1) + cosmo.Ode(1) + cosmo.Ogamma(1) +
                    cosmo.Onu(1), 1.0, rtol=1e-6)
    assert allclose(cosmo.Tcmb0, 2.0 * u.K)
    assert allclose(cosmo.Tnu0, 1.4275317 * u.K, rtol=1e-5)
    assert allclose(cosmo.Neff, 3.04)
    assert allclose(cosmo.h, 0.7)
    assert allclose(cosmo.H0, 70.0 * u.km / u.s / u.Mpc)

    # Make sure setting them as quantities gives the same results
    H0 = u.Quantity(70, u.km / (u.s * u.Mpc))
    T = u.Quantity(2.0, u.K)
    cosmo = core.FlatLambdaCDM(H0=H0, Om0=0.27, Tcmb0=T, Neff=3.04, Ob0=0.05)
    assert allclose(cosmo.Om0, 0.27)
    assert allclose(cosmo.Ode0, 0.729975, rtol=1e-4)
    assert allclose(cosmo.Ob0, 0.05)
    assert allclose(cosmo.Odm0, 0.27 - 0.05)
    assert allclose(cosmo.Ogamma0, 1.463285e-5, rtol=1e-4)
    assert allclose(cosmo.Onu0, 1.01026e-5, rtol=1e-4)
    assert allclose(cosmo.Ok0, 0.0)
    assert allclose(cosmo.Om0 + cosmo.Ode0 + cosmo.Ogamma0 + cosmo.Onu0,
                    1.0, rtol=1e-6)
    assert allclose(cosmo.Om(1) + cosmo.Ode(1) + cosmo.Ogamma(1) +
                    cosmo.Onu(1), 1.0, rtol=1e-6)
    assert allclose(cosmo.Tcmb0, 2.0 * u.K)
    assert allclose(cosmo.Tnu0, 1.4275317 * u.K, rtol=1e-5)
    assert allclose(cosmo.Neff, 3.04)
    assert allclose(cosmo.h, 0.7)
    assert allclose(cosmo.H0, 70.0 * u.km / u.s / u.Mpc)
@pytest.mark.skipif('not HAS_SCIPY')
def test_units():
    """ Test if the right units are being returned"""
    cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=2.0)

    # Distances in Mpc
    assert cosmo.comoving_distance(1.0).unit == u.Mpc
    assert cosmo._comoving_distance_z1z2(1.0, 2.0).unit == u.Mpc
    assert cosmo.comoving_transverse_distance(1.0).unit == u.Mpc
    assert cosmo._comoving_transverse_distance_z1z2(1.0, 2.0).unit == u.Mpc
    assert cosmo.angular_diameter_distance(1.0).unit == u.Mpc
    assert cosmo.angular_diameter_distance_z1z2(1.0, 2.0).unit == u.Mpc
    assert cosmo.luminosity_distance(1.0).unit == u.Mpc
    assert cosmo.lookback_time(1.0).unit == u.Gyr
    assert cosmo.lookback_distance(1.0).unit == u.Mpc
    # Expansion rate and temperatures
    assert cosmo.H0.unit == u.km / u.Mpc / u.s
    assert cosmo.H(1.0).unit == u.km / u.Mpc / u.s
    assert cosmo.Tcmb0.unit == u.K
    assert cosmo.Tcmb(1.0).unit == u.K
    assert cosmo.Tcmb([0.0, 1.0]).unit == u.K
    assert cosmo.Tnu0.unit == u.K
    assert cosmo.Tnu(1.0).unit == u.K
    assert cosmo.Tnu([0.0, 1.0]).unit == u.K
    # Angular scales and misc
    assert cosmo.arcsec_per_kpc_comoving(1.0).unit == u.arcsec / u.kpc
    assert cosmo.arcsec_per_kpc_proper(1.0).unit == u.arcsec / u.kpc
    assert cosmo.kpc_comoving_per_arcmin(1.0).unit == u.kpc / u.arcmin
    assert cosmo.kpc_proper_per_arcmin(1.0).unit == u.kpc / u.arcmin
    assert cosmo.critical_density(1.0).unit == u.g / u.cm ** 3
    assert cosmo.comoving_volume(1.0).unit == u.Mpc ** 3
    assert cosmo.age(1.0).unit == u.Gyr
    assert cosmo.distmod(1.0).unit == u.mag
@pytest.mark.skipif('not HAS_SCIPY')
def test_distance_broadcast():
    """ Test array shape broadcasting for functions with single
    redshift inputs"""

    cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27,
                               m_nu=u.Quantity([0.0, 0.1, 0.011], u.eV))
    z = np.linspace(0.1, 1, 6)
    z_reshape2d = z.reshape(2, 3)
    z_reshape3d = z.reshape(3, 2, 1)
    # Things with units
    methods = ['comoving_distance', 'luminosity_distance',
               'comoving_transverse_distance', 'angular_diameter_distance',
               'distmod', 'lookback_time', 'age', 'comoving_volume',
               'differential_comoving_volume', 'kpc_comoving_per_arcmin']
    for method in methods:
        g = getattr(cosmo, method)
        value_flat = g(z)
        assert value_flat.shape == z.shape
        value_2d = g(z_reshape2d)
        assert value_2d.shape == z_reshape2d.shape
        value_3d = g(z_reshape3d)
        assert value_3d.shape == z_reshape3d.shape
        assert value_flat.unit == value_2d.unit
        assert value_flat.unit == value_3d.unit
        assert allclose(value_flat, value_2d.flatten())
        assert allclose(value_flat, value_3d.flatten())

    # Also test unitless ones
    methods = ['absorption_distance', 'Om', 'Ode', 'Ok', 'H',
               'w', 'de_density_scale', 'Onu', 'Ogamma',
               'nu_relative_density']
    for method in methods:
        g = getattr(cosmo, method)
        value_flat = g(z)
        assert value_flat.shape == z.shape
        value_2d = g(z_reshape2d)
        assert value_2d.shape == z_reshape2d.shape
        value_3d = g(z_reshape3d)
        assert value_3d.shape == z_reshape3d.shape
        assert allclose(value_flat, value_2d.flatten())
        assert allclose(value_flat, value_3d.flatten())

    # Test some dark energy models
    methods = ['Om', 'Ode', 'w', 'de_density_scale']
    for tcosmo in [core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.5),
                   core.wCDM(H0=70, Om0=0.27, Ode0=0.5, w0=-1.2),
                   core.w0waCDM(H0=70, Om0=0.27, Ode0=0.5, w0=-1.2, wa=-0.2),
                   core.wpwaCDM(H0=70, Om0=0.27, Ode0=0.5,
                                wp=-1.2, wa=-0.2, zp=0.9),
                   core.w0wzCDM(H0=70, Om0=0.27, Ode0=0.5, w0=-1.2, wz=0.1)]:
        for method in methods:
            # BUG FIX: was ``getattr(cosmo, method)``, which re-tested
            # the FlatLambdaCDM instance above instead of the dark energy
            # model constructed for this loop iteration.
            g = getattr(tcosmo, method)
            value_flat = g(z)
            assert value_flat.shape == z.shape
            value_2d = g(z_reshape2d)
            assert value_2d.shape == z_reshape2d.shape
            value_3d = g(z_reshape3d)
            assert value_3d.shape == z_reshape3d.shape
            assert allclose(value_flat, value_2d.flatten())
            assert allclose(value_flat, value_3d.flatten())
@pytest.mark.skipif('not HAS_SCIPY')
def test_clone():
    """ Test clone operation"""
    cosmo = core.FlatLambdaCDM(H0=70 * u.km / u.s / u.Mpc, Om0=0.27,
                               Tcmb0=3.0 * u.K)
    z = np.linspace(0.1, 3, 15)

    # First, test with no changes, which should return same object
    newclone = cosmo.clone()
    assert newclone is cosmo

    # Now change H0
    #  Note that H0 affects Ode0 because it changes Ogamma0
    newclone = cosmo.clone(H0=60 * u.km / u.s / u.Mpc)
    assert newclone is not cosmo
    assert newclone.__class__ == cosmo.__class__
    assert newclone.name == cosmo.name
    assert not allclose(newclone.H0.value, cosmo.H0.value)
    assert allclose(newclone.H0, 60.0 * u.km / u.s / u.Mpc)
    assert allclose(newclone.Om0, cosmo.Om0)
    assert allclose(newclone.Ok0, cosmo.Ok0)
    assert not allclose(newclone.Ogamma0, cosmo.Ogamma0)
    assert not allclose(newclone.Onu0, cosmo.Onu0)
    assert allclose(newclone.Tcmb0, cosmo.Tcmb0)
    assert allclose(newclone.m_nu, cosmo.m_nu)
    assert allclose(newclone.Neff, cosmo.Neff)

    # Compare modified version with directly instantiated one
    cmp = core.FlatLambdaCDM(H0=60 * u.km / u.s / u.Mpc, Om0=0.27,
                             Tcmb0=3.0 * u.K)
    assert newclone.__class__ == cmp.__class__
    assert newclone.name == cmp.name
    assert allclose(newclone.H0, cmp.H0)
    assert allclose(newclone.Om0, cmp.Om0)
    assert allclose(newclone.Ode0, cmp.Ode0)
    assert allclose(newclone.Ok0, cmp.Ok0)
    assert allclose(newclone.Ogamma0, cmp.Ogamma0)
    assert allclose(newclone.Onu0, cmp.Onu0)
    assert allclose(newclone.Tcmb0, cmp.Tcmb0)
    assert allclose(newclone.m_nu, cmp.m_nu)
    assert allclose(newclone.Neff, cmp.Neff)
    # Derived quantities must agree as well
    assert allclose(newclone.Om(z), cmp.Om(z))
    assert allclose(newclone.H(z), cmp.H(z))
    assert allclose(newclone.luminosity_distance(z),
                    cmp.luminosity_distance(z))

    # Now try changing multiple things
    newclone = cosmo.clone(name="New name", H0=65 * u.km / u.s / u.Mpc,
                           Tcmb0=2.8 * u.K)
    assert newclone.__class__ == cosmo.__class__
    assert not newclone.name == cosmo.name
    assert not allclose(newclone.H0.value, cosmo.H0.value)
    assert allclose(newclone.H0, 65.0 * u.km / u.s / u.Mpc)
    assert allclose(newclone.Om0, cosmo.Om0)
    assert allclose(newclone.Ok0, cosmo.Ok0)
    assert not allclose(newclone.Ogamma0, cosmo.Ogamma0)
    assert not allclose(newclone.Onu0, cosmo.Onu0)
    assert not allclose(newclone.Tcmb0.value, cosmo.Tcmb0.value)
    assert allclose(newclone.Tcmb0, 2.8 * u.K)
    assert allclose(newclone.m_nu, cosmo.m_nu)
    assert allclose(newclone.Neff, cosmo.Neff)

    # And direct comparison
    cmp = core.FlatLambdaCDM(name="New name", H0=65 * u.km / u.s / u.Mpc,
                             Om0=0.27, Tcmb0=2.8 * u.K)
    assert newclone.__class__ == cmp.__class__
    assert newclone.name == cmp.name
    assert allclose(newclone.H0, cmp.H0)
    assert allclose(newclone.Om0, cmp.Om0)
    assert allclose(newclone.Ode0, cmp.Ode0)
    assert allclose(newclone.Ok0, cmp.Ok0)
    assert allclose(newclone.Ogamma0, cmp.Ogamma0)
    assert allclose(newclone.Onu0, cmp.Onu0)
    assert allclose(newclone.Tcmb0, cmp.Tcmb0)
    assert allclose(newclone.m_nu, cmp.m_nu)
    assert allclose(newclone.Neff, cmp.Neff)
    assert allclose(newclone.Om(z), cmp.Om(z))
    assert allclose(newclone.H(z), cmp.H(z))
    assert allclose(newclone.luminosity_distance(z),
                    cmp.luminosity_distance(z))

    # Try a dark energy class, make sure it can handle w params
    cosmo = core.w0waCDM(name="test w0wa", H0=70 * u.km / u.s / u.Mpc,
                         Om0=0.27, Ode0=0.5, wa=0.1, Tcmb0=4.0 * u.K)
    newclone = cosmo.clone(w0=-1.1, wa=0.2)
    assert newclone.__class__ == cosmo.__class__
    assert newclone.name == cosmo.name
    assert allclose(newclone.H0, cosmo.H0)
    assert allclose(newclone.Om0, cosmo.Om0)
    assert allclose(newclone.Ode0, cosmo.Ode0)
    assert allclose(newclone.Ok0, cosmo.Ok0)
    assert not allclose(newclone.w0, cosmo.w0)
    assert allclose(newclone.w0, -1.1)
    assert not allclose(newclone.wa, cosmo.wa)
    assert allclose(newclone.wa, 0.2)

    # Now test exception if user passes non-parameter
    with pytest.raises(AttributeError):
        newclone = cosmo.clone(not_an_arg=4)
def test_xtfuncs():
    """Check the lookback-time and absorption-distance integrands."""
    cosmo = core.LambdaCDM(70, 0.3, 0.5)
    zarr = np.array([2.0, 3.2])
    # Scalar and array forms of the lookback-time integrand
    assert allclose(cosmo.lookback_time_integrand(3),
                    0.052218976654969378, rtol=1e-4)
    assert allclose(cosmo.lookback_time_integrand(zarr),
                    [0.10333179, 0.04644541], rtol=1e-4)
    # Scalar and array forms of the absorption-distance integrand
    assert allclose(cosmo.abs_distance_integrand(3),
                    3.3420145059180402, rtol=1e-4)
    assert allclose(cosmo.abs_distance_integrand(zarr),
                    [2.7899584, 3.44104758], rtol=1e-4)
def test_repr():
    """ Test string representation of built in classes"""
    # Unnamed cosmology: repr carries no name= field
    cosmo = core.LambdaCDM(70, 0.3, 0.5)
    expected = 'LambdaCDM(H0=70 km / (Mpc s), Om0=0.3, '\
               'Ode0=0.5, Tcmb0=2.725 K, Neff=3.04, m_nu=[ 0. 0. 0.] eV, '\
               'Ob0=None)'
    assert str(cosmo) == expected
    # Scalar m_nu is expanded to one mass per neutrino species
    cosmo = core.LambdaCDM(70, 0.3, 0.5, m_nu=u.Quantity(0.01, u.eV))
    expected = 'LambdaCDM(H0=70 km / (Mpc s), Om0=0.3, Ode0=0.5, '\
               'Tcmb0=2.725 K, Neff=3.04, m_nu=[ 0.01 0.01 0.01] eV, '\
               'Ob0=None)'
    assert str(cosmo) == expected
    cosmo = core.FlatLambdaCDM(50.0, 0.27, Ob0=0.05)
    expected = 'FlatLambdaCDM(H0=50 km / (Mpc s), Om0=0.27, '\
               'Tcmb0=2.725 K, Neff=3.04, m_nu=[ 0. 0. 0.] eV, Ob0=0.05)'
    assert str(cosmo) == expected
    # Named cosmologies: repr starts with name="..."
    cosmo = core.wCDM(60.0, 0.27, 0.6, w0=-0.8, name='test1')
    expected = 'wCDM(name="test1", H0=60 km / (Mpc s), Om0=0.27, '\
               'Ode0=0.6, w0=-0.8, Tcmb0=2.725 K, Neff=3.04, '\
               'm_nu=[ 0. 0. 0.] eV, Ob0=None)'
    assert str(cosmo) == expected
    cosmo = core.FlatwCDM(65.0, 0.27, w0=-0.6, name='test2')
    expected = 'FlatwCDM(name="test2", H0=65 km / (Mpc s), Om0=0.27, '\
               'w0=-0.6, Tcmb0=2.725 K, Neff=3.04, m_nu=[ 0. 0. 0.] eV, '\
               'Ob0=None)'
    assert str(cosmo) == expected
    cosmo = core.w0waCDM(60.0, 0.25, 0.4, w0=-0.6, wa=0.1, name='test3')
    expected = 'w0waCDM(name="test3", H0=60 km / (Mpc s), Om0=0.25, '\
               'Ode0=0.4, w0=-0.6, wa=0.1, Tcmb0=2.725 K, Neff=3.04, '\
               'm_nu=[ 0. 0. 0.] eV, Ob0=None)'
    assert str(cosmo) == expected
    # Ob0 is rounded to 4 significant figures in the repr
    cosmo = core.Flatw0waCDM(55.0, 0.35, w0=-0.9, wa=-0.2, name='test4',
                             Ob0=0.0456789)
    expected = 'Flatw0waCDM(name="test4", H0=55 km / (Mpc s), Om0=0.35, '\
               'w0=-0.9, Tcmb0=2.725 K, Neff=3.04, m_nu=[ 0. 0. 0.] eV, '\
               'Ob0=0.0457)'
    assert str(cosmo) == expected
    cosmo = core.wpwaCDM(50.0, 0.3, 0.3, wp=-0.9, wa=-0.2,
                         zp=0.3, name='test5')
    expected = 'wpwaCDM(name="test5", H0=50 km / (Mpc s), Om0=0.3, '\
               'Ode0=0.3, wp=-0.9, wa=-0.2, zp=0.3, Tcmb0=2.725 K, '\
               'Neff=3.04, m_nu=[ 0. 0. 0.] eV, Ob0=None)'
    assert str(cosmo) == expected
    cosmo = core.w0wzCDM(55.0, 0.4, 0.8, w0=-1.05, wz=-0.2,
                         m_nu=u.Quantity([0.001, 0.01, 0.015], u.eV))
    # NOTE(review): the expected string below has no comma between
    # 'wz=-0.2' and 'Tcmb0' -- this mirrors what w0wzCDM.__repr__
    # currently emits; confirm whether that is a formatting bug in core.
    expected = 'w0wzCDM(H0=55 km / (Mpc s), Om0=0.4, Ode0=0.8, w0=-1.05, '\
               'wz=-0.2 Tcmb0=2.725 K, Neff=3.04, '\
               'm_nu=[ 0.001 0.01 0.015] eV, Ob0=None)'
    assert str(cosmo) == expected
@pytest.mark.skipif('not HAS_SCIPY')
def test_flat_z1():
    """ Test a flat cosmology at z=1 against several other on-line
    calculators.
    """
    cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=0.0)
    z = 1
    # Reference values were taken from the following web cosmology
    # calculators on 27th Feb 2012:
    # Wright: http://www.astro.ucla.edu/~wright/CosmoCalc.html
    # (http://adsabs.harvard.edu/abs/2006PASP..118.1711W)
    # Kempner: http://www.kempner.net/cosmic.php
    # iCosmos: http://www.icosmos.co.uk/index.html
    # Each expected entry lists the Wright, Kempner, iCosmos values.
    checks = [
        (cosmo.comoving_distance,
         [3364.5, 3364.8, 3364.7988] * u.Mpc, 1e-4),
        (cosmo.angular_diameter_distance,
         [1682.3, 1682.4, 1682.3994] * u.Mpc, 1e-4),
        (cosmo.luminosity_distance,
         [6729.2, 6729.6, 6729.5976] * u.Mpc, 1e-4),
        (cosmo.lookback_time,
         [7.841, 7.84178, 7.843] * u.Gyr, 1e-3),
        (cosmo.lookback_distance,
         [2404.0, 2404.24, 2404.4] * u.Mpc, 1e-3),
    ]
    for method, expected, rtol in checks:
        assert allclose(method(z), expected, rtol=rtol)
def test_zeroing():
    """ Tests if setting params to 0s always respects that"""
    # Make sure Ode = 0 behaves that way
    cosmo = core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.0)
    assert allclose(cosmo.Ode([0, 1, 2, 3]), [0, 0, 0, 0])
    assert allclose(cosmo.Ode(1), 0)
    # Ogamma0 and Onu: a zero CMB temperature zeroes out both the
    # photon and neutrino densities at all redshifts.
    cosmo = core.FlatLambdaCDM(H0=70, Om0=0.27, Tcmb0=0.0)
    # Scalar inputs return scalars, so compare against a scalar zero.
    # (The previous version compared the scalar result to a 4-vector,
    # which only passed through numpy broadcasting.)
    assert allclose(cosmo.Ogamma(1.5), 0)
    assert allclose(cosmo.Ogamma([0, 1, 2, 3]), [0, 0, 0, 0])
    assert allclose(cosmo.Onu(1.5), 0)
    assert allclose(cosmo.Onu([0, 1, 2, 3]), [0, 0, 0, 0])
    # Obaryon
    cosmo = core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.73, Ob0=0.0)
    assert allclose(cosmo.Ob([0, 1, 2, 3]), [0, 0, 0, 0])
# This class is to test whether the routines work correctly
# if one only overloads w(z)
class test_cos_sub(core.FLRW):
    """Minimal FLRW subclass overriding only w(z) (constant w = -0.9)."""

    def __init__(self):
        super(test_cos_sub, self).__init__(70.0, 0.27, 0.73, Tcmb0=0.0,
                                           name="test_cos")
        self._w0 = -0.9

    def w(self, z):
        # Constant equation of state, broadcast to the shape of z
        return self._w0 * np.ones_like(z)
# Similar, but with neutrinos
class test_cos_subnu(core.FLRW):
    """FLRW subclass overriding only w(z), with massive neutrinos."""

    def __init__(self):
        super(test_cos_subnu, self).__init__(70.0, 0.27, 0.73, Tcmb0=3.0,
                                             m_nu=0.1 * u.eV,
                                             name="test_cos_nu")
        self._w0 = -0.8

    def w(self, z):
        # Constant equation of state, broadcast to the shape of z
        return self._w0 * np.ones_like(z)
@pytest.mark.skipif('not HAS_SCIPY')
def test_de_subclass():
    """Check that a subclass overriding only w(z) reproduces wCDM."""
    # This is the comparison object
    z = [0.2, 0.4, 0.6, 0.9]
    cosmo = core.wCDM(H0=70, Om0=0.27, Ode0=0.73, w0=-0.9, Tcmb0=0.0)
    # Values taken from Ned Wrights advanced cosmo calculator, Aug 17 2012
    assert allclose(cosmo.luminosity_distance(z),
                    [975.5, 2158.2, 3507.3, 5773.1] * u.Mpc, rtol=1e-3)
    # Now try the subclass that only gives w(z); it should match the
    # built-in wCDM with the same (constant) w0 = -0.9
    cosmo = test_cos_sub()
    assert allclose(cosmo.luminosity_distance(z),
                    [975.5, 2158.2, 3507.3, 5773.1] * u.Mpc, rtol=1e-3)
    # Test efunc
    assert allclose(cosmo.efunc(1.0), 1.7489240754, rtol=1e-5)
    assert allclose(cosmo.efunc([0.5, 1.0]),
                    [1.31744953, 1.7489240754], rtol=1e-5)
    assert allclose(cosmo.inv_efunc([0.5, 1.0]),
                    [0.75904236, 0.57178011], rtol=1e-5)
    # Test de_density_scale
    assert allclose(cosmo.de_density_scale(1.0), 1.23114444, rtol=1e-4)
    assert allclose(cosmo.de_density_scale([0.5, 1.0]),
                    [1.12934694, 1.23114444], rtol=1e-4)
# Add neutrinos for efunc, inv_efunc
@pytest.mark.skipif('not HAS_SCIPY')
def test_varyde_lumdist_mathematica():
    """Tests a few varying dark energy EOS models against a mathematica
    computation"""
    # w0wa models
    z = np.array([0.2, 0.4, 0.9, 1.2])
    cosmo = core.w0waCDM(H0=70, Om0=0.2, Ode0=0.8, w0=-1.1, wa=0.2, Tcmb0=0.0)
    # EOS parameters should round-trip through the constructor
    assert allclose(cosmo.w0, -1.1)
    assert allclose(cosmo.wa, 0.2)
    assert allclose(cosmo.luminosity_distance(z),
                    [1004.0, 2268.62, 6265.76, 9061.84] * u.Mpc, rtol=1e-4)
    # de_density_scale is normalized to 1 at z = 0 by construction
    assert allclose(cosmo.de_density_scale(0.0), 1.0, rtol=1e-5)
    assert allclose(cosmo.de_density_scale([0.0, 0.5, 1.5]),
                    [1.0, 0.9246310669529021, 0.9184087000251957])
    # wa = 0 reduces w0waCDM to a constant-w model
    cosmo = core.w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=0.0, Tcmb0=0.0)
    assert allclose(cosmo.luminosity_distance(z),
                    [971.667, 2141.67, 5685.96, 8107.41] * u.Mpc, rtol=1e-4)
    cosmo = core.w0waCDM(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=-0.5,
                         Tcmb0=0.0)
    assert allclose(cosmo.luminosity_distance(z),
                    [974.087, 2157.08, 5783.92, 8274.08] * u.Mpc, rtol=1e-4)
    # wpwa models (CPL-like EOS pivoted at redshift zp)
    cosmo = core.wpwaCDM(H0=70, Om0=0.2, Ode0=0.8, wp=-1.1, wa=0.2, zp=0.5,
                         Tcmb0=0.0)
    assert allclose(cosmo.wp, -1.1)
    assert allclose(cosmo.wa, 0.2)
    assert allclose(cosmo.zp, 0.5)
    assert allclose(cosmo.luminosity_distance(z),
                    [1010.81, 2294.45, 6369.45, 9218.95] * u.Mpc, rtol=1e-4)
    # Same model with a different pivot redshift
    cosmo = core.wpwaCDM(H0=70, Om0=0.2, Ode0=0.8, wp=-1.1, wa=0.2, zp=0.9,
                         Tcmb0=0.0)
    assert allclose(cosmo.wp, -1.1)
    assert allclose(cosmo.wa, 0.2)
    assert allclose(cosmo.zp, 0.9)
    assert allclose(cosmo.luminosity_distance(z),
                    [1013.68, 2305.3, 6412.37, 9283.33] * u.Mpc, rtol=1e-4)
@pytest.mark.skipif('not HAS_SCIPY')
def test_matter():
    """Check the evolution of the non-relativistic matter densities."""
    # Test non-relativistic matter evolution
    tcos = core.FlatLambdaCDM(70.0, 0.3, Ob0=0.045)
    assert allclose(tcos.Om0, 0.3)
    assert allclose(tcos.H0, 70.0 * u.km / u.s / u.Mpc)
    # z = 0 values reduce to the input parameters
    assert allclose(tcos.Om(0), 0.3)
    assert allclose(tcos.Ob(0), 0.045)
    z = np.array([0.0, 0.5, 1.0, 2.0])
    assert allclose(tcos.Om(z), [0.3, 0.59112134, 0.77387435, 0.91974179],
                    rtol=1e-4)
    assert allclose(tcos.Ob(z),
                    [0.045, 0.08866820, 0.11608115, 0.13796127], rtol=1e-4)
    assert allclose(tcos.Odm(z), [0.255, 0.50245314, 0.6577932, 0.78178052],
                    rtol=1e-4)
    # Consistency of dark and baryonic matter evolution with all
    # non-relativistic matter
    assert allclose(tcos.Ob(z) + tcos.Odm(z), tcos.Om(z))
@pytest.mark.skipif('not HAS_SCIPY')
def test_ocurv():
    """Check the evolution of the curvature density Ok(z)."""
    # Test Ok evolution
    # Flat, boring case: Ok is identically zero
    tcos = core.FlatLambdaCDM(70.0, 0.3)
    assert allclose(tcos.Ok0, 0.0)
    assert allclose(tcos.Ok(0), 0.0)
    z = np.array([0.0, 0.5, 1.0, 2.0])
    assert allclose(tcos.Ok(z), [0.0, 0.0, 0.0, 0.0],
                    rtol=1e-6)
    # Not flat: Ok0 = 1 - Om0 - Ode0 = 0.2 here
    tcos = core.LambdaCDM(70.0, 0.3, 0.5, Tcmb0=u.Quantity(0.0, u.K))
    assert allclose(tcos.Ok0, 0.2)
    assert allclose(tcos.Ok(0), 0.2)
    assert allclose(tcos.Ok(z), [0.2, 0.22929936, 0.21621622, 0.17307692],
                    rtol=1e-4)
    # Test the sum; note that Ogamma/Onu are 0
    assert allclose(tcos.Ok(z) + tcos.Om(z) + tcos.Ode(z),
                    [1.0, 1.0, 1.0, 1.0], rtol=1e-5)
@pytest.mark.skipif('not HAS_SCIPY')
def test_ode():
    """Check dark-energy density evolution with photons/neutrinos off."""
    tcos = core.FlatLambdaCDM(70.0, 0.3, Tcmb0=0)
    # Flat universe with no radiation: Ode0 = 1 - Om0 = 0.7
    assert allclose(tcos.Ode0, 0.7)
    assert allclose(tcos.Ode(0), 0.7)
    redshifts = np.array([0.0, 0.5, 1.0, 2.0])
    expected = [0.7, 0.408759, 0.2258065, 0.07954545]
    assert allclose(tcos.Ode(redshifts), expected, rtol=1e-5)
@pytest.mark.skipif('not HAS_SCIPY')
def test_ogamma():
    """Tests the effects of changing the temperature of the CMB"""
    # Tested against Ned Wright's advanced cosmology calculator,
    # Sep 7 2012. The accuracy of our comparision is limited by
    # how many digits it outputs, which limits our test to about
    # 0.2% accuracy. The NWACC does not allow one
    # to change the number of nuetrino species, fixing that at 3.
    # Also, inspection of the NWACC code shows it uses inaccurate
    # constants at the 0.2% level (specifically, a_B),
    # so we shouldn't expect to match it that well. The integral is
    # also done rather crudely. Therefore, we should not expect
    # the NWACC to be accurate to better than about 0.5%, which is
    # unfortunate, but reflects a problem with it rather than this code.
    # More accurate tests below using Mathematica
    z = np.array([1.0, 10.0, 500.0, 1000.0])
    cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=0, Neff=3)
    assert allclose(cosmo.angular_diameter_distance(z),
                    [1651.9, 858.2, 26.855, 13.642] * u.Mpc, rtol=5e-4)
    cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725, Neff=3)
    assert allclose(cosmo.angular_diameter_distance(z),
                    [1651.8, 857.9, 26.767, 13.582] * u.Mpc, rtol=5e-4)
    cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=4.0, Neff=3)
    assert allclose(cosmo.angular_diameter_distance(z),
                    [1651.4, 856.6, 26.489, 13.405] * u.Mpc, rtol=5e-4)
    # Next compare with doing the integral numerically in Mathematica,
    # which allows more precision in the test. It is at least as
    # good as 0.01%, possibly better
    cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=0, Neff=3.04)
    assert allclose(cosmo.angular_diameter_distance(z),
                    [1651.91, 858.205, 26.8586, 13.6469] * u.Mpc, rtol=1e-5)
    cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725, Neff=3.04)
    assert allclose(cosmo.angular_diameter_distance(z),
                    [1651.76, 857.817, 26.7688, 13.5841] * u.Mpc, rtol=1e-5)
    cosmo = core.FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=4.0, Neff=3.04)
    assert allclose(cosmo.angular_diameter_distance(z),
                    [1651.21, 856.411, 26.4845, 13.4028] * u.Mpc, rtol=1e-5)
    # Just to be really sure, we also do a version where the integral
    # is analytic, which is a Ode = 0 flat universe. In this case
    # Integrate(1/E(x),{x,0,z}) = 2 ( sqrt((1+Or z)/(1+z)) - 1 )/(Or - 1)
    # Recall that c/H0 * Integrate(1/E) is FLRW.comoving_distance.
    Ogamma0h2 = 4 * 5.670373e-8 / 299792458.0 ** 3 * 2.725 ** 4 / 1.87837e-26
    Onu0h2 = Ogamma0h2 * 7.0 / 8.0 * (4.0 / 11.0) ** (4.0 / 3.0) * 3.04
    Or0 = (Ogamma0h2 + Onu0h2) / 0.7 ** 2
    Om0 = 1.0 - Or0
    hubdis = (299792.458 / 70.0) * u.Mpc
    cosmo = core.FlatLambdaCDM(H0=70, Om0=Om0, Tcmb0=2.725, Neff=3.04)
    targvals = 2.0 * hubdis * \
        (np.sqrt((1.0 + Or0 * z) / (1.0 + z)) - 1.0) / (Or0 - 1.0)
    assert allclose(cosmo.comoving_distance(z), targvals, rtol=1e-5)
    # And integers for z.  Use the builtin int here: np.int was a
    # deprecated alias for it and is removed in NumPy >= 1.24.
    assert allclose(cosmo.comoving_distance(z.astype(int)),
                    targvals, rtol=1e-5)
    # Try Tcmb0 = 4
    Or0 *= (4.0 / 2.725) ** 4
    Om0 = 1.0 - Or0
    cosmo = core.FlatLambdaCDM(H0=70, Om0=Om0, Tcmb0=4.0, Neff=3.04)
    targvals = 2.0 * hubdis * \
        (np.sqrt((1.0 + Or0 * z) / (1.0 + z)) - 1.0) / (Or0 - 1.0)
    assert allclose(cosmo.comoving_distance(z), targvals, rtol=1e-5)
@pytest.mark.skipif('not HAS_SCIPY')
def test_tcmb():
    """CMB temperature scaling: Tcmb(z) = Tcmb0 * (1 + z)."""
    cosmo = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=2.5)
    assert allclose(cosmo.Tcmb0, 2.5 * u.K)
    assert allclose(cosmo.Tcmb(2), 7.5 * u.K)
    expected = [2.5, 5.0, 7.5, 10.0, 25.0] * u.K
    # Float and integer redshift inputs must give identical results
    assert allclose(cosmo.Tcmb([0.0, 1.0, 2.0, 3.0, 9.0]), expected,
                    rtol=1e-6)
    assert allclose(cosmo.Tcmb([0, 1, 2, 3, 9]), expected, rtol=1e-6)
@pytest.mark.skipif('not HAS_SCIPY')
def test_tnu():
    """Neutrino temperature, which scales as (1 + z) like the CMB."""
    cosmo = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0)
    assert allclose(cosmo.Tnu0, 2.1412975665108247 * u.K, rtol=1e-6)
    assert allclose(cosmo.Tnu(2), 6.423892699532474 * u.K, rtol=1e-6)
    expected = [2.14129757, 4.28259513, 6.4238927, 8.56519027] * u.K
    # Float and integer redshift inputs must give identical results
    assert allclose(cosmo.Tnu([0.0, 1.0, 2.0, 3.0]), expected, rtol=1e-6)
    assert allclose(cosmo.Tnu([0, 1, 2, 3]), expected, rtol=1e-6)
def test_efunc_vs_invefunc():
    """ Test that efunc and inv_efunc give inverse values"""
    # Note that all of the subclasses here don't need
    # scipy because they don't need to call de_density_scale
    # The test following this tests the case where that is needed.
    z0 = 0.5
    z = np.array([0.5, 1.0, 2.0, 5.0])
    # Below are the 'standard' included cosmologies; the non-standard
    # case is in test_efunc_vs_invefunc_flrw, since it requires scipy.
    cosmologies = [
        core.LambdaCDM(70, 0.3, 0.5),
        core.LambdaCDM(70, 0.3, 0.5, m_nu=u.Quantity(0.01, u.eV)),
        core.FlatLambdaCDM(50.0, 0.27),
        core.wCDM(60.0, 0.27, 0.6, w0=-0.8),
        core.FlatwCDM(65.0, 0.27, w0=-0.6),
        core.w0waCDM(60.0, 0.25, 0.4, w0=-0.6, wa=0.1),
        core.Flatw0waCDM(55.0, 0.35, w0=-0.9, wa=-0.2),
        core.wpwaCDM(50.0, 0.3, 0.3, wp=-0.9, wa=-0.2, zp=0.3),
        core.w0wzCDM(55.0, 0.4, 0.8, w0=-1.05, wz=-0.2),
    ]
    # Scalar and array inputs for every model
    for cosmo in cosmologies:
        assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
        assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
@pytest.mark.skipif('not HAS_SCIPY')
def test_efunc_vs_invefunc_flrw():
    """ Test that efunc and inv_efunc give inverse values"""
    z0 = 0.5
    z = np.array([0.5, 1.0, 2.0, 5.0])
    # FLRW is abstract, so this relies on the test_cos_sub and
    # test_cos_subnu subclasses defined earlier. Unlike the built-ins,
    # these need scipy because de_density_scale involves an integral.
    for cosmo in (test_cos_sub(), test_cos_subnu()):
        assert allclose(cosmo.efunc(z0), 1.0 / cosmo.inv_efunc(z0))
        assert allclose(cosmo.efunc(z), 1.0 / cosmo.inv_efunc(z))
@pytest.mark.skipif('not HAS_SCIPY')
def test_kpc_methods():
    """Angular-scale conversion helpers, checked at z = 3."""
    cosmo = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
    expectations = [
        (cosmo.arcsec_per_kpc_comoving, 0.0317179167 * u.arcsec / u.kpc),
        (cosmo.arcsec_per_kpc_proper, 0.1268716668 * u.arcsec / u.kpc),
        (cosmo.kpc_comoving_per_arcmin, 1891.6753126 * u.kpc / u.arcmin),
        (cosmo.kpc_proper_per_arcmin, 472.918828 * u.kpc / u.arcmin),
    ]
    for method, expected in expectations:
        assert allclose(method(3), expected)
@pytest.mark.skipif('not HAS_SCIPY')
def test_comoving_volume():
    """Comoving volume for flat, open and closed universes."""
    c_flat = core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.73, Tcmb0=0.0)
    c_open = core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.0, Tcmb0=0.0)
    c_closed = core.LambdaCDM(H0=70, Om0=2, Ode0=0.0, Tcmb0=0.0)
    # test against ned wright's calculator (cubic Gpc)
    redshifts = np.array([0.5, 1, 2, 3, 5, 9])
    wright_flat = np.array([29.123, 159.529, 630.427, 1178.531, 2181.485,
                            3654.802]) * u.Gpc**3
    wright_open = np.array([20.501, 99.019, 380.278, 747.049, 1558.363,
                            3123.814]) * u.Gpc**3
    wright_closed = np.array([12.619, 44.708, 114.904, 173.709, 258.82,
                              358.992]) * u.Gpc**3
    # The wright calculator isn't very accurate, so we use a rather
    # modest precision
    assert allclose(c_flat.comoving_volume(redshifts), wright_flat,
                    rtol=1e-2)
    assert allclose(c_open.comoving_volume(redshifts),
                    wright_open, rtol=1e-2)
    assert allclose(c_closed.comoving_volume(redshifts),
                    wright_closed, rtol=1e-2)
@pytest.mark.skipif('not HAS_SCIPY')
def test_differential_comoving_volume():
    """Numerically integrating dV/dz/dOmega should recover V(z)."""
    from scipy.integrate import quad
    c_flat = core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.73, Tcmb0=0.0)
    c_open = core.LambdaCDM(H0=70, Om0=0.27, Ode0=0.0, Tcmb0=0.0)
    c_closed = core.LambdaCDM(H0=70, Om0=2, Ode0=0.0, Tcmb0=0.0)
    # test that integration of differential_comoving_volume()
    # yields same as comoving_volume()
    redshifts = np.array([0.5, 1, 2, 3, 5, 9])
    wright_flat = np.array([29.123, 159.529, 630.427, 1178.531, 2181.485,
                            3654.802]) * u.Gpc**3
    wright_open = np.array([20.501, 99.019, 380.278, 747.049, 1558.363,
                            3123.814]) * u.Gpc**3
    wright_closed = np.array([12.619, 44.708, 114.904, 173.709, 258.82,
                              358.992]) * u.Gpc**3
    # The wright calculator isn't very accurate, so we use a rather
    # modest precision.
    # quad needs plain floats, so strip the units inside the lambdas
    ftemp = lambda x: c_flat.differential_comoving_volume(x).value
    otemp = lambda x: c_open.differential_comoving_volume(x).value
    ctemp = lambda x: c_closed.differential_comoving_volume(x).value
    # Multiply by solid_angle (4 * pi)
    assert allclose(np.array([4.0 * np.pi * quad(ftemp, 0, redshift)[0]
                              for redshift in redshifts]) * u.Mpc**3,
                    wright_flat, rtol=1e-2)
    assert allclose(np.array([4.0 * np.pi * quad(otemp, 0, redshift)[0]
                              for redshift in redshifts]) * u.Mpc**3,
                    wright_open, rtol=1e-2)
    assert allclose(np.array([4.0 * np.pi * quad(ctemp, 0, redshift)[0]
                              for redshift in redshifts]) * u.Mpc**3,
                    wright_closed, rtol=1e-2)
@pytest.mark.skipif('not HAS_SCIPY')
def test_flat_open_closed_icosmo():
    """ Test against the tabulated values generated from icosmo.org
    with three example cosmologies (flat, open and closed).
    """
    cosmo_flat = """\
# from icosmo (icosmo.org)
# Om 0.3 w -1 h 0.7 Ol 0.7
# z     comoving_transvers_dist   angular_diameter_dist  luminosity_dist
       0.0000000       0.0000000       0.0000000         0.0000000
      0.16250000       669.77536       576.15085         778.61386
      0.32500000       1285.5964       970.26143         1703.4152
      0.50000000       1888.6254       1259.0836         2832.9381
      0.66250000       2395.5489       1440.9317         3982.6000
      0.82500000       2855.5732       1564.6976         5211.4210
       1.0000000       3303.8288       1651.9144         6607.6577
       1.1625000       3681.1867       1702.2829         7960.5663
       1.3250000       4025.5229       1731.4077         9359.3408
       1.5000000       4363.8558       1745.5423         10909.640
       1.6625000       4651.4830       1747.0359         12384.573
       1.8250000       4916.5970       1740.3883         13889.387
       2.0000000       5179.8621       1726.6207         15539.586
       2.1625000       5406.0204       1709.4136         17096.540
       2.3250000       5616.5075       1689.1752         18674.888
       2.5000000       5827.5418       1665.0120         20396.396
       2.6625000       6010.4886       1641.0890         22013.414
       2.8250000       6182.1688       1616.2533         23646.796
       3.0000000       6355.6855       1588.9214         25422.742
       3.1625000       6507.2491       1563.3031         27086.425
       3.3250000       6650.4520       1537.6768         28763.205
       3.5000000       6796.1499       1510.2555         30582.674
       3.6625000       6924.2096       1485.0852         32284.127
       3.8250000       7045.8876       1460.2876         33996.408
       4.0000000       7170.3664       1434.0733         35851.832
       4.1625000       7280.3423       1410.2358         37584.767
       4.3250000       7385.3277       1386.9160         39326.870
       4.5000000       7493.2222       1362.4040         41212.722
       4.6625000       7588.9589       1340.2135         42972.480
"""
    cosmo_open = """\
# from icosmo (icosmo.org)
# Om 0.3 w -1 h 0.7 Ol 0.1
# z     comoving_transvers_dist   angular_diameter_dist  luminosity_dist
       0.0000000       0.0000000       0.0000000         0.0000000
      0.16250000       643.08185       553.18868         747.58265
      0.32500000       1200.9858       906.40441         1591.3062
      0.50000000       1731.6262       1154.4175         2597.4393
      0.66250000       2174.3252       1307.8648         3614.8157
      0.82500000       2578.7616       1413.0201         4706.2399
       1.0000000       2979.3460       1489.6730         5958.6920
       1.1625000       3324.2002       1537.2024         7188.5829
       1.3250000       3646.8432       1568.5347         8478.9104
       1.5000000       3972.8407       1589.1363         9932.1017
       1.6625000       4258.1131       1599.2913         11337.226
       1.8250000       4528.5346       1603.0211         12793.110
       2.0000000       4804.9314       1601.6438         14414.794
       2.1625000       5049.2007       1596.5852         15968.097
       2.3250000       5282.6693       1588.7727         17564.875
       2.5000000       5523.0914       1578.0261         19330.820
       2.6625000       5736.9813       1566.4113         21011.694
       2.8250000       5942.5803       1553.6158         22730.370
       3.0000000       6155.4289       1538.8572         24621.716
       3.1625000       6345.6997       1524.4924         26413.975
       3.3250000       6529.3655       1509.6799         28239.506
       3.5000000       6720.2676       1493.3928         30241.204
       3.6625000       6891.5474       1478.0799         32131.840
       3.8250000       7057.4213       1462.6780         34052.058
       4.0000000       7230.3723       1446.0745         36151.862
       4.1625000       7385.9998       1430.7021         38130.224
       4.3250000       7537.1112       1415.4199         40135.117
       4.5000000       7695.0718       1399.1040         42322.895
       4.6625000       7837.5510       1384.1150         44380.133
"""
    cosmo_closed = """\
# from icosmo (icosmo.org)
# Om 2 w -1 h 0.7 Ol 0.1
# z     comoving_transvers_dist   angular_diameter_dist  luminosity_dist
       0.0000000       0.0000000       0.0000000         0.0000000
      0.16250000       601.80160       517.67879         699.59436
      0.32500000       1057.9502       798.45297         1401.7840
      0.50000000       1438.2161       958.81076         2157.3242
      0.66250000       1718.6778       1033.7912         2857.3019
      0.82500000       1948.2400       1067.5288         3555.5381
       1.0000000       2152.7954       1076.3977         4305.5908
       1.1625000       2312.3427       1069.2914         5000.4410
       1.3250000       2448.9755       1053.3228         5693.8681
       1.5000000       2575.6795       1030.2718         6439.1988
       1.6625000       2677.9671       1005.8092         7130.0873
       1.8250000       2768.1157       979.86398         7819.9270
       2.0000000       2853.9222       951.30739         8561.7665
       2.1625000       2924.8116       924.84161         9249.7167
       2.3250000       2988.5333       898.80701         9936.8732
       2.5000000       3050.3065       871.51614         10676.073
       2.6625000       3102.1909       847.01459         11361.774
       2.8250000       3149.5043       823.39982         12046.854
       3.0000000       3195.9966       798.99915         12783.986
       3.1625000       3235.5334       777.30533         13467.908
       3.3250000       3271.9832       756.52790         14151.327
       3.5000000       3308.1758       735.15017         14886.791
       3.6625000       3339.2521       716.19347         15569.263
       3.8250000       3368.1489       698.06195         16251.319
       4.0000000       3397.0803       679.41605         16985.401
       4.1625000       3422.1142       662.87926         17666.664
       4.3250000       3445.5542       647.05243         18347.576
       4.5000000       3469.1805       630.76008         19080.493
       4.6625000       3489.7534       616.29199         19760.729
"""
    # Flat case (Om + Ol = 1).  Use unpack=True (the intended boolean)
    # rather than the integer 1 that loadtxt merely tolerates.
    redshifts, dm, da, dl = np.loadtxt(StringIO(cosmo_flat), unpack=True)
    dm = dm * u.Mpc
    da = da * u.Mpc
    dl = dl * u.Mpc
    cosmo = core.LambdaCDM(H0=70, Om0=0.3, Ode0=0.70, Tcmb0=0.0)
    assert allclose(cosmo.comoving_transverse_distance(redshifts), dm)
    assert allclose(cosmo.angular_diameter_distance(redshifts), da)
    assert allclose(cosmo.luminosity_distance(redshifts), dl)
    # Open case (Om + Ol < 1)
    redshifts, dm, da, dl = np.loadtxt(StringIO(cosmo_open), unpack=True)
    dm = dm * u.Mpc
    da = da * u.Mpc
    dl = dl * u.Mpc
    cosmo = core.LambdaCDM(H0=70, Om0=0.3, Ode0=0.1, Tcmb0=0.0)
    assert allclose(cosmo.comoving_transverse_distance(redshifts), dm)
    assert allclose(cosmo.angular_diameter_distance(redshifts), da)
    assert allclose(cosmo.luminosity_distance(redshifts), dl)
    # Closed case (Om + Ol > 1)
    redshifts, dm, da, dl = np.loadtxt(StringIO(cosmo_closed), unpack=True)
    dm = dm * u.Mpc
    da = da * u.Mpc
    dl = dl * u.Mpc
    cosmo = core.LambdaCDM(H0=70, Om0=2, Ode0=0.1, Tcmb0=0.0)
    assert allclose(cosmo.comoving_transverse_distance(redshifts), dm)
    assert allclose(cosmo.angular_diameter_distance(redshifts), da)
    assert allclose(cosmo.luminosity_distance(redshifts), dl)
@pytest.mark.skipif('not HAS_SCIPY')
def test_integral():
    """Integer redshift inputs must match their float equivalents."""
    cosmo = core.LambdaCDM(H0=73.2, Om0=0.3, Ode0=0.50)
    assert allclose(cosmo.comoving_distance(3),
                    cosmo.comoving_distance(3.0), rtol=1e-7)
    assert allclose(cosmo.comoving_distance([1, 2, 3, 5]),
                    cosmo.comoving_distance([1.0, 2.0, 3.0, 5.0]),
                    rtol=1e-7)
    assert allclose(cosmo.efunc(6), cosmo.efunc(6.0), rtol=1e-7)
    # Both directions of the E(z) function, array inputs
    for method in (cosmo.efunc, cosmo.inv_efunc):
        assert allclose(method([1, 2, 6]), method([1.0, 2.0, 6.0]),
                        rtol=1e-7)
def test_wz():
    """Check w(z) for each dark-energy parametrization.

    The asserted values follow the parametrizations: LambdaCDM has
    w = -1 everywhere; wCDM a constant w0; w0wzCDM w0 + wz*z;
    w0waCDM w0 + wa*z/(1+z); wpwaCDM the same form pivoted so that
    w(zp) = wp.
    """
    cosmo = core.LambdaCDM(H0=70, Om0=0.3, Ode0=0.70)
    assert allclose(cosmo.w(1.0), -1.)
    assert allclose(cosmo.w([0.1, 0.2, 0.5, 1.5, 2.5, 11.5]),
                    [-1., -1, -1, -1, -1, -1])
    cosmo = core.wCDM(H0=70, Om0=0.3, Ode0=0.70, w0=-0.5)
    assert allclose(cosmo.w(1.0), -0.5)
    assert allclose(cosmo.w([0.1, 0.2, 0.5, 1.5, 2.5, 11.5]),
                    [-0.5, -0.5, -0.5, -0.5, -0.5, -0.5])
    assert allclose(cosmo.w0, -0.5)
    cosmo = core.w0wzCDM(H0=70, Om0=0.3, Ode0=0.70, w0=-1, wz=0.5)
    assert allclose(cosmo.w(1.0), -0.5)
    assert allclose(cosmo.w([0.0, 0.5, 1.0, 1.5, 2.3]),
                    [-1.0, -0.75, -0.5, -0.25, 0.15])
    assert allclose(cosmo.w0, -1.0)
    assert allclose(cosmo.wz, 0.5)
    cosmo = core.w0waCDM(H0=70, Om0=0.3, Ode0=0.70, w0=-1, wa=-0.5)
    assert allclose(cosmo.w0, -1.0)
    assert allclose(cosmo.wa, -0.5)
    assert allclose(cosmo.w(1.0), -1.25)
    assert allclose(cosmo.w([0.0, 0.5, 1.0, 1.5, 2.3]),
                    [-1, -1.16666667, -1.25, -1.3, -1.34848485])
    cosmo = core.wpwaCDM(H0=70, Om0=0.3, Ode0=0.70, wp=-0.9,
                         wa=0.2, zp=0.5)
    assert allclose(cosmo.wp, -0.9)
    assert allclose(cosmo.wa, 0.2)
    assert allclose(cosmo.zp, 0.5)
    # At the pivot redshift, w(zp) == wp by construction
    assert allclose(cosmo.w(0.5), -0.9)
    assert allclose(cosmo.w([0.1, 0.2, 0.5, 1.5, 2.5, 11.5]),
                    [-0.94848485, -0.93333333, -0.9, -0.84666667,
                     -0.82380952, -0.78266667])
@pytest.mark.skipif('not HAS_SCIPY')
def test_de_densityscale():
    """Check de_density_scale (rho_de(z)/rho_de(0)) for each model,
    including integer/float input consistency."""
    cosmo = core.LambdaCDM(H0=70, Om0=0.3, Ode0=0.70)
    z = np.array([0.1, 0.2, 0.5, 1.5, 2.5])
    # Cosmological constant: density scale is identically 1
    assert allclose(cosmo.de_density_scale(z),
                    [1.0, 1.0, 1.0, 1.0, 1.0])
    # Integer check
    assert allclose(cosmo.de_density_scale(3),
                    cosmo.de_density_scale(3.0), rtol=1e-7)
    assert allclose(cosmo.de_density_scale([1, 2, 3]),
                    cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
    cosmo = core.wCDM(H0=70, Om0=0.3, Ode0=0.60, w0=-0.5)
    assert allclose(cosmo.de_density_scale(z),
                    [1.15369, 1.31453, 1.83712, 3.95285, 6.5479],
                    rtol=1e-4)
    assert allclose(cosmo.de_density_scale(3),
                    cosmo.de_density_scale(3.0), rtol=1e-7)
    assert allclose(cosmo.de_density_scale([1, 2, 3]),
                    cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
    cosmo = core.w0wzCDM(H0=70, Om0=0.3, Ode0=0.50, w0=-1, wz=0.5)
    assert allclose(cosmo.de_density_scale(z),
                    [0.746048, 0.5635595, 0.25712378, 0.026664129,
                     0.0035916468], rtol=1e-4)
    assert allclose(cosmo.de_density_scale(3),
                    cosmo.de_density_scale(3.0), rtol=1e-7)
    assert allclose(cosmo.de_density_scale([1, 2, 3]),
                    cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
    cosmo = core.w0waCDM(H0=70, Om0=0.3, Ode0=0.70, w0=-1, wa=-0.5)
    assert allclose(cosmo.de_density_scale(z),
                    [0.9934201, 0.9767912, 0.897450,
                     0.622236, 0.4458753], rtol=1e-4)
    assert allclose(cosmo.de_density_scale(3),
                    cosmo.de_density_scale(3.0), rtol=1e-7)
    assert allclose(cosmo.de_density_scale([1, 2, 3]),
                    cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
    cosmo = core.wpwaCDM(H0=70, Om0=0.3, Ode0=0.70, wp=-0.9,
                         wa=0.2, zp=0.5)
    assert allclose(cosmo.de_density_scale(z),
                    [1.012246048, 1.0280102, 1.087439,
                     1.324988, 1.565746], rtol=1e-4)
    assert allclose(cosmo.de_density_scale(3),
                    cosmo.de_density_scale(3.0), rtol=1e-7)
    assert allclose(cosmo.de_density_scale([1, 2, 3]),
                    cosmo.de_density_scale([1., 2., 3.]), rtol=1e-7)
@pytest.mark.skipif('not HAS_SCIPY')
def test_age():
    """Check hubble_time and age(z) against precomputed values."""
    # WMAP7 but with Omega_relativisitic = 0
    tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
    assert allclose(tcos.hubble_time, 13.889094057856937 * u.Gyr)
    assert allclose(tcos.age(4), 1.5823603508870991 * u.Gyr)
    # Float and integer redshift inputs must agree
    assert allclose(tcos.age([1., 5.]),
                    [5.97113193, 1.20553129] * u.Gyr)
    assert allclose(tcos.age([1, 5]), [5.97113193, 1.20553129] * u.Gyr)
    # Add relativistic species (slightly younger universe)
    tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0)
    assert allclose(tcos.age(4), 1.5773003779230699 * u.Gyr)
    assert allclose(tcos.age([1, 5]), [5.96344942, 1.20093077] * u.Gyr)
    # And massive neutrinos
    tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=3.0,
                              m_nu = 0.1 * u.eV)
    assert allclose(tcos.age(4), 1.5546485439853412 * u.Gyr)
    assert allclose(tcos.age([1, 5]), [5.88448152, 1.18383759] * u.Gyr)
@pytest.mark.skipif('not HAS_SCIPY')
def test_distmod():
    """Distance modulus for WMAP7-like parameters (no radiation)."""
    tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
    assert allclose(tcos.hubble_distance, 4258.415596590909 * u.Mpc)
    expected = [44.124857, 48.40167258] * u.mag
    # Integer and float redshift inputs must give identical moduli
    assert allclose(tcos.distmod([1, 5]), expected)
    assert allclose(tcos.distmod([1., 5.]), expected)
@pytest.mark.skipif('not HAS_SCIPY')
def test_neg_distmod():
    """Distance modulus must stay well defined even where the
    luminosity distance goes negative (perfectly okay, if obscure)."""
    tcos = core.LambdaCDM(70, 0.2, 1.3, Tcmb0=0)
    redshifts = [50, 100]
    assert allclose(tcos.luminosity_distance(redshifts),
                    [16612.44047622, -46890.79092244] * u.Mpc)
    assert allclose(tcos.distmod(redshifts),
                    [46.102167189, 48.355437790944] * u.mag)
@pytest.mark.skipif('not HAS_SCIPY')
def test_critical_density():
    """Check critical_density0 and critical_density(z) in cgs units."""
    # WMAP7 but with Omega_relativistic = 0
    # These tests will fail if astropy.const starts returning non-mks
    # units by default; see the comment at the top of core.py
    tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
    assert allclose(tcos.critical_density0,
                    9.31000324385361e-30 * u.g / u.cm**3)
    # z = 0 evaluation must reproduce the stored critical_density0
    assert allclose(tcos.critical_density0,
                    tcos.critical_density(0))
    # Integer and float redshift inputs must agree
    assert allclose(tcos.critical_density([1, 5]),
                    [2.70362491e-29, 5.53758986e-28] * u.g / u.cm**3)
    assert allclose(tcos.critical_density([1., 5.]),
                    [2.70362491e-29, 5.53758986e-28] * u.g / u.cm**3)
@pytest.mark.skipif('not HAS_SCIPY')
def test_comoving_distance_z1z2():
    """Check the private two-redshift comoving distance helper."""
    tcos = core.LambdaCDM(100, 0.3, 0.8, Tcmb0=0.0)
    with pytest.raises(ValueError):  # test diff size z1, z2 fail
        tcos._comoving_distance_z1z2((1, 2), (3, 4, 5))
    # Comoving distances are invertible (antisymmetric in z1, z2)
    assert allclose(tcos._comoving_distance_z1z2(1, 2),
                    -tcos._comoving_distance_z1z2(2, 1))
    # Element-wise evaluation over redshift pairs, including a
    # reversed pair (negative result) at index 2
    z1 = 0, 0, 2, 0.5, 1
    z2 = 2, 1, 1, 2.5, 1.1
    results = (3767.90579253,
               2386.25591391,
               -1381.64987862,
               2893.11776663,
               174.1524683) * u.Mpc
    assert allclose(tcos._comoving_distance_z1z2(z1, z2),
                    results)
@pytest.mark.skipif('not HAS_SCIPY')
def test_comoving_transverse_distance_z1z2():
    """Check the private two-redshift transverse comoving distance."""
    tcos = core.FlatLambdaCDM(100, 0.3, Tcmb0=0.0)
    with pytest.raises(ValueError):  # test diff size z1, z2 fail
        tcos._comoving_transverse_distance_z1z2((1, 2), (3, 4, 5))
    # Tests that should actually work, target values computed with
    # http://www.astro.multivax.de:8000/phillip/angsiz_prog/README.HTML
    # Kayser, Helbig, and Schramm (Astron.Astrophys. 318 (1997) 680-686)
    assert allclose(tcos._comoving_transverse_distance_z1z2(1, 2),
                    1313.2232194828466 * u.Mpc)
    # In a flat universe comoving distance and comoving transverse
    # distance are identical
    z1 = 0, 0, 2, 0.5, 1
    z2 = 2, 1, 1, 2.5, 1.1
    assert allclose(tcos._comoving_distance_z1z2(z1, z2),
                    tcos._comoving_transverse_distance_z1z2(z1, z2))
    # Test non-flat cases to avoid simply testing
    # comoving_distance_z1z2. Test array, array case.
    tcos = core.LambdaCDM(100, 0.3, 0.5, Tcmb0=0.0)
    results = (3535.931375645655,
               2226.430046551708,
               -1208.6817970036532,
               2595.567367601969,
               151.36592003406884) * u.Mpc
    assert allclose(tcos._comoving_transverse_distance_z1z2(z1, z2),
                    results)
    # Test positive curvature with scalar, array combination.
    tcos = core.LambdaCDM(100, 1.0, 0.2, Tcmb0=0.0)
    z1 = 0.1
    z2 = 0, 0.1, 0.2, 0.5, 1.1, 2
    results = (-281.31602666724865,
               0.,
               248.58093707820436,
               843.9331377460543,
               1618.6104987686672,
               2287.5626543279927) * u.Mpc
    assert allclose(tcos._comoving_transverse_distance_z1z2(z1, z2),
                    results)
@pytest.mark.skipif('not HAS_SCIPY')
def test_angular_diameter_distance_z1z2():
    """Check the two-redshift angular diameter distance (flat and
    both signs of curvature)."""
    tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
    with pytest.raises(ValueError):  # test diff size z1, z2 fail
        tcos.angular_diameter_distance_z1z2([1, 2], [3, 4, 5])
    # Tests that should actually work
    assert allclose(tcos.angular_diameter_distance_z1z2(1, 2),
                    646.22968662822018 * u.Mpc)
    # Array, array case; the reversed pair at index 2 gives a
    # negative distance
    z1 = 0, 0, 2, 0.5, 1
    z2 = 2, 1, 1, 2.5, 1.1
    results = (1760.0628637762106,
               1670.7497657219858,
               -969.34452994,
               1159.0970895962193,
               115.72768186186921) * u.Mpc
    assert allclose(tcos.angular_diameter_distance_z1z2(z1, z2),
                    results)
    # Scalar z1 broadcast against an array of z2
    z1 = 0.1
    z2 = 0.1, 0.2, 0.5, 1.1, 2
    results = (0.,
               332.09893173,
               986.35635069,
               1508.37010062,
               1621.07937976) * u.Mpc
    assert allclose(tcos.angular_diameter_distance_z1z2(0.1, z2),
                    results)
    # Non-flat (positive Ok0) test
    tcos = core.LambdaCDM(H0=70.4, Om0=0.2, Ode0=0.5, Tcmb0=0.0)
    assert allclose(tcos.angular_diameter_distance_z1z2(1, 2),
                    620.1175337852428 * u.Mpc)
    # Non-flat (negative Ok0) test
    tcos = core.LambdaCDM(H0=100, Om0=2, Ode0=1, Tcmb0=0.0)
    assert allclose(tcos.angular_diameter_distance_z1z2(1, 2),
                    228.42914659246014 * u.Mpc)
@pytest.mark.skipif('not HAS_SCIPY')
def test_absorption_distance():
    """Absorption distance regression values for list/scalar, int/float input."""
    tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0=0.0)
    assert allclose(tcos.absorption_distance([1, 3]),
                    [1.72576635, 7.98685853])
    assert allclose(tcos.absorption_distance([1., 3.]),
                    [1.72576635, 7.98685853])
    assert allclose(tcos.absorption_distance(3), 7.98685853)
    assert allclose(tcos.absorption_distance(3.), 7.98685853)
@pytest.mark.skipif('not HAS_SCIPY')
def test_massivenu_basic():
    """Setting and retrieval of neutrino masses (m_nu) and Neff."""
    # Test no neutrinos case
    tcos = core.FlatLambdaCDM(70.4, 0.272, Neff=4.05,
                              m_nu=u.Quantity(0, u.eV))
    assert allclose(tcos.Neff, 4.05)
    assert not tcos.has_massive_nu
    mnu = tcos.m_nu
    assert len(mnu) == 4
    assert mnu.unit == u.eV
    assert allclose(mnu, [0.0, 0.0, 0.0, 0.0] * u.eV)
    assert allclose(tcos.nu_relative_density(1.), 0.22710731766 * 4.05,
                    rtol=1e-6)
    assert allclose(tcos.nu_relative_density(1), 0.22710731766 * 4.05,
                    rtol=1e-6)
    # Alternative no neutrinos case: Tcmb0 == 0 means m_nu is ignored.
    tcos = core.FlatLambdaCDM(70.4, 0.272, Tcmb0 = 0 * u.K,
                              m_nu=u.Quantity(0.4, u.eV))
    assert not tcos.has_massive_nu
    assert tcos.m_nu is None
    # Test basic setting, retrieval of values
    tcos = core.FlatLambdaCDM(70.4, 0.272,
                              m_nu=u.Quantity([0.0, 0.01, 0.02], u.eV))
    assert tcos.has_massive_nu
    mnu = tcos.m_nu
    assert len(mnu) == 3
    assert mnu.unit == u.eV
    assert allclose(mnu, [0.0, 0.01, 0.02] * u.eV)
    # All massive neutrinos case: scalar mass is broadcast to all species.
    tcos = core.FlatLambdaCDM(70.4, 0.272, m_nu=u.Quantity(0.1, u.eV),
                              Neff=3.1)
    assert allclose(tcos.Neff, 3.1)
    assert tcos.has_massive_nu
    mnu = tcos.m_nu
    assert len(mnu) == 3
    assert mnu.unit == u.eV
    assert allclose(mnu, [0.1, 0.1, 0.1] * u.eV)
@pytest.mark.skipif('not HAS_SCIPY')
def test_distances():
    """Internal regression test of comoving_distance for every cosmology class.

    For each class the pattern is: no relativistic species, relativistic
    species with massless neutrinos, then massive neutrinos.
    """
    # Test distance calculations for various special case
    # scenarios (no relativistic species, normal, massive neutrinos)
    # These do not come from external codes -- they are just internal
    # checks to make sure nothing changes if we muck with the distance
    # calculators
    z = np.array([1.0, 2.0, 3.0, 4.0])
    # The pattern here is: no relativistic species, the relativistic
    # species with massless neutrinos, then massive neutrinos
    cos = core.LambdaCDM(75.0, 0.25, 0.5, Tcmb0=0.0)
    assert allclose(cos.comoving_distance(z),
                    [2953.93001902, 4616.7134253, 5685.07765971,
                     6440.80611897] * u.Mpc, rtol=1e-4)
    cos = core.LambdaCDM(75.0, 0.25, 0.6, Tcmb0=3.0, Neff=3,
                         m_nu=u.Quantity(0.0, u.eV))
    assert allclose(cos.comoving_distance(z),
                    [3037.12620424, 4776.86236327, 5889.55164479,
                     6671.85418235] * u.Mpc, rtol=1e-4)
    cos = core.LambdaCDM(75.0, 0.3, 0.4, Tcmb0=3.0, Neff=3,
                         m_nu=u.Quantity(10.0, u.eV))
    assert allclose(cos.comoving_distance(z),
                    [2471.80626824, 3567.1902565 , 4207.15995626,
                     4638.20476018] * u.Mpc, rtol=1e-4)
    # Flat
    cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=0.0)
    assert allclose(cos.comoving_distance(z),
                    [3180.83488552, 5060.82054204, 6253.6721173,
                     7083.5374303] * u.Mpc, rtol=1e-4)
    cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3,
                             m_nu=u.Quantity(0.0, u.eV))
    assert allclose(cos.comoving_distance(z),
                    [3180.42662867, 5059.60529655, 6251.62766102,
                     7080.71698117] * u.Mpc, rtol=1e-4)
    cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3,
                             m_nu=u.Quantity(10.0, u.eV))
    assert allclose(cos.comoving_distance(z),
                    [2337.54183142, 3371.91131264, 3988.40711188,
                     4409.09346922] * u.Mpc, rtol=1e-4)
    # Add w
    cos = core.FlatwCDM(75.0, 0.25, w0=-1.05, Tcmb0=0.0)
    assert allclose(cos.comoving_distance(z),
                    [3216.8296894 , 5117.2097601 , 6317.05995437,
                     7149.68648536] * u.Mpc, rtol=1e-4)
    cos = core.FlatwCDM(75.0, 0.25, w0=-0.95, Tcmb0=3.0, Neff=3,
                        m_nu=u.Quantity(0.0, u.eV))
    assert allclose(cos.comoving_distance(z),
                    [3143.56537758, 5000.32196494, 6184.11444601,
                     7009.80166062] * u.Mpc, rtol=1e-4)
    cos = core.FlatwCDM(75.0, 0.25, w0=-0.9, Tcmb0=3.0, Neff=3,
                        m_nu=u.Quantity(10.0, u.eV))
    assert allclose(cos.comoving_distance(z),
                    [2337.76035371, 3372.1971387, 3988.71362289,
                     4409.40817174] * u.Mpc, rtol=1e-4)
    # Non-flat w
    cos = core.wCDM(75.0, 0.25, 0.4, w0=-0.9, Tcmb0=0.0)
    assert allclose(cos.comoving_distance(z),
                    [2849.6163356 , 4428.71661565, 5450.97862778,
                     6179.37072324] * u.Mpc, rtol=1e-4)
    cos = core.wCDM(75.0, 0.25, 0.4, w0=-1.1, Tcmb0=3.0, Neff=3,
                    m_nu=u.Quantity(0.0, u.eV))
    assert allclose(cos.comoving_distance(z),
                    [2904.35580229, 4511.11471267, 5543.43643353,
                     6275.9206788] * u.Mpc, rtol=1e-4)
    cos = core.wCDM(75.0, 0.25, 0.4, w0=-0.9, Tcmb0=3.0, Neff=3,
                    m_nu=u.Quantity(10.0, u.eV))
    assert allclose(cos.comoving_distance(z),
                    [2473.32522734, 3581.54519631, 4232.41674426,
                     4671.83818117] * u.Mpc, rtol=1e-4)
    # w0wa
    cos = core.w0waCDM(75.0, 0.3, 0.6, w0=-0.9, wa=0.1, Tcmb0=0.0)
    assert allclose(cos.comoving_distance(z),
                    [2937.7807638, 4572.59950903, 5611.52821924,
                     6339.8549956] * u.Mpc, rtol=1e-4)
    cos = core.w0waCDM(75.0, 0.25, 0.5, w0=-0.9, wa=0.1, Tcmb0=3.0, Neff=3,
                       m_nu=u.Quantity(0.0, u.eV))
    assert allclose(cos.comoving_distance(z),
                    [2907.34722624, 4539.01723198, 5593.51611281,
                     6342.3228444] * u.Mpc, rtol=1e-4)
    cos = core.w0waCDM(75.0, 0.25, 0.5, w0=-0.9, wa=0.1, Tcmb0=3.0, Neff=3,
                       m_nu=u.Quantity(10.0, u.eV))
    assert allclose(cos.comoving_distance(z),
                    [2507.18336722, 3633.33231695, 4292.44746919,
                     4736.35404638] * u.Mpc, rtol=1e-4)
    # Flatw0wa
    cos = core.Flatw0waCDM(75.0, 0.25, w0=-0.95, wa=0.15, Tcmb0=0.0)
    assert allclose(cos.comoving_distance(z),
                    [3123.29892781, 4956.15204302, 6128.15563818,
                     6948.26480378] * u.Mpc, rtol=1e-4)
    cos = core.Flatw0waCDM(75.0, 0.25, w0=-0.95, wa=0.15, Tcmb0=3.0, Neff=3,
                           m_nu=u.Quantity(0.0, u.eV))
    assert allclose(cos.comoving_distance(z),
                    [3122.92671907, 4955.03768936, 6126.25719576,
                     6945.61856513] * u.Mpc, rtol=1e-4)
    cos = core.Flatw0waCDM(75.0, 0.25, w0=-0.95, wa=0.15, Tcmb0=3.0, Neff=3,
                           m_nu=u.Quantity(10.0, u.eV))
    assert allclose(cos.comoving_distance(z),
                    [2337.70072701, 3372.13719963, 3988.6571093,
                     4409.35399673] * u.Mpc, rtol=1e-4)
    # wpwa
    cos = core.wpwaCDM(75.0, 0.3, 0.6, wp=-0.9, zp=0.5, wa=0.1, Tcmb0=0.0)
    assert allclose(cos.comoving_distance(z),
                    [2954.68975298, 4599.83254834, 5643.04013201,
                     6373.36147627] * u.Mpc, rtol=1e-4)
    cos = core.wpwaCDM(75.0, 0.25, 0.5, wp=-0.9, zp=0.4, wa=0.1,
                       Tcmb0=3.0, Neff=3, m_nu=u.Quantity(0.0, u.eV))
    assert allclose(cos.comoving_distance(z),
                    [2919.00656215, 4558.0218123, 5615.73412391,
                     6366.10224229] * u.Mpc, rtol=1e-4)
    cos = core.wpwaCDM(75.0, 0.25, 0.5, wp=-0.9, zp=1.0, wa=0.1, Tcmb0=3.0,
                       Neff=4, m_nu=u.Quantity(5.0, u.eV))
    assert allclose(cos.comoving_distance(z),
                    [2629.48489827, 3874.13392319, 4614.31562397,
                     5116.51184842] * u.Mpc, rtol=1e-4)
    # w0wz
    cos = core.w0wzCDM(75.0, 0.3, 0.6, w0=-0.9, wz=0.1, Tcmb0=0.0)
    assert allclose(cos.comoving_distance(z),
                    [3051.68786716, 4756.17714818, 5822.38084257,
                     6562.70873734] * u.Mpc, rtol=1e-4)
    cos = core.w0wzCDM(75.0, 0.25, 0.5, w0=-0.9, wz=0.1,
                       Tcmb0=3.0, Neff=3, m_nu=u.Quantity(0.0, u.eV))
    assert allclose(cos.comoving_distance(z),
                    [2997.8115653 , 4686.45599916, 5764.54388557,
                     6524.17408738] * u.Mpc, rtol=1e-4)
    cos = core.w0wzCDM(75.0, 0.25, 0.5, w0=-0.9, wz=0.1, Tcmb0=3.0,
                       Neff=4, m_nu=u.Quantity(5.0, u.eV))
    assert allclose(cos.comoving_distance(z),
                    [2676.73467639, 3940.57967585, 4686.90810278,
                     5191.54178243] * u.Mpc, rtol=1e-4)
    # Also test different numbers of massive neutrinos
    # for FlatLambdaCDM to give the scalar nu density functions a
    # work out
    cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0,
                             m_nu=u.Quantity([10.0, 0, 0], u.eV))
    assert allclose(cos.comoving_distance(z),
                    [2777.71589173, 4186.91111666, 5046.0300719,
                     5636.10397302] * u.Mpc, rtol=1e-4)
    cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0,
                             m_nu=u.Quantity([10.0, 5, 0], u.eV))
    assert allclose(cos.comoving_distance(z),
                    [2636.48149391, 3913.14102091, 4684.59108974,
                     5213.07557084] * u.Mpc, rtol=1e-4)
    cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0,
                             m_nu=u.Quantity([4.0, 5, 9], u.eV))
    assert allclose(cos.comoving_distance(z),
                    [2563.5093049 , 3776.63362071, 4506.83448243,
                     5006.50158829] * u.Mpc, rtol=1e-4)
    cos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=4.2,
                             m_nu=u.Quantity([1.0, 4.0, 5, 9], u.eV))
    assert allclose(cos.comoving_distance(z),
                    [2525.58017482, 3706.87633298, 4416.58398847,
                     4901.96669755] * u.Mpc, rtol=1e-4)
@pytest.mark.skipif('not HAS_SCIPY')
def test_massivenu_density():
    """Neutrino density calculation against the exact Komatsu et al. (2011)
    formula (eqs. 24/25), evaluated externally with Mathematica.

    The f(y) approximation used internally is only good to ~0.5% (with
    some redshift dependence), so comparisons use rtol=5e-3.
    """
    # Testing neutrino density calculation
    # Simple test cosmology, where we compare rho_nu and rho_gamma
    # against the exact formula (eq 24/25 of Komatsu et al. 2011)
    # computed using Mathematica. The approximation we use for f(y)
    # is only good to ~ 0.5% (with some redshift dependence), so that's
    # what we test to.
    ztest = np.array([0.0, 1.0, 2.0, 10.0, 1000.0])
    nuprefac = 7.0 / 8.0 * (4.0 / 11.0) ** (4.0 / 3.0)
    # First try 3 massive neutrinos, all 100 eV -- note this is a universe
    # seriously dominated by neutrinos!
    tcos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3,
                              m_nu=u.Quantity(100.0, u.eV))
    assert tcos.has_massive_nu
    assert tcos.Neff == 3
    nurel_exp = nuprefac * tcos.Neff * np.array([171969, 85984.5, 57323,
                                                 15633.5, 171.801])
    assert allclose(tcos.nu_relative_density(ztest), nurel_exp, rtol=5e-3)
    assert allclose(tcos.efunc([0.0, 1.0]), [1.0, 7.46144727668], rtol=5e-3)
    # Next, slightly less massive
    tcos = core.FlatLambdaCDM(75.0, 0.25, Tcmb0=3.0, Neff=3,
                              m_nu=u.Quantity(0.25, u.eV))
    nurel_exp = nuprefac * tcos.Neff * np.array([429.924, 214.964, 143.312,
                                                 39.1005, 1.11086])
    assert allclose(tcos.nu_relative_density(ztest), nurel_exp,
                    rtol=5e-3)
    # For this one also test Onu directly
    onu_exp = np.array([0.01890217, 0.05244681, 0.0638236,
                        0.06999286, 0.1344951])
    assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
    # And fairly light
    tcos = core.FlatLambdaCDM(80.0, 0.30, Tcmb0=3.0, Neff=3,
                              m_nu=u.Quantity(0.01, u.eV))
    nurel_exp = nuprefac * tcos.Neff * np.array([17.2347, 8.67345, 5.84348,
                                                 1.90671, 1.00021])
    assert allclose(tcos.nu_relative_density(ztest), nurel_exp,
                    rtol=5e-3)
    onu_exp = np.array([0.00066599, 0.00172677, 0.0020732,
                        0.00268404, 0.0978313])
    assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
    assert allclose(tcos.efunc([1.0, 2.0]), [1.76225893, 2.97022048],
                    rtol=1e-4)
    assert allclose(tcos.inv_efunc([1.0, 2.0]), [0.5674535, 0.33667534],
                    rtol=1e-4)
    # Now a mixture of neutrino masses, with non-integer Neff
    tcos = core.FlatLambdaCDM(80.0, 0.30, Tcmb0=3.0, Neff=3.04,
                              m_nu=u.Quantity([0.0, 0.01, 0.25], u.eV))
    nurel_exp = nuprefac * tcos.Neff * \
                np.array([149.386233, 74.87915, 50.0518,
                          14.002403, 1.03702333])
    assert allclose(tcos.nu_relative_density(ztest), nurel_exp,
                    rtol=5e-3)
    onu_exp = np.array([0.00584959, 0.01493142, 0.01772291,
                        0.01963451, 0.10227728])
    assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
    # Integer redshifts
    # Use the builtin int: the np.int alias was deprecated in NumPy 1.20
    # and removed in NumPy 1.24; astype(int) is exactly equivalent.
    ztest = ztest.astype(int)
    assert allclose(tcos.nu_relative_density(ztest), nurel_exp,
                    rtol=5e-3)
    assert allclose(tcos.Onu(ztest), onu_exp, rtol=5e-3)
@pytest.mark.skipif('not HAS_SCIPY')
def test_z_at_value():
    """Expected-value tests of funcs.z_at_value against Planck13."""
    # These are tests of expected values, and hence have less precision
    # than the roundtrip tests below (test_z_at_value_roundtrip);
    # here we have to worry about the cosmological calculations
    # giving slightly different values on different architectures,
    # there we are checking internal consistency on the same architecture
    # and so can be more demanding
    z_at_value = funcs.z_at_value
    cosmo = core.Planck13
    d = cosmo.luminosity_distance(3)
    assert allclose(z_at_value(cosmo.luminosity_distance, d), 3,
                    rtol=1e-8)
    assert allclose(z_at_value(cosmo.age, 2 * u.Gyr), 3.198122684356,
                    rtol=1e-6)
    assert allclose(z_at_value(cosmo.luminosity_distance, 1e4 * u.Mpc),
                    1.3685790653802761, rtol=1e-6)
    assert allclose(z_at_value(cosmo.lookback_time, 7 * u.Gyr),
                    0.7951983674601507, rtol=1e-6)
    # Angular diameter distance is not monotonic, so zmin/zmax select
    # which of the two solutions is returned.
    assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc,
                               zmax=2), 0.68127769625288614, rtol=1e-6)
    assert allclose(z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc,
                               zmin=2.5), 3.7914908028272083, rtol=1e-6)
    assert allclose(z_at_value(cosmo.distmod, 46 * u.mag),
                    1.9913891680278133, rtol=1e-6)
    # test behaviour when the solution is outside z limits (should
    # raise a CosmologyError)
    with pytest.raises(core.CosmologyError):
        z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, zmax=0.5)
    with pytest.raises(core.CosmologyError):
        z_at_value(cosmo.angular_diameter_distance, 1500*u.Mpc, zmin=4.)
@pytest.mark.skipif('not HAS_SCIPY')
def test_z_at_value_roundtrip():
    """
    Calculate values from a known redshift, and then check that
    z_at_value returns the right answer.

    Iterates over all public redshift-dependent Planck13 methods,
    plus the two-redshift distance methods (handled separately since
    they take two arguments).
    """
    z = 0.5
    # Skip Ok, w, de_density_scale because in the Planck13 cosmology
    # they are redshift independent and hence uninvertable,
    # *_distance_z1z2 methods take multiple arguments, so require
    # special handling
    # clone isn't a redshift-dependent method
    skip = ('Ok',
            'angular_diameter_distance_z1z2',
            'clone',
            'de_density_scale', 'w')
    import inspect
    methods = inspect.getmembers(core.Planck13, predicate=inspect.ismethod)
    for name, func in methods:
        if name.startswith('_') or name in skip:
            continue
        print('Round-trip testing {0}'.format(name))
        fval = func(z)
        # we need zmax here to pick the right solution for
        # angular_diameter_distance and related methods.
        # Be slightly more generous with rtol than the default 1e-8
        # used in z_at_value
        assert allclose(z, funcs.z_at_value(func, fval, zmax=1.5),
                        rtol=2e-8)
    # Test distance functions between two redshifts
    z2 = 2.0
    func_z1z2 = [lambda z1: core.Planck13._comoving_distance_z1z2(z1, z2),
                 lambda z1: \
                 core.Planck13._comoving_transverse_distance_z1z2(z1, z2),
                 lambda z1: \
                 core.Planck13.angular_diameter_distance_z1z2(z1, z2)]
    for func in func_z1z2:
        fval = func(z)
        assert allclose(z, funcs.z_at_value(func, fval, zmax=1.5),
                        rtol=2e-8)
| bsd-3-clause |
ihcfan/NautiPlot | hmi-cocos2d-x/plugin/tools/toolsForGame/main.py | 265 | 3576 | import sys, string, os
from Tkinter import *
import steps
Plugins = sys.argv[1]
print Plugins
pluginList = Plugins.split(':')
maxStep = 2
curStep = 1
stepList = []
# functions
# show step on the num index
def showStep(num):
    """Show the step frame at index ``num`` and hide every other step frame.

    Out-of-range indices are ignored. Index 0 (the first step) is a
    valid index and must not be rejected.
    """
    global stepList
    stepNum = len(stepList)
    # The original guard read `if num >= stepNum or num <= 0: pass`,
    # which was a no-op (`pass` falls through) and, had it returned,
    # would also have wrongly rejected the valid index 0. Reject only
    # truly out-of-range indices, and actually return.
    if num >= stepNum or num < 0:
        return
    for i in range(stepNum):
        if i == num:
            stepList[i].stepFrame.pack(fill=BOTH, anchor='nw')
        else:
            stepList[i].stepFrame.pack_forget()
# update the pre & next buttons status
def updateBtnState():
    """Refresh the Back/Next buttons to match the current wizard step.

    First step: Back disabled. Last step: Next relabelled 'Finish'.
    Mutates the module-level btnPreStep/btnNextStep widgets.
    """
    global curStep
    global btnNextStep
    global btnPreStep
    if curStep == 1:
        btnPreStep['state'] = DISABLED
        btnNextStep['state'] = NORMAL
        btnNextStep['text'] = 'Next'
    elif curStep == maxStep:
        btnPreStep['state'] = NORMAL
        btnNextStep['state'] = NORMAL
        btnNextStep['text'] = 'Finish'
    else:
        btnPreStep['state'] = NORMAL
        btnNextStep['state'] = NORMAL
        btnNextStep['text'] = 'Next'
# next button clicked
def nextStep():
    """Advance the wizard: validate the current step, then either move to
    the next step or, on the final step, run the plugin-integration shell
    script and relabel the button 'close'.
    """
    if btnNextStep['text'] == 'close':
        root.quit()
        return
    global curStep
    nowStepObj = stepList[curStep - 1]
    # checkStep() returns None on success, or an error string to display.
    bRet = nowStepObj.checkStep()
    if bRet != None:
        stepError['text'] = bRet
        return
    else:
        stepError['text'] = ''
    if curStep < maxStep:
        curStep += 1
        showStep(curStep - 1)
        updateBtnState()
    elif curStep == maxStep:
        # disable buttons when process
        btnPreStep['state'] = DISABLED
        btnNextStep['state'] = DISABLED
        # get user input arguments
        projPath = stepList[0].getPath()
        plugins = stepList[1].getSelectedPlugins()
        # Build a colon-separated "plugins/<name>" list for the script.
        strPlugins = ''
        i = 0
        while i < len(plugins):
            strPlugins += "plugins/"
            strPlugins += plugins[i]
            if i != (len(plugins) - 1):
                strPlugins += ':'
            i += 1
        # process shell script to modify the game project
        # NOTE(review): projPath is interpolated unquoted into a shell
        # command; a path containing spaces or shell metacharacters will
        # break or be interpreted by the shell. Consider subprocess with
        # an argument list instead of os.system.
        ret = os.system('bash ./toolsForGame/addPluginForGame.sh ' + projPath + ' ' + strPlugins)
        if ret != 0:
            # enable buttons after process
            btnPreStep['state'] = NORMAL
            btnNextStep['state'] = NORMAL
            stepError['text'] = 'Error during process'
        else:
            # enable next button & change text to close
            btnNextStep['state'] = NORMAL
            btnNextStep['text'] = 'close'
            stepError['text'] = 'Process Successful!'
# pre button clicked
def preStep():
    """Go back one wizard step (no-op on the first step); clears any
    error message and refreshes the buttons."""
    global curStep
    global stepError
    stepError['text'] = ''
    if curStep > 1:
        curStep -= 1
        showStep(curStep - 1)
        updateBtnState()
# init root view
root = Tk()
root.title('Plugin-x Integration Guide')
root.geometry("600x400")
rootFrame = Frame(root)
rootFrame.pack(fill=BOTH)
# steps view: step1 collects the project path, step2 the plugin selection
MyStep1 = steps.step1()
MyStep1.initStep(rootFrame)
MyStep2 = steps.step2()
MyStep2.initStep(rootFrame, pluginList)
stepList.append(MyStep1)
stepList.append(MyStep2)
MyStep1.stepFrame.pack(fill=BOTH, anchor='nw')
# add step error message
controlFrame = Frame(root)
controlFrame.pack(side=BOTTOM, fill=X, anchor='s')
stepError = Label(controlFrame)
stepError.pack(side=LEFT, padx=30)
# add step button
btnNextStep = Button(controlFrame, text='Next', command=nextStep)
btnPreStep = Button(controlFrame, text='Back', command=preStep, state=DISABLED)
btnNextStep.pack(side=RIGHT, padx=30)
btnPreStep.pack(side=RIGHT)
root.mainloop()
| gpl-2.0 |
ilayn/scipy | scipy/_lib/_disjoint_set.py | 12 | 5483 | """
Disjoint set data structure
"""
class DisjointSet:
    """Disjoint set data structure for incremental connectivity queries.

    .. versionadded:: 1.6.0

    Attributes
    ----------
    n_subsets : int
        The number of subsets.

    Methods
    -------
    add
    merge
    connected
    subset
    subsets
    __getitem__

    Notes
    -----
    This class implements the disjoint set [1]_, also known as the
    *union-find* or *merge-find* data structure. The *find* operation
    (implemented in `__getitem__`) uses the *path halving* variant, and
    `merge` uses the *merge by size* variant (ties go to the root that
    was inserted first).

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Disjoint-set_data_structure

    Examples
    --------
    >>> from scipy.cluster.hierarchy import DisjointSet

    Initialize a disjoint set:

    >>> disjoint_set = DisjointSet([1, 2, 3, 'a', 'b'])

    Merge some subsets:

    >>> disjoint_set.merge(1, 2)
    True
    >>> disjoint_set.merge(3, 'a')
    True
    >>> disjoint_set.merge('a', 'b')
    True
    >>> disjoint_set.merge('b', 'b')
    False

    Find root elements:

    >>> disjoint_set[2]
    1
    >>> disjoint_set['b']
    3

    Test connectivity:

    >>> disjoint_set.connected(1, 2)
    True
    >>> disjoint_set.connected(1, 'b')
    False

    List elements in disjoint set:

    >>> list(disjoint_set)
    [1, 2, 3, 'a', 'b']

    Get the subset containing 'a':

    >>> disjoint_set.subset('a')
    {'a', 3, 'b'}

    Get all subsets in the disjoint set:

    >>> disjoint_set.subsets()
    [{1, 2}, {'a', 3, 'b'}]
    """

    def __init__(self, elements=None):
        self.n_subsets = 0
        self._sizes = {}      # subset size, keyed by root element
        self._parents = {}    # union-find parent pointers
        # _nbrs threads each subset's members into a circular linked list.
        self._nbrs = {}
        # _indices records insertion order; it also disambiguates
        # elements that compare equal (e.g. 1 and 1.0).
        self._indices = {}
        if elements is not None:
            for element in elements:
                self.add(element)

    def __iter__(self):
        """Returns an iterator of the elements in the disjoint set.

        Elements are ordered by insertion order.
        """
        return iter(self._indices)

    def __len__(self):
        return len(self._indices)

    def __contains__(self, x):
        return x in self._indices

    def __getitem__(self, x):
        """Find the root element of `x`.

        Parameters
        ----------
        x : hashable object
            Input element.

        Returns
        -------
        root : hashable object
            Root element of `x`.
        """
        if x not in self._indices:
            raise KeyError(x)
        # Path halving: point each visited node at its grandparent while
        # walking upward, flattening the tree for later queries.
        parents = self._parents
        indices = self._indices
        while indices[x] != indices[parents[x]]:
            parents[x] = parents[parents[x]]
            x = parents[x]
        return x

    def add(self, x):
        """Add element `x` to disjoint set as its own singleton subset
        (no-op if already present).
        """
        if x in self._indices:
            return
        self._parents[x] = x
        self._nbrs[x] = x
        self._sizes[x] = 1
        self._indices[x] = len(self._indices)
        self.n_subsets += 1

    def merge(self, x, y):
        """Merge the subsets of `x` and `y`.

        The smaller subset (the child) is merged into the larger subset
        (the parent). If the subsets are of equal size, the root element
        which was first inserted into the disjoint set is selected as
        the parent.

        Parameters
        ----------
        x, y : hashable object
            Elements to merge.

        Returns
        -------
        merged : bool
            True if `x` and `y` were in disjoint sets, False otherwise.
        """
        root_x = self[x]
        root_y = self[y]
        if self._indices[root_x] == self._indices[root_y]:
            return False
        sizes = self._sizes
        indices = self._indices
        # Merge by size; the tuple comparison breaks ties in favour of
        # the earlier-inserted root.
        if (sizes[root_x], indices[root_y]) < (sizes[root_y], indices[root_x]):
            root_x, root_y = root_y, root_x
        self._parents[root_y] = root_x
        sizes[root_x] += sizes[root_y]
        # Splice the two circular member lists into one.
        self._nbrs[root_x], self._nbrs[root_y] = (self._nbrs[root_y],
                                                  self._nbrs[root_x])
        self.n_subsets -= 1
        return True

    def connected(self, x, y):
        """Test whether `x` and `y` are in the same subset.

        Parameters
        ----------
        x, y : hashable object
            Elements to test.

        Returns
        -------
        result : bool
            True if `x` and `y` are in the same set, False otherwise.
        """
        return self._indices[self[x]] == self._indices[self[y]]

    def subset(self, x):
        """Get the subset containing `x`.

        Parameters
        ----------
        x : hashable object
            Input element.

        Returns
        -------
        result : set
            Subset containing `x`.
        """
        if x not in self._indices:
            raise KeyError(x)
        # Walk the circular linked list until we return to x.
        members = [x]
        cursor = self._nbrs[x]
        while self._indices[cursor] != self._indices[x]:
            members.append(cursor)
            cursor = self._nbrs[cursor]
        return set(members)

    def subsets(self):
        """Get all the subsets in the disjoint set.

        Returns
        -------
        result : list
            Subsets in the disjoint set.
        """
        seen = set()
        out = []
        for element in self:
            if element not in seen:
                members = self.subset(element)
                seen |= members
                out.append(members)
        return out
| bsd-3-clause |
saurabh6790/omnit-app | patches/march_2013/p05_payment_reconciliation.py | 30 | 1321 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
import webnotes
def execute():
	# delete wrong gle entries created due to a bug in make_gl_entries of Account Controller
	# when using payment reconciliation
	# Find duplicated GL Entry pairs (same voucher/date/account/remarks,
	# debit and credit swapped) created on or after 2013-03-11; keep the
	# earlier row (gl2) and flag the later one (gl1.name > gl2.name).
	res = webnotes.conn.sql_list("""select distinct gl1.voucher_no
		from `tabGL Entry` gl1, `tabGL Entry` gl2
		where
		date(gl1.modified) >= "2013-03-11"
		and date(gl1.modified) = date(gl2.modified)
		and gl1.voucher_no = gl2.voucher_no
		and gl1.voucher_type = "Journal Voucher"
		and gl1.voucher_type = gl2.voucher_type
		and gl1.posting_date = gl2.posting_date
		and gl1.account = gl2.account
		and ifnull(gl1.is_cancelled, 'No') = 'No' and ifnull(gl2.is_cancelled, 'No') = 'No'
		and ifnull(gl1.against_voucher, '') = ifnull(gl2.against_voucher, '')
		and ifnull(gl1.against_voucher_type, '') = ifnull(gl2.against_voucher_type, '')
		and gl1.remarks = gl2.remarks
		and ifnull(gl1.debit, 0) = ifnull(gl2.credit, 0)
		and ifnull(gl1.credit, 0) = ifnull(gl2.debit, 0)
		and gl1.name > gl2.name""")
	for r in res:
		# Cancel all GL entries of the affected voucher, then regenerate
		# them from the Journal Voucher document.
		webnotes.conn.sql("""update `tabGL Entry` set `is_cancelled`='Yes'
			where voucher_type='Journal Voucher' and voucher_no=%s""", r)
		jv = webnotes.bean("Journal Voucher", r)
		jv.run_method("make_gl_entries")
| agpl-3.0 |
batermj/algorithm-challenger | code-analysis/programming_anguage/python/source_codes/Python3.8.0/Python-3.8.0/Lib/test/sortperf.py | 9 | 4806 | """Sort performance test.
See main() for command line syntax.
See tabulate() for output format.
"""
import sys
import time
import random
import marshal
import tempfile
import os
td = tempfile.gettempdir()
def randfloats(n):
    """Return a list of n random floats in [0, 1)."""
    # Generating floats is expensive, so this writes them out to a file in
    # a temp directory. If the file already exists, it just reads them
    # back in and shuffles them a bit.
    fn = os.path.join(td, "rr%06d" % n)
    try:
        fp = open(fn, "rb")
    except OSError:
        # Cache miss: generate fresh floats and try to persist them.
        r = random.random
        result = [r() for i in range(n)]
        try:
            try:
                fp = open(fn, "wb")
                marshal.dump(result, fp)
                fp.close()
                fp = None
            finally:
                # If the write failed partway, remove the partial file so a
                # later run doesn't load corrupt data.
                if fp:
                    try:
                        os.unlink(fn)
                    except OSError:
                        pass
        except OSError as msg:
            print("can't write", fn, ":", msg)
    else:
        # Cache hit: reload and permute so repeated runs aren't identical.
        result = marshal.load(fp)
        fp.close()
        # Shuffle it a bit...
        for i in range(10):
            i = random.randrange(n)
            temp = result[:i]
            del result[:i]
            temp.reverse()
            result.extend(temp)
            del temp
    assert len(result) == n
    return result
def flush():
    """Flush stdout so timing columns appear as they are produced."""
    sys.stdout.flush()
def doit(L):
    """Sort L in place and print the elapsed wall-clock time in seconds."""
    t0 = time.perf_counter()
    L.sort()
    t1 = time.perf_counter()
    print("%6.2f" % (t1-t0), end=' ')
    flush()
def tabulate(r):
    r"""Tabulate sort speed for lists of various sizes.

    The sizes are 2**i for i in r (the argument, a list).

    The output displays i, 2**i, and the time to sort arrays of 2**i
    floating point numbers with the following properties:

    *sort: random data
    \sort: descending data
    /sort: ascending data
    3sort: ascending, then 3 random exchanges
    +sort: ascending, then 10 random at the end
    %sort: ascending, then randomly replace 1% of the elements w/ random values
    ~sort: many duplicates
    =sort: all equal
    !sort: worst case scenario
    """
    cases = tuple([ch + "sort" for ch in r"*\/3+%~=!"])
    fmt = ("%2s %7s" + " %6s"*len(cases))
    print(fmt % (("i", "2**i") + cases))
    for i in r:
        n = 1 << i
        L = randfloats(n)
        print("%2d %7d" % (i, n), end=' ')
        flush()
        doit(L) # *sort
        L.reverse()
        doit(L) # \sort
        # L is now ascending (a reversed descending list).
        doit(L) # /sort
        # Do 3 random exchanges.
        for dummy in range(3):
            i1 = random.randrange(n)
            i2 = random.randrange(n)
            L[i1], L[i2] = L[i2], L[i1]
        doit(L) # 3sort
        # Replace the last 10 with random floats.
        if n >= 10:
            L[-10:] = [random.random() for dummy in range(10)]
        doit(L) # +sort
        # Replace 1% of the elements at random.
        for dummy in range(n // 100):
            L[random.randrange(n)] = random.random()
        doit(L) # %sort
        # Arrange for lots of duplicates.
        if n > 4:
            del L[4:]
            L = L * (n // 4)
            # Force the elements to be distinct objects, else timings can be
            # artificially low.
            L = list(map(lambda x: --x, L))
            doit(L) # ~sort
            del L
        # All equal. Again, force the elements to be distinct objects.
        L = list(map(abs, [-0.5] * n))
        doit(L) # =sort
        del L
        # This one looks like [3, 2, 1, 0, 0, 1, 2, 3]. It was a bad case
        # for an older implementation of quicksort, which used the median
        # of the first, last and middle elements as the pivot.
        half = n // 2
        L = list(range(half - 1, -1, -1))
        L.extend(range(half))
        # Force to float, so that the timings are comparable. This is
        # significantly faster if we leave them as ints.
        L = list(map(float, L))
        doit(L) # !sort
        print()
def main():
    """Main program when invoked as a script.

    One argument: tabulate a single row.
    Two arguments: tabulate a range (inclusive).
    Extra arguments are used to seed the random generator.
    """
    # default range (inclusive)
    k1 = 15
    k2 = 20
    if sys.argv[1:]:
        # one argument: single point
        k1 = k2 = int(sys.argv[1])
        if sys.argv[2:]:
            # two arguments: specify range
            k2 = int(sys.argv[2])
            if sys.argv[3:]:
                # derive random seed from remaining arguments
                x = 1
                for a in sys.argv[3:]:
                    x = 69069 * x + hash(a)
                random.seed(x)
    r = range(k1, k2+1)    # include the end point
    tabulate(r)
| apache-2.0 |
DDMAL/Gamera | gamera/plugins/corelation.py | 1 | 3823 | #
# Copyright (C) 2001-2005 Ichiro Fujinaga, Michael Droettboom, and Karl MacMillan
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""Various functions related to corelation (template matching)."""
from gamera.plugin import PluginFunction, PluginModule
from gamera.args import ImageType, Args, Float, Point
from gamera.enums import GREYSCALE, ONEBIT
class corelation_weighted(PluginFunction):
    """
    Returns a floating-point value for how well an image is corelated
    to another image placed at a given origin (*x*, *y*).  Uses the
    weighted reward/penalty method.

    *template*
        The template image.
    *offset*
        The displacement of the template on the image.
    *bb*, *bw*, *wb*, *ww*
        The rewards and penalties for different combinations of pixels.
        The first letter in the arugment name indicates the color of the
        template; the second letter indicates the color of the source
        image.  For instance, the value of *bw* will be applied to the
        result when the template pixel is black and the source image
        pixel is white.

        +--------+--------+------------------+
        | Image  |        | Template         |
        |        +--------+---------+--------+
        |        |        | black   | white  |
        |        +--------+---------+--------+
        |        | black  | *bb*    | *wb*   |
        |        +--------+---------+--------+
        |        | white  | *bw*    | *ww*   |
        +--------+--------+---------+--------+
    """
    # Declarative Gamera plugin metadata: result type, applicable pixel
    # types of `self`, and the C++-side argument list.
    return_type = Float("corelation")
    self_type = ImageType([ONEBIT, GREYSCALE])
    args = Args([ImageType([ONEBIT], "template"),
                 Point("offset"),
                 Float("bb"), Float("bw"), Float("wb"), Float("ww")])
class corelation_sum(PluginFunction):
    """
    Returns a floating-point value for how well an image is corelated
    to another image placed at a given origin (*x*, *y*).  Uses the
    sum of absolute distance method.  A higher value indicates more
    corelation.

    *template*
        The template image.
    *offset*
        The displacement of the template on the image.
    """
    # Declarative Gamera plugin metadata (see corelation_weighted).
    return_type = Float("corelation")
    self_type = ImageType([ONEBIT, GREYSCALE])
    args = Args([ImageType([ONEBIT], "template"), Point("offset")])
    # Label shown by the GUI progress bar while the C++ kernel runs.
    progress_bar = "Correlating"
class corelation_sum_squares(PluginFunction):
    """
    Returns a floating-point value for how well an image is corelated
    to another image placed at a given origin (*x*, *y*).  Uses the
    sum of squares method.  A higher value indicates more corelation.

    *template*
        The template image.
    *offset*
        The displacement of the template on the image.
    """
    # Declarative Gamera plugin metadata (see corelation_weighted).
    return_type = Float("corelation")
    self_type = ImageType([ONEBIT, GREYSCALE])
    args = Args([ImageType([ONEBIT], "template"), Point("offset")])
    # Label shown by the GUI progress bar while the C++ kernel runs.
    progress_bar = "Correlating"
class CorelationModule(PluginModule):
    # Registers the corelation plugin functions with Gamera; the C++
    # implementations live in corelation.hpp.
    cpp_headers = ["corelation.hpp"]
    category = "Corelation"
    functions = [corelation_weighted, corelation_sum,
                 corelation_sum_squares]
    author = "Michael Droettboom"
    url = "http://gamera.sourceforge.net/"
module = CorelationModule()
| gpl-2.0 |
MoRgUiJu/morguiju.repo | plugin.video.adryanlist/youtubedl.py | 255 | 1840 | # -*- coding: utf-8 -*-
import xbmc,xbmcgui
try:
from YDStreamExtractor import getVideoInfo
from YDStreamExtractor import handleDownload
except Exception:
print 'importing Error. You need youtubedl module which is in official xbmc.org'
xbmc.executebuiltin("XBMC.Notification(LiveStreamsPro,Please [COLOR yellow]install Youtube-dl[/COLOR] module ,10000,"")")
def single_YD(url,download=False,dl_info=False,audio=False):
    """Resolve a media URL with youtube-dl for Kodi playback or download.

    url      -- page/video URL to resolve
    download -- if True, hand the extracted info to handleDownload
    dl_info  -- pre-extracted info dict; if truthy, download it directly
    audio    -- with download, prefer the m4a audio-only format (id '140')

    Returns the first resolvable stream URL when just resolving, else
    None (or nothing when a download was started).
    NOTE(review): Python 2 code (print statements, str.encode on bytes).
    """
    if dl_info:
        # Caller already has an info dict: start a background download.
        handleDownload(dl_info,bg=True)
        return
    else:
        info = getVideoInfo(url,quality=3,resolve_redirects=True)
        if info is None:
            print 'Fail to extract'
            return None
        elif info and download :
            if audio:
                try:
                    # Look for youtube-dl format id '140' (m4a audio).
                    for s in info.streams():
                        print 'len(s[',len(s['ytdl_format']['formats'])
                        for i in range(len(s['ytdl_format']['formats'])):
                            if s['ytdl_format']['formats'][i]['format_id'] == '140':
                                print 'm4a found'
                                audio_url = s['ytdl_format']['formats'][i]['url'].encode('utf-8','ignore')
                                title = s['title'].encode('utf-8','ignore')
                                info = {'url':audio_url,'title':title,'media_type':'audio'}
                                break
                except Exception:
                    print 'audio download failed'
                    return
            handleDownload(info,bg=True)
        else:
            # Resolve only: return the first playable stream URL.
            for s in info.streams():
                try:
                    stream_url = s['xbmc_url'].encode('utf-8','ignore')
                    print stream_url
                    return stream_url
                except Exception:
                    return None
| gpl-2.0 |
NervanaSystems/neon | tests/test_misc.py | 1 | 1558 | # ******************************************************************************
# Copyright 2014-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import logging
import numpy as np
from neon import NervanaObject, logger as neon_logger
logging.basicConfig(level=20)
logger = logging.getLogger()
def test_dropout(backend_default):
    """Exercise dropout via an explicit binary mask on the backend.

    Builds two random (6, 4) device tensors, zeroes roughly half of the
    activations with ``make_binary_mask`` (fprop), then propagates the
    same sparsity pattern into the error tensor (bprop).
    """
    be = NervanaObject.be
    be.bsz = be.batch_size = 15
    acts = be.array(np.random.randn(24).reshape((6, 4)), dtype=np.float32)
    deltas = be.array(np.random.randn(24).reshape((6, 4)), dtype=np.float32)
    mask = be.empty((6, 4))
    logger.info("FPROP")
    neon_logger.display(acts.get())
    be.make_binary_mask(mask, keepthresh=0.5)
    acts[:] = mask * acts
    neon_logger.display(acts.get())
    logger.info("BPROP")
    neon_logger.display(deltas.get())
    # Wherever fprop zeroed an activation, the corresponding delta is zeroed.
    deltas[:] = (acts != 0) * deltas
    neon_logger.display(deltas.get())
| apache-2.0 |
colinnewell/odoo | openerp/pooler.py | 374 | 2561 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Functions kept for backward compatibility.
They are simple wrappers around a global RegistryManager methods.
"""
import logging
import openerp.conf.deprecation
from openerp.modules.registry import RegistryManager
_logger = logging.getLogger(__name__)
def get_db_and_pool(db_name, force_demo=False, status=None, update_module=False):
    """Return the ``(database, registry)`` pair for ``db_name``.

    Deprecated compatibility wrapper around
    :meth:`RegistryManager.get`; kept only for old callers of
    ``openerp.pooler``.
    """
    assert openerp.conf.deprecation.openerp_pooler
    _logger.warning('openerp.pooler.get_db_and_pool() is deprecated.')
    reg = RegistryManager.get(db_name, force_demo, status, update_module)
    return reg._db, reg
def restart_pool(db_name, force_demo=False, status=None, update_module=False):
    """Discard any registry for ``db_name`` and build a fresh one.

    Deprecated compatibility wrapper around
    :meth:`RegistryManager.new`; returns the ``(database, registry)``
    pair of the newly initialized registry.
    """
    _logger.warning('openerp.pooler.restart_pool() is deprecated.')
    assert openerp.conf.deprecation.openerp_pooler
    reg = RegistryManager.new(db_name, force_demo, status, update_module)
    return reg._db, reg
def get_db(db_name):
    """Return a database connection, initializing its registry as a side effect."""
    assert openerp.conf.deprecation.openerp_pooler
    db, _registry = get_db_and_pool(db_name)
    return db
def get_pool(db_name, force_demo=False, status=None, update_module=False):
    """Return the model registry for ``db_name`` (deprecated wrapper)."""
    assert openerp.conf.deprecation.openerp_pooler
    _db, registry = get_db_and_pool(db_name, force_demo, status, update_module)
    return registry
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mdboom/astropy-helpers | astropy_helpers/sphinx/ext/phantom_import.py | 84 | 5684 | """
==============
phantom_import
==============
Sphinx extension to make directives from ``sphinx.ext.autodoc`` and similar
extensions to use docstrings loaded from an XML file.
This extension loads an XML file in the Pydocweb format [1] and
creates a dummy module that contains the specified docstrings. This
can be used to get the current docstrings from a Pydocweb instance
without needing to rebuild the documented module.
.. [1] http://code.google.com/p/pydocweb
"""
import imp, sys, compiler, types, os, inspect, re
def setup(app):
    """Sphinx entry point: declare the ``phantom_import_file`` config value
    and defer the phantom-module creation until the builder is initialised."""
    app.add_config_value('phantom_import_file', None, True)
    app.connect('builder-inited', initialize)
def initialize(app):
    # Called on 'builder-inited': read the configured XML path and, when it
    # names an existing file, install the phantom modules.  Silently a no-op
    # when the option is unset or the file is missing.  (Python 2 code.)
    fn = app.config.phantom_import_file
    if (fn and os.path.isfile(fn)):
        print "[numpydoc] Phantom importing modules from", fn, "..."
        import_phantom_module(fn)
#------------------------------------------------------------------------------
# Creating 'phantom' modules from an XML description
#------------------------------------------------------------------------------
def import_phantom_module(xml_file):
    """
    Insert a fake Python module to sys.modules, based on a XML file.

    The XML file is expected to conform to Pydocweb DTD. The fake
    module will contain dummy objects, which guarantee the following:

    - Docstrings are correct.
    - Class inheritance relationships are correct (if present in XML).
    - Function argspec is *NOT* correct (even if present in XML).
      Instead, the function signature is prepended to the function docstring.
    - Class attributes are *NOT* correct; instead, they are dummy objects.

    Parameters
    ----------
    xml_file : str
        Name of an XML file to read

    .. note:: This is Python-2-only code: it relies on ``cmp``,
       ``list.sort(cmp)``, ``getchildren()`` and the ``string-escape``
       codec, none of which exist on Python 3.
    """
    # Imported locally so lxml is only required when the feature is used.
    import lxml.etree as etree
    object_cache = {}
    tree = etree.parse(xml_file)
    root = tree.getroot()
    # Sort items so that
    # - Base classes come before classes inherited from them
    # - Modules come before their contents
    all_nodes = dict([(n.attrib['id'], n) for n in root])

    def _get_bases(node, recurse=False):
        # Collect the ids of a node's <base> references; with recurse=True
        # the transitive bases known to this document are included too.
        bases = [x.attrib['ref'] for x in node.findall('base')]
        if recurse:
            j = 0
            while True:
                try:
                    b = bases[j]
                except IndexError: break
                if b in all_nodes:
                    bases.extend(_get_bases(all_nodes[b]))
                j += 1
        return bases

    type_index = ['module', 'class', 'callable', 'object']

    def base_cmp(a, b):
        # Order primarily by kind (modules first), then for classes by
        # inheritance depth, then by dotted-name depth so containers come
        # before their members.
        x = cmp(type_index.index(a.tag), type_index.index(b.tag))
        if x != 0: return x
        if a.tag == 'class' and b.tag == 'class':
            a_bases = _get_bases(a, recurse=True)
            b_bases = _get_bases(b, recurse=True)
            x = cmp(len(a_bases), len(b_bases))
            if x != 0: return x
            if a.attrib['id'] in b_bases: return -1
            if b.attrib['id'] in a_bases: return 1
        return cmp(a.attrib['id'].count('.'), b.attrib['id'].count('.'))

    nodes = root.getchildren()
    nodes.sort(base_cmp)
    # Create phantom items
    for node in nodes:
        name = node.attrib['id']
        doc = (node.text or '').decode('string-escape') + "\n"
        if doc == "\n": doc = ""
        # create parent, if missing
        parent = name
        while True:
            parent = '.'.join(parent.split('.')[:-1])
            if not parent: break
            if parent in object_cache: break
            obj = imp.new_module(parent)
            object_cache[parent] = obj
            sys.modules[parent] = obj
        # create object
        if node.tag == 'module':
            obj = imp.new_module(name)
            obj.__doc__ = doc
            sys.modules[name] = obj
        elif node.tag == 'class':
            bases = [object_cache[b] for b in _get_bases(node)
                     if b in object_cache]
            bases.append(object)
            init = lambda self: None
            init.__doc__ = doc
            obj = type(name, tuple(bases), {'__doc__': doc, '__init__': init})
            obj.__name__ = name.split('.')[-1]
        elif node.tag == 'callable':
            funcname = node.attrib['id'].split('.')[-1]
            argspec = node.attrib.get('argspec')
            if argspec:
                # Strip everything before the opening paren so the signature
                # can be prepended to the docstring.
                argspec = re.sub('^[^(]*', '', argspec)
                doc = "%s%s\n\n%s" % (funcname, argspec, doc)
            obj = lambda: 0
            obj.__argspec_is_invalid_ = True
            obj.func_name = funcname
            obj.__name__ = name
            obj.__doc__ = doc
            if inspect.isclass(object_cache[parent]):
                obj.__objclass__ = object_cache[parent]
        else:
            class Dummy(object): pass
            obj = Dummy()
            obj.__name__ = name
            obj.__doc__ = doc
            if inspect.isclass(object_cache[parent]):
                obj.__get__ = lambda: None
        object_cache[name] = obj
        if parent:
            if inspect.ismodule(object_cache[parent]):
                obj.__module__ = parent
                setattr(object_cache[parent], name.split('.')[-1], obj)
    # Populate items
    for node in root:
        obj = object_cache.get(node.attrib['id'])
        if obj is None: continue
        for ref in node.findall('ref'):
            if node.tag == 'class':
                if ref.attrib['ref'].startswith(node.attrib['id'] + '.'):
                    setattr(obj, ref.attrib['name'],
                            object_cache.get(ref.attrib['ref']))
            else:
                setattr(obj, ref.attrib['name'],
                        object_cache.get(ref.attrib['ref']))
| bsd-3-clause |
fireduck64/electrum | gui/stdio.py | 3 | 7681 | from decimal import Decimal
_ = lambda x:x
#from i18n import _
from electrum import WalletStorage, Wallet
from electrum.util import format_satoshis, set_verbosity
from electrum.bitcoin import is_valid, COIN, TYPE_ADDRESS
from electrum.network import filter_protocol
import sys, getpass, datetime
# minimal fdisk like gui for console usage
# written by rofl0r, with some bits stolen from the text gui (ncurses)
class ElectrumGui:
    """Minimal interactive console (stdio) front-end for Electrum.

    Presents a single-letter command menu (see ``self.commands``) and
    drives the wallet/network objects directly.  Python 2 code
    (``print`` statements, ``raw_input``).
    """

    def __init__(self, config, daemon, plugins):
        self.config = config
        self.network = daemon.network
        storage = WalletStorage(config.get_wallet_path())
        if not storage.file_exists:
            print "Wallet not found. try 'electrum create'"
            exit()
        if storage.is_encrypted():
            # Storage-level encryption must be removed before the wallet
            # can be constructed below.
            password = getpass.getpass('Password:', stream=None)
            storage.decrypt(password)
        self.done = 0
        self.last_balance = ""
        set_verbosity(False)
        # Pending payment-order fields filled in by enter_order().
        self.str_recipient = ""
        self.str_description = ""
        self.str_amount = ""
        self.str_fee = ""
        self.wallet = Wallet(storage)
        self.wallet.start_threads(self.network)
        self.contacts = self.wallet.contacts
        self.network.register_callback(self.on_network, ['updated', 'banner'])
        self.commands = [_("[h] - displays this help text"), \
                         _("[i] - display transaction history"), \
                         _("[o] - enter payment order"), \
                         _("[p] - print stored payment order"), \
                         _("[s] - send stored payment order"), \
                         _("[r] - show own receipt addresses"), \
                         _("[c] - display contacts"), \
                         _("[b] - print server banner"), \
                         _("[q] - quit") ]
        self.num_commands = len(self.commands)

    def on_network(self, event, *args):
        # Network-thread callback registered in __init__.
        if event == 'updated':
            self.updated()
        elif event == 'banner':
            self.print_banner()

    def main_command(self):
        # One read-eval loop iteration: show balance, read a command letter,
        # dispatch.  Unknown input falls through to the help text.
        self.print_balance()
        c = raw_input("enter command: ")
        if c == "h" : self.print_commands()
        elif c == "i" : self.print_history()
        elif c == "o" : self.enter_order()
        elif c == "p" : self.print_order()
        elif c == "s" : self.send_order()
        elif c == "r" : self.print_addresses()
        elif c == "c" : self.print_contacts()
        elif c == "b" : self.print_banner()
        elif c == "n" : self.network_dialog()
        elif c == "e" : self.settings_dialog()
        elif c == "q" : self.done = 1
        else: self.print_commands()

    def updated(self):
        # Only reprint the balance line when it actually changed.
        s = self.get_balance()
        if s != self.last_balance:
            print(s)
        self.last_balance = s
        return True

    def print_commands(self):
        self.print_list(self.commands, "Available commands")

    def print_history(self):
        # Build a fixed-width four-column table (date, label, amount,
        # running balance); the leftover space of an 80-column terminal is
        # distributed evenly over the last three columns.
        width = [20, 40, 14, 14]
        delta = (80 - sum(width) - 4)/3
        format_str = "%"+"%d"%width[0]+"s"+"%"+"%d"%(width[1]+delta)+"s"+"%" \
            + "%d"%(width[2]+delta)+"s"+"%"+"%d"%(width[3]+delta)+"s"
        b = 0
        messages = []
        for item in self.wallet.get_history():
            tx_hash, confirmations, value, timestamp, balance = item
            if confirmations:
                try:
                    time_str = datetime.datetime.fromtimestamp(timestamp).isoformat(' ')[:-3]
                except Exception:
                    time_str = "unknown"
            else:
                time_str = 'unconfirmed'
            label = self.wallet.get_label(tx_hash)
            messages.append( format_str%( time_str, label, format_satoshis(value, whitespaces=True), format_satoshis(balance, whitespaces=True) ) )
        # Newest entry first.
        self.print_list(messages[::-1], format_str%( _("Date"), _("Description"), _("Amount"), _("Balance")))

    def print_balance(self):
        print(self.get_balance())

    def get_balance(self):
        # Return a one-line textual balance summary; amounts are converted
        # from satoshis via COIN.
        if self.wallet.network.is_connected():
            if not self.wallet.up_to_date:
                msg = _( "Synchronizing..." )
            else:
                c, u, x = self.wallet.get_balance()
                msg = _("Balance")+": %f  "%(Decimal(c) / COIN)
                if u:
                    msg += "  [%f unconfirmed]"%(Decimal(u) / COIN)
                if x:
                    msg += "  [%f unmatured]"%(Decimal(x) / COIN)
        else:
            msg = _( "Not connected" )
        return(msg)

    def print_contacts(self):
        messages = map(lambda x: "%20s   %45s "%(x[0], x[1][1]), self.contacts.items())
        self.print_list(messages, "%19s  %25s "%("Key", "Value"))

    def print_addresses(self):
        messages = map(lambda addr: "%30s    %30s       "%(addr, self.wallet.labels.get(addr,"")), self.wallet.get_addresses())
        self.print_list(messages, "%19s  %25s "%("Address", "Label"))

    def print_order(self):
        print("send order to " + self.str_recipient + ", amount: " + self.str_amount \
              + "\nfee: " + self.str_fee + ", desc: " + self.str_description)

    def enter_order(self):
        # Populate the pending order fields; validation happens in do_send().
        self.str_recipient = raw_input("Pay to: ")
        self.str_description = raw_input("Description : ")
        self.str_amount = raw_input("Amount: ")
        self.str_fee = raw_input("Fee: ")

    def send_order(self):
        self.do_send()

    def print_banner(self):
        for i, x in enumerate( self.wallet.network.banner.split('\n') ):
            print( x )

    def print_list(self, list, firstline):
        self.maxpos = len(list)
        if not self.maxpos: return
        print(firstline)
        for i in range(self.maxpos):
            msg = list[i] if i < len(list) else ""
            print(msg)

    def main(self):
        while self.done == 0: self.main_command()

    def do_send(self):
        # Validate the stored order, confirm interactively, build the
        # transaction and broadcast it.  Every failure path returns early
        # after printing a message.
        if not is_valid(self.str_recipient):
            print(_('Invalid Bitcoin address'))
            return
        try:
            amount = int(Decimal(self.str_amount) * COIN)
        except Exception:
            print(_('Invalid Amount'))
            return
        try:
            fee = int(Decimal(self.str_fee) * COIN)
        except Exception:
            print(_('Invalid Fee'))
            return
        if self.wallet.use_encryption:
            password = self.password_dialog()
            if not password:
                return
        else:
            password = None
        c = ""
        while c != "y":
            c = raw_input("ok to send (y/n)?")
            if c == "n": return
        try:
            tx = self.wallet.mktx([(TYPE_ADDRESS, self.str_recipient, amount)], password, self.config, fee)
        except Exception as e:
            print(str(e))
            return
        if self.str_description:
            self.wallet.labels[tx.hash()] = self.str_description
        print(_("Please wait..."))
        status, msg = self.network.broadcast(tx)
        if status:
            print(_('Payment sent.'))
            #self.do_clear()
            #self.update_contacts_tab()
        else:
            print(_('Error'))

    def network_dialog(self):
        print("use 'electrum setconfig server/proxy' to change your network settings")
        return True

    def settings_dialog(self):
        print("use 'electrum setconfig' to change your settings")
        return True

    def password_dialog(self):
        return getpass.getpass()

    #   XXX unused
    def run_receive_tab(self, c):
        #if c == 10:
        #    out = self.run_popup('Address', ["Edit label", "Freeze", "Prioritize"])
        return

    def run_contacts_tab(self, c):
        pass
| mit |
ntfreedom/neverendshadowsocks | setup.py | 929 | 1321 | import codecs
from setuptools import setup
# Read the long description with an explicit UTF-8 decode (codecs.open keeps
# this working identically on Python 2 and 3).
with codecs.open('README.rst', encoding='utf-8') as f:
    long_description = f.read()
setup(
    name="shadowsocks",
    version="2.8.2",
    license='http://www.apache.org/licenses/LICENSE-2.0',
    description="A fast tunnel proxy that help you get through firewalls",
    author='clowwindy',
    author_email='clowwindy42@gmail.com',
    url='https://github.com/shadowsocks/shadowsocks',
    packages=['shadowsocks', 'shadowsocks.crypto'],
    package_data={
        'shadowsocks': ['README.rst', 'LICENSE']
    },
    install_requires=[],
    # Console entry points: the local client and the server daemon.
    entry_points="""
    [console_scripts]
    sslocal = shadowsocks.local:main
    ssserver = shadowsocks.server:main
    """,
    classifiers=[
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Topic :: Internet :: Proxy Servers',
    ],
    long_description=long_description,
)
| apache-2.0 |
t10471/python | practice/src/design_pattern/Command.py | 1 | 1845 | # -*- coding: utf-8 -*-
# Roughly the same idea as Chain of Responsibility, but while Chain of
# Responsibility keeps the handler and the actual processing together,
# Command separates the handler (invoker) from the actual processing.
class File(object):
    """Receiver in the Command pattern: the object commands operate on.

    Operations just print a (Japanese) status message; this is demo code.
    Python 2 ``print`` statements.
    """

    def __init__(self, name):
        self.name = name

    def getName(self):
        return self.name

    def decompress(self):
        print self.name + u'を展開しました'

    def compress(self):
        print self.name + u'を圧縮しました'

    def create(self):
        print self.name + u'を作成しました'
class TouchCommand(object):
    """Concrete command: create the target file when executed."""

    def __init__(self, file):
        self.file = file

    def execute(self):
        self.file.create()
class CompressCommand (object):
    """Concrete command: compress the target file when executed."""

    def __init__(self, file):
        self.file = file

    def execute(self):
        self.file.compress()
class CopyCommand(object):
    """Concrete command: create a 'copy <name>' file next to the target."""

    def __init__(self, file):
        self.file = file

    def execute(self):
        # The copy is modelled as a brand-new File whose name is prefixed.
        file = File('copy ' + self.file.getName())
        file.create()
class Queue(object):
def __init__(self):
self.commands = []
self.current_index = 0
def addCommand(self, command):
self.commands.append(command)
def run(self):
while True:
command = self.next()
if command is None:
break
command.execute()
def next(self):
if len(self.commands) == 0 or len(self.commands) <= self.current_index:
return None
else:
ret = self.commands[self.current_index]
self.current_index = self.current_index + 1
return ret
if __name__ == '__main__':
    # Demo: queue three commands against one file and run them in order
    # (create, compress, copy).
    queue = Queue()
    file = File("sample.txt")
    queue.addCommand(TouchCommand(file))
    queue.addCommand(CompressCommand(file))
    queue.addCommand(CopyCommand(file))
    queue.run()
echizentm/CompactDataStructures | chapter_02/get_huffman_codes.py | 1 | 1189 | # coding: utf-8
import sys
from queue import PriorityQueue
class Node:
    """A weighted node of a Huffman tree.

    Leaves carry a probability/frequency in ``value``; internal nodes carry
    the sum of their children's values.  ``__str__`` walks the subtree and
    reports the code word of every leaf (left edge = '0', right edge = '1').
    """

    def __init__(self, value, left=None, right=None):
        self.value = value
        self.left = left
        self.right = right
        self.codes = []

    def __lt__(self, other):
        # Ordering by weight is what PriorityQueue needs to pop the two
        # lightest nodes first.
        return self.value < other.value

    def huffman_codes(self, node, code):
        # Internal nodes always have both children, so a missing left child
        # identifies a leaf and terminates the code word.
        if node.left is None:
            self.codes.append(code)
            return
        self.huffman_codes(node.left, code + '0')
        self.huffman_codes(node.right, code + '1')

    def __str__(self):
        self.codes = []
        self.huffman_codes(self, '')
        return 'value={}, codes={}'.format(self.value, self.codes)
def huffman_tree(probs):
    """Build a Huffman tree from a sequence of probabilities/frequencies.

    Repeatedly merges the two lightest nodes until one root remains and
    returns that root :class:`Node`.

    :param probs: non-empty iterable of numeric weights.
    :raises ValueError: if ``probs`` is empty.  (Previously an empty input
        made ``PriorityQueue.get()`` block forever.)
    """
    q = PriorityQueue()
    for p in probs:
        q.put(Node(p))
    if q.empty():
        raise ValueError('probs must contain at least one weight')
    while True:
        left = q.get()
        if q.empty():
            # Last node standing is the root of the tree.
            return left
        right = q.get()
        q.put(Node(
            left.value + right.value,
            left=left, right=right,
        ))
# Driver: one input line per tree; prints the leaf code words of each tree.
for line in sys.stdin:
    line = line.rstrip()
    # line is comma separated probs or freqs (e.g. 5,2,2,1,1)
    probs = list(map(lambda x: float(x), line.split(',')))
    print(huffman_tree(probs))
| apache-2.0 |
ecoal95/servo | tests/wpt/web-platform-tests/tools/third_party/hyper/hyper/common/bufsocket.py | 38 | 8385 | # -*- coding: utf-8 -*-
"""
hyper/http20/bufsocket.py
~~~~~~~~~~~~~~~~~~~~~~~~~
This file implements a buffered socket wrapper.
The purpose of this is to avoid the overhead of unnecessary syscalls while
allowing small reads from the network. This represents a potentially massive
performance optimisation at the cost of burning some memory in the userspace
process.
"""
import select
from .exceptions import ConnectionResetError, LineTooLongError
class BufferedSocket(object):
    """
    A buffered socket wrapper.

    The purpose of this is to avoid the overhead of unnecessary syscalls while
    allowing small reads from the network. This represents a potentially
    massive performance optimisation at the cost of burning some memory in the
    userspace process.

    Invariant maintained throughout: the live bytes occupy
    ``_backing_buffer[_index:_index + _bytes_in_buffer]``.
    """
    def __init__(self, sck, buffer_size=1000):
        """
        Create the buffered socket.

        :param sck: The socket to wrap.
        :param buffer_size: The size of the backing buffer in bytes. This
            parameter should be set to an appropriate value for your use case.
            Small values of ``buffer_size`` increase the overhead of buffer
            management: large values cause more memory to be used.
        """
        # The wrapped socket.
        self._sck = sck
        # The buffer we're using.
        self._backing_buffer = bytearray(buffer_size)
        # memoryview lets us hand out zero-copy slices of the backing buffer.
        self._buffer_view = memoryview(self._backing_buffer)
        # The size of the buffer.
        self._buffer_size = buffer_size
        # The start index in the memory view.
        self._index = 0
        # The number of bytes in the buffer.
        self._bytes_in_buffer = 0

    @property
    def _remaining_capacity(self):
        """
        The maximum number of bytes the buffer could still contain.
        """
        return self._buffer_size - self._index

    @property
    def _buffer_end(self):
        """
        The index of the first free byte in the buffer.
        """
        return self._index + self._bytes_in_buffer

    @property
    def can_read(self):
        """
        Whether or not there is more data to read from the socket.
        """
        # Zero-timeout select: purely a readiness poll, never blocks.
        read = select.select([self._sck], [], [], 0)[0]
        if read:
            return True
        return False

    @property
    def buffer(self):
        """
        Get access to the buffer itself.
        """
        return self._buffer_view[self._index:self._buffer_end]

    def advance_buffer(self, count):
        """
        Advances the buffer by the amount of data consumed outside the socket.
        """
        self._index += count
        self._bytes_in_buffer -= count

    def new_buffer(self):
        """
        This method moves all the data in the backing buffer to the start of
        a new, fresh buffer. This gives the ability to read much more data.
        """
        def read_all_from_buffer():
            end = self._index + self._bytes_in_buffer
            return self._buffer_view[self._index:end]

        new_buffer = bytearray(self._buffer_size)
        new_buffer_view = memoryview(new_buffer)
        new_buffer_view[0:self._bytes_in_buffer] = read_all_from_buffer()

        self._index = 0
        self._backing_buffer = new_buffer
        self._buffer_view = new_buffer_view
        return

    def recv(self, amt):
        """
        Read some data from the socket.

        :param amt: The amount of data to read.
        :returns: A ``memoryview`` object containing the appropriate number of
            bytes. The data *must* be copied out by the caller before the next
            call to this function.
        """
        # In this implementation you can never read more than the number of
        # bytes in the buffer.
        if amt > self._buffer_size:
            amt = self._buffer_size

        # If the amount of data we've been asked to read is less than the
        # remaining space in the buffer, we need to clear out the buffer and
        # start over.
        if amt > self._remaining_capacity:
            self.new_buffer()

        # If there's still some room in the buffer, opportunistically attempt
        # to read into it.
        # If we don't actually _need_ the data (i.e. there's enough in the
        # buffer to satisfy the request), use select to work out if the read
        # attempt will block. If it will, don't bother reading. If we need the
        # data, always do the read.
        if self._bytes_in_buffer >= amt:
            should_read = select.select([self._sck], [], [], 0)[0]
        else:
            should_read = True

        if (self._remaining_capacity > self._bytes_in_buffer and should_read):
            count = self._sck.recv_into(self._buffer_view[self._buffer_end:])

            # The socket just got closed. We should throw an exception if we
            # were asked for more data than we can return.
            if not count and amt > self._bytes_in_buffer:
                raise ConnectionResetError()
            self._bytes_in_buffer += count

        # Read out the bytes and update the index.
        # Note: a short read is possible here — the caller gets at most
        # `amt` bytes, possibly fewer.
        amt = min(amt, self._bytes_in_buffer)
        data = self._buffer_view[self._index:self._index+amt]

        self._index += amt
        self._bytes_in_buffer -= amt

        return data

    def fill(self):
        """
        Attempts to fill the buffer as much as possible. It will block for at
        most the time required to have *one* ``recv_into`` call return.
        """
        if not self._remaining_capacity:
            self.new_buffer()

        count = self._sck.recv_into(self._buffer_view[self._buffer_end:])
        if not count:
            raise ConnectionResetError()
        self._bytes_in_buffer += count

        return

    def readline(self):
        """
        Read up to a newline from the network and returns it. The implicit
        maximum line length is the buffer size of the buffered socket.

        Note that, unlike recv, this method absolutely *does* block until it
        can read the line.

        :returns: A ``memoryview`` object containing the appropriate number of
            bytes. The data *must* be copied out by the caller before the next
            call to this function.
        """
        # First, check if there's anything in the buffer. This is one of those
        # rare circumstances where this will work correctly on all platforms.
        index = self._backing_buffer.find(
            b'\n',
            self._index,
            self._index + self._bytes_in_buffer
        )

        if index != -1:
            length = index + 1 - self._index
            data = self._buffer_view[self._index:self._index+length]
            self._index += length
            self._bytes_in_buffer -= length
            return data

        # In this case, we didn't find a newline in the buffer. To fix that,
        # read some data into the buffer. To do our best to satisfy the read,
        # we should shunt the data down in the buffer so that it's right at
        # the start. We don't bother if we're already at the start of the
        # buffer.
        if self._index != 0:
            self.new_buffer()

        while self._bytes_in_buffer < self._buffer_size:
            count = self._sck.recv_into(self._buffer_view[self._buffer_end:])
            if not count:
                raise ConnectionResetError()

            # We have some more data. Again, look for a newline in that gap.
            # first_new_byte is captured *before* updating the byte count so
            # the search only covers the bytes just received.
            first_new_byte = self._buffer_end
            self._bytes_in_buffer += count
            index = self._backing_buffer.find(
                b'\n',
                first_new_byte,
                first_new_byte + count,
            )
            if index != -1:
                # The length of the buffer is the index into the
                # buffer at which we found the newline plus 1, minus the start
                # index of the buffer, which really should be zero.
                assert not self._index
                length = index + 1
                data = self._buffer_view[:length]
                self._index += length
                self._bytes_in_buffer -= length
                return data

        # If we got here, it means we filled the buffer without ever getting
        # a newline. Time to throw an exception.
        raise LineTooLongError()

    def __getattr__(self, name):
        # Everything not overridden here is delegated to the wrapped socket.
        return getattr(self._sck, name)
| mpl-2.0 |
tempbottle/rethinkdb | test/rql_test/connections/http_support/decorator/decorator.py | 112 | 10639 | ########################## LICENCE ###############################
# Copyright (c) 2005-2012, Michele Simionato
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# Redistributions in bytecode form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
"""
Decorator module, see http://pypi.python.org/pypi/decorator
for the documentation.
"""
__version__ = '3.4.0'
__all__ = ["decorator", "FunctionMaker", "contextmanager"]
import sys, re, inspect
# Python 2/3 compatibility shim: provide getfullargspec and get_init with a
# uniform interface on both interpreters.
if sys.version >= '3':
    from inspect import getfullargspec

    def get_init(cls):
        # On Python 3 the unbound method is just the plain function.
        return cls.__init__
else:
    class getfullargspec(object):
        "A quick and dirty replacement for getfullargspec for Python 2.X"
        def __init__(self, f):
            self.args, self.varargs, self.varkw, self.defaults = \
                inspect.getargspec(f)
            # Python 2 has no keyword-only arguments; keep empty placeholders
            # so callers can treat both implementations alike.
            self.kwonlyargs = []
            self.kwonlydefaults = None
        def __iter__(self):
            yield self.args
            yield self.varargs
            yield self.varkw
            yield self.defaults

    def get_init(cls):
        # On Python 2 __init__ is an unbound method; unwrap to the function.
        return cls.__init__.im_func

# Matches "def <name>(" at the start of generated source, capturing the name.
DEF = re.compile('\s*def\s*([_\w][_\w\d]*)\s*\(')
# basic functionality
class FunctionMaker(object):
    """
    An object with the ability to create functions with a given signature.
    It has attributes name, doc, module, signature, defaults, dict and
    methods update and make.

    .. note:: This is the Python-2 variant of the ``decorator`` package
       (``exec ... in``, ``print >>``, ``func_defaults``); it does not run
       unmodified on Python 3.
    """
    def __init__(self, func=None, name=None, signature=None,
                 defaults=None, doc=None, module=None, funcdict=None):
        self.shortsignature = signature
        if func:
            # func can be a class or a callable, but not an instance method
            self.name = func.__name__
            if self.name == '<lambda>': # small hack for lambda functions
                self.name = '_lambda_'
            self.doc = func.__doc__
            self.module = func.__module__
            if inspect.isfunction(func):
                argspec = getfullargspec(func)
                self.annotations = getattr(func, '__annotations__', {})
                for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs',
                          'kwonlydefaults'):
                    setattr(self, a, getattr(argspec, a))
                for i, arg in enumerate(self.args):
                    setattr(self, 'arg%d' % i, arg)
                if sys.version < '3': # easy way
                    self.shortsignature = self.signature = \
                        inspect.formatargspec(
                        formatvalue=lambda val: "", *argspec)[1:-1]
                else: # Python 3 way
                    # Rebuild the signature by hand so keyword-only arguments
                    # and star-args are preserved.
                    allargs = list(self.args)
                    allshortargs = list(self.args)
                    if self.varargs:
                        allargs.append('*' + self.varargs)
                        allshortargs.append('*' + self.varargs)
                    elif self.kwonlyargs:
                        allargs.append('*') # single star syntax
                    for a in self.kwonlyargs:
                        allargs.append('%s=None' % a)
                        allshortargs.append('%s=%s' % (a, a))
                    if self.varkw:
                        allargs.append('**' + self.varkw)
                        allshortargs.append('**' + self.varkw)
                    self.signature = ', '.join(allargs)
                    self.shortsignature = ', '.join(allshortargs)
                self.dict = func.__dict__.copy()
        # func=None happens when decorating a caller
        if name:
            self.name = name
        if signature is not None:
            self.signature = signature
        if defaults:
            self.defaults = defaults
        if doc:
            self.doc = doc
        if module:
            self.module = module
        if funcdict:
            self.dict = funcdict
        # check existence required attributes
        assert hasattr(self, 'name')
        if not hasattr(self, 'signature'):
            raise TypeError('You are decorating a non function: %s' % func)

    def update(self, func, **kw):
        "Update the signature of func with the data in self"
        func.__name__ = self.name
        func.__doc__ = getattr(self, 'doc', None)
        func.__dict__ = getattr(self, 'dict', {})
        func.func_defaults = getattr(self, 'defaults', ())
        func.__kwdefaults__ = getattr(self, 'kwonlydefaults', None)
        func.__annotations__ = getattr(self, 'annotations', None)
        # Attribute the generated function to the module of its caller
        # (three frames up: make -> create -> the decorating code).
        callermodule = sys._getframe(3).f_globals.get('__name__', '?')
        func.__module__ = getattr(self, 'module', callermodule)
        func.__dict__.update(kw)

    def make(self, src_templ, evaldict=None, addsource=False, **attrs):
        "Make a new function from a given template and update the signature"
        src = src_templ % vars(self) # expand name and signature
        evaldict = evaldict or {}
        mo = DEF.match(src)
        if mo is None:
            raise SyntaxError('not a valid function template\n%s' % src)
        name = mo.group(1) # extract the function name
        names = set([name] + [arg.strip(' *') for arg in
                              self.shortsignature.split(',')])
        for n in names:
            # _func_ and _call_ are reserved: they carry the wrapped function
            # and the caller inside the generated source.
            if n in ('_func_', '_call_'):
                raise NameError('%s is overridden in\n%s' % (n, src))
        if not src.endswith('\n'): # add a newline just for safety
            src += '\n' # this is needed in old versions of Python
        try:
            code = compile(src, '<string>', 'single')
            # print >> sys.stderr, 'Compiling %s' % src
            exec code in evaldict
        except:
            print >> sys.stderr, 'Error in generated code:'
            print >> sys.stderr, src
            raise
        func = evaldict[name]
        if addsource:
            attrs['__source__'] = src
        self.update(func, **attrs)
        return func

    @classmethod
    def create(cls, obj, body, evaldict, defaults=None,
               doc=None, module=None, addsource=True, **attrs):
        """
        Create a function from the strings name, signature and body.
        evaldict is the evaluation dictionary. If addsource is true an attribute
        __source__ is added to the result. The attributes attrs are added,
        if any.
        """
        if isinstance(obj, str): # "name(signature)"
            name, rest = obj.strip().split('(', 1)
            signature = rest[:-1] #strip a right parens
            func = None
        else: # a function
            name = None
            signature = None
            func = obj
        self = cls(func, name, signature, defaults, doc, module)
        # Indent the body one level so it becomes the function's suite.
        ibody = '\n'.join('    ' + line for line in body.splitlines())
        return self.make('def %(name)s(%(signature)s):\n' + ibody,
                         evaldict, addsource, **attrs)
def decorator(caller, func=None):
    """
    decorator(caller) converts a caller function into a decorator;
    decorator(caller, func) decorates a function using a caller.
    """
    if func is not None: # returns a decorated function
        # Two-argument form: wrap func so that calling it invokes
        # caller(func, *args, **kwargs), preserving func's signature.
        evaldict = func.func_globals.copy()
        evaldict['_call_'] = caller
        evaldict['_func_'] = func
        return FunctionMaker.create(
            func, "return _call_(_func_, %(shortsignature)s)",
            evaldict, undecorated=func, __wrapped__=func)
    else: # returns a decorator
        # One-argument form: caller may be a class, a plain function, or an
        # arbitrary callable object; extract a name, a docstring and the name
        # of its function parameter in each case.
        if inspect.isclass(caller):
            name = caller.__name__.lower()
            callerfunc = get_init(caller)
            doc = 'decorator(%s) converts functions/generators into ' \
                  'factories of %s objects' % (caller.__name__, caller.__name__)
            fun = getfullargspec(callerfunc).args[1] # second arg
        elif inspect.isfunction(caller):
            name = '_lambda_' if caller.__name__ == '<lambda>' \
                else caller.__name__
            callerfunc = caller
            doc = caller.__doc__
            fun = getfullargspec(callerfunc).args[0] # first arg
        else: # assume caller is an object with a __call__ method
            name = caller.__class__.__name__.lower()
            callerfunc = caller.__call__.im_func
            doc = caller.__call__.__doc__
            fun = getfullargspec(callerfunc).args[1] # second arg
        evaldict = callerfunc.func_globals.copy()
        evaldict['_call_'] = caller
        evaldict['decorator'] = decorator
        return FunctionMaker.create(
            '%s(%s)' % (name, fun),
            'return decorator(_call_, %s)' % fun,
            evaldict, undecorated=caller, __wrapped__=caller,
            doc=doc, module=caller.__module__)
######################### contextmanager ########################
def __call__(self, func):
    """Context manager decorator: wrap *func* so its body runs inside
    ``with self``, preserving the original signature."""
    evaldict = dict(_self_=self, _func_=func)
    return FunctionMaker.create(
        func, "with _self_: return _func_(%(shortsignature)s)",
        evaldict, __wrapped__=func)
# Build a ContextManager class whose instances work both as context managers
# and (via __call__ above) as decorators, on both old and new Pythons.
try:  # Python >= 3.2
    from contextlib import _GeneratorContextManager
    ContextManager = type(
        'ContextManager', (_GeneratorContextManager,), dict(__call__=__call__))
except ImportError:  # Python >= 2.5
    from contextlib import GeneratorContextManager
    # Older GeneratorContextManager takes the already-created generator,
    # so adapt the constructor to accept (func, *args, **kwargs).
    def __init__(self, f, *a, **k):
        return GeneratorContextManager.__init__(self, f(*a, **k))
    ContextManager = type(
        'ContextManager', (GeneratorContextManager,),
        dict(__call__=__call__, __init__=__init__))

# Re-export: a signature-preserving replacement for contextlib.contextmanager.
contextmanager = decorator(ContextManager)
| agpl-3.0 |
aperigault/ansible | lib/ansible/modules/network/f5/bigip_firewall_policy.py | 38 | 16743 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Metadata consumed by Ansible tooling (ansible-doc, sanity tests).
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_firewall_policy
short_description: Manage AFM security firewall policies on a BIG-IP
description:
- Manages AFM security firewall policies on a BIG-IP.
version_added: 2.7
options:
name:
description:
- The name of the policy to create.
type: str
required: True
description:
description:
- The description to attach to the policy.
- This parameter is only supported on versions of BIG-IP >= 12.1.0. On earlier
versions it will simply be ignored.
type: str
state:
description:
- When C(state) is C(present), ensures that the policy exists.
- When C(state) is C(absent), ensures that the policy is removed.
type: str
choices:
- present
- absent
default: present
rules:
description:
- Specifies a list of rules that you want associated with this policy.
The order of this list is the order they will be evaluated by BIG-IP.
If the specified rules do not exist (for example when creating a new
policy) then they will be created.
- Rules specified here, if they do not exist, will be created with "default deny"
behavior. It is expected that you follow-up this module with the actual
configuration for these rules.
- The C(bigip_firewall_rule) module can be used to also create, as well as
edit, existing and new rules.
type: list
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Create a basic policy with some rule stubs
bigip_firewall_policy:
name: foo
rules:
- rule1
- rule2
- rule3
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
description:
description: The new description of the policy.
returned: changed
type: str
sample: My firewall policy
rules:
description: The list of rules, in the order that they are evaluated, on the device.
returned: changed
type: list
sample: ['rule1', 'rule2', 'rule3']
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import transform_name
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import transform_name
class Parameters(AnsibleF5Parameters):
    """Shared parameter definitions for the firewall policy module."""

    # Maps device REST attribute names to this module's parameter names.
    api_map = {
        'rulesReference': 'rules'
    }

    # Attributes sent to the device API verbatim.
    api_attributes = [
        'description'
    ]

    # Values reported back to the user in the module result.
    returnables = [
        'description',
        'rules',
    ]

    # Values compared between 'want' and 'have' to detect changes.
    updatables = [
        'description',
        'rules'
    ]

    def to_return(self):
        """Return a dict of reportable values; best-effort, never raises."""
        result = {}
        try:
            for returnable in self.returnables:
                result[returnable] = getattr(self, returnable)
            result = self._filter_params(result)
        except Exception:
            # Deliberate best-effort: a failing property must not break
            # result reporting; a partial (unfiltered) dict is acceptable.
            pass
        return result
class ModuleParameters(Parameters):
    """Parameters as supplied by the user in the task arguments."""

    @property
    def rules(self):
        raw = self._values['rules']
        if raw is None:
            return None
        # Rule names may arrive as unicode; normalize each to a plain str.
        return [str(entry) for entry in raw]
class ApiParameters(Parameters):
    """Parameters as read back from the device REST API."""

    @property
    def rules(self):
        collection = self._values['rules']
        if collection is None or 'items' not in collection:
            return []
        # The device returns the rules subcollection in evaluation order;
        # pair each fullPath with its index and sort on it to keep that
        # ordering explicit and stable.
        indexed = [(pos, entry['fullPath'])
                   for pos, entry in enumerate(collection['items'])]
        return [path for _, path in sorted(indexed, key=lambda pair: pair[0])]
class Changes(Parameters):
    # Base container for computed differences between want/have.
    pass


class UsableChanges(Changes):
    # Changes shaped for sending to the device API.
    pass


class ReportableChanges(Changes):
    # Changes shaped for reporting back to the user.
    pass
class Difference(object):
    """Compute which attributes differ between the desired state (``want``)
    and the device state (``have``).

    ``compare(param)`` returns the wanted value when it differs, or None
    when there is nothing to change. Attribute-specific comparison logic
    lives in same-named properties (e.g. ``rules``); everything else falls
    back to a plain inequality check.
    """

    def __init__(self, want, have=None):
        self.want = want
        self.have = have

    def compare(self, param):
        """Return the changed value for *param*, or None when unchanged."""
        try:
            return getattr(self, param)
        except AttributeError:
            return self.__default(param)

    def __default(self, param):
        # Generic comparison: a missing 'have' attribute counts as changed.
        wanted = getattr(self.want, param)
        try:
            if wanted != getattr(self.have, param):
                return wanted
        except AttributeError:
            return wanted

    @property
    def rules(self):
        # Rule lists are compared as sets: only membership changes matter
        # here (ordering is reconciled separately on the device).
        if self.want.rules is None:
            return None
        if self.have.rules is None:
            return self.want.rules
        if set(self.want.rules) != set(self.have.rules):
            return self.want.rules
class ModuleManager(object):
    """Converges an AFM firewall policy on a BIG-IP to the requested state.

    Compares the desired state (``want``, from module params) against the
    device state (``have``, from the REST API) and issues only the REST
    calls needed to reconcile the two. HTTP/API failures are surfaced as
    F5ModuleError so main() can fail the task cleanly.
    """

    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = F5RestClient(**self.module.params)
        self.want = ModuleParameters(params=self.module.params)
        self.have = ApiParameters()
        self.changes = UsableChanges()

    def _set_changed_options(self):
        # On create, every option the user supplied counts as a change.
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            # Consistency: use UsableChanges (as __init__ does) rather than
            # the bare Changes base class.
            self.changes = UsableChanges(params=changed)

    def _update_changed_options(self):
        """Populate ``self.changes`` with want/have differences.

        :returns: True when at least one updatable attribute differs.
        """
        diff = Difference(self.want, self.have)
        changed = dict()
        for key in Parameters.updatables:
            change = diff.compare(key)
            if change is None:
                continue
            if isinstance(change, dict):
                changed.update(change)
            else:
                changed[key] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False

    def should_update(self):
        """Return True when the device needs to be modified."""
        return self._update_changed_options()

    def exec_module(self):
        """Entry point: apply the requested state and build the result dict."""
        changed = False
        result = dict()
        state = self.want.state

        if state == "present":
            changed = self.present()
        elif state == "absent":
            changed = self.absent()

        changes = self.changes.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        return result

    def _announce_deprecations(self, result):
        # Forward any deprecation warnings collected in the result to Ansible.
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            self.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )

    def present(self):
        """Ensure the policy exists; return True when something changed."""
        if self.exists():
            return self.update()
        else:
            return self.create()

    def exists(self):
        """Return True when the policy already exists on the device."""
        uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError:
            # Non-JSON payload: treat as "not found" (best effort).
            return False
        if resp.status == 404 or ('code' in response and response['code'] == 404):
            return False
        return True

    def update(self):
        """Update an existing policy; return True when a change was made."""
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        if self.module.check_mode:
            return True
        self.update_on_device()
        return True

    def remove(self):
        """Delete the policy, verifying the deletion actually took effect."""
        if self.module.check_mode:
            return True
        self.remove_from_device()
        if self.exists():
            raise F5ModuleError("Failed to delete the resource.")
        return True

    def create(self):
        """Create the policy (and any stub rules); always reports a change."""
        self._set_changed_options()
        if self.module.check_mode:
            return True
        self.create_on_device()
        return True

    def create_on_device(self):
        """POST the new policy, then create/order any requested rules."""
        params = self.changes.api_params()
        params['name'] = self.want.name
        params['partition'] = self.want.partition
        uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/".format(
            self.client.provider['server'],
            self.client.provider['server_port']
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in [400, 403]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        if self.want.rules:
            self._upsert_policy_rules_on_device()
        return response['selfLink']

    def update_on_device(self):
        """PATCH changed policy attributes, then reconcile rule membership.

        Bug fix: the attribute PATCH and the rules upsert are independent
        operations. The original implementation returned immediately after
        a successful PATCH, so whenever another attribute (e.g. the
        description) changed alongside the rules, the rules upsert was
        silently skipped. Both steps now always run.
        """
        params = self.changes.api_params()
        uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        link = None
        if params:
            resp = self.client.api.patch(uri, json=params)
            try:
                response = resp.json()
            except ValueError as ex:
                raise F5ModuleError(str(ex))
            if 'code' in response and response['code'] == 400:
                if 'message' in response:
                    raise F5ModuleError(response['message'])
                else:
                    raise F5ModuleError(resp.content)
            link = response['selfLink']
        if self.changes.rules is not None:
            self._upsert_policy_rules_on_device()
        return link

    def absent(self):
        """Ensure the policy is removed; return True when it was deleted."""
        if self.exists():
            return self.remove()
        return False

    def remove_from_device(self):
        """DELETE the policy resource."""
        uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        response = self.client.api.delete(uri)
        if response.status == 200:
            return True
        raise F5ModuleError(response.content)

    def read_current_from_device(self):
        """GET the policy, rules subcollection included, as ApiParameters."""
        uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}/?expandSubcollections=true".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return ApiParameters(params=response)

    def rule_exists(self, rule):
        """Return True when *rule* already exists inside this policy."""
        uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}/rules/{3}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name),
            rule.replace('/', '_')
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError:
            return False
        if resp.status == 404 or ('code' in response and response['code'] == 404):
            return False
        return True

    def create_default_rule_on_device(self, rule):
        """Create a stub "default deny" rule at the end of the policy."""
        params = dict(
            name=rule.replace('/', '_'),
            action='reject',

            # Adding items to the end of the list causes the list of rules
            # to match what the user specified in the original list.
            placeAfter='last',
        )
        uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}/rules/".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name),
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in [400, 403]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return response['selfLink']

    def remove_rule_from_device(self, rule):
        """Delete a single rule from the policy."""
        uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}/rules/{3}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name),
            rule.replace('/', '_'),
        )

        # this response returns no payload
        resp = self.client.api.delete(uri)
        if resp.status in [400, 403]:
            raise F5ModuleError(resp.content)

    def move_rule_to_front(self, rule):
        """Move *rule* to the end of the policy's rule list.

        NOTE: despite the (historical) name, this PATCHes
        ``placeAfter='last'``; calling it for each wanted rule in order
        rebuilds the list in exactly the user-specified sequence.
        """
        params = dict(
            placeAfter='last'
        )
        uri = "https://{0}:{1}/mgmt/tm/security/firewall/policy/{2}/rules/{3}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name),
            rule.replace('/', '_')
        )
        resp = self.client.api.patch(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in [400, 403]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return response['selfLink']

    def _upsert_policy_rules_on_device(self):
        """Make the device's rule list match ``self.changes.rules`` exactly:
        drop extras, create missing stubs, then re-order."""
        rules = self.changes.rules
        if rules is None:
            rules = []
        self._remove_rule_difference(rules)
        for rule in rules:
            if not self.rule_exists(rule):
                self.create_default_rule_on_device(rule)
        for rule in rules:
            self.move_rule_to_front(rule)

    def _remove_rule_difference(self, rules):
        """Delete device rules that are not present in the wanted *rules*."""
        if rules is None or self.have.rules is None:
            return
        have_rules = set(self.have.rules)
        want_rules = set(rules)
        removable = have_rules.difference(want_rules)
        for remove in removable:
            self.remove_rule_from_device(remove)
class ArgumentSpec(object):
    """Builds the AnsibleModule argument specification for this module."""

    def __init__(self):
        self.supports_check_mode = True
        module_args = {
            'name': {'required': True},
            'description': {},
            'rules': {'type': 'list'},
            'partition': {
                'default': 'Common',
                'fallback': (env_fallback, ['F5_PARTITION']),
            },
            'state': {
                'default': 'present',
                'choices': ['present', 'absent'],
            },
        }
        # Layer the module-specific options on top of the shared F5 spec.
        self.argument_spec = {}
        self.argument_spec.update(f5_argument_spec)
        self.argument_spec.update(module_args)
def main():
    """Module entry point: build the spec, run the manager, report results."""
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
    )

    try:
        manager = ModuleManager(module=module)
        module.exit_json(**manager.exec_module())
    except F5ModuleError as ex:
        module.fail_json(msg=str(ex))


if __name__ == '__main__':
    main()
| gpl-3.0 |
nelmiux/CarnotKE | jyhton/Lib/test/test_collections.py | 10 | 43841 |
import unittest, doctest, operator
import inspect
from test import test_support
from collections import namedtuple, Counter, OrderedDict
from test import mapping_tests
import pickle, cPickle, copy
from random import randrange, shuffle
import keyword
import re
import sys
from collections import Hashable, Iterable, Iterator
from collections import Sized, Container, Callable
from collections import Set, MutableSet
from collections import Mapping, MutableMapping
from collections import Sequence, MutableSequence
TestNT = namedtuple('TestNT', 'x y z') # type used for pickle tests
class TestNamedTuple(unittest.TestCase):
    """Tests for collections.namedtuple: factory validation, instance
    behavior, tuple compatibility, pickling/copying, and name handling."""

    def test_factory(self):
        Point = namedtuple('Point', 'x y')
        self.assertEqual(Point.__name__, 'Point')
        self.assertEqual(Point.__slots__, ())
        self.assertEqual(Point.__module__, __name__)
        self.assertEqual(Point.__getitem__, tuple.__getitem__)
        self.assertEqual(Point._fields, ('x', 'y'))

        # Invalid type/field names must be rejected at class-creation time.
        self.assertRaises(ValueError, namedtuple, 'abc%', 'efg ghi')      # type has non-alpha char
        self.assertRaises(ValueError, namedtuple, 'class', 'efg ghi')     # type has keyword
        self.assertRaises(ValueError, namedtuple, '9abc', 'efg ghi')      # type starts with digit

        self.assertRaises(ValueError, namedtuple, 'abc', 'efg g%hi')      # field with non-alpha char
        self.assertRaises(ValueError, namedtuple, 'abc', 'abc class')     # field has keyword
        self.assertRaises(ValueError, namedtuple, 'abc', '8efg 9ghi')     # field starts with digit
        self.assertRaises(ValueError, namedtuple, 'abc', '_efg ghi')      # field with leading underscore
        self.assertRaises(ValueError, namedtuple, 'abc', 'efg efg ghi')   # duplicate field

        namedtuple('Point0', 'x1 y2')   # Verify that numbers are allowed in names
        namedtuple('_', 'a b c')        # Test leading underscores in a typename

        nt = namedtuple('nt', u'the quick brown fox')  # check unicode input
        self.assertNotIn("u'", repr(nt._fields))
        nt = namedtuple('nt', (u'the', u'quick'))  # check unicode input
        self.assertNotIn("u'", repr(nt._fields))

        self.assertRaises(TypeError, Point._make, [11])          # catch too few args
        self.assertRaises(TypeError, Point._make, [11, 22, 33])  # catch too many args

    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -O2 and above")
    def test_factory_doc_attr(self):
        Point = namedtuple('Point', 'x y')
        self.assertEqual(Point.__doc__, 'Point(x, y)')

    def test_name_fixer(self):
        # rename=True must replace each invalid field name with '_<index>'.
        for spec, renamed in [
            [('efg', 'g%hi'), ('efg', '_1')],                              # field with non-alpha char
            [('abc', 'class'), ('abc', '_1')],                             # field has keyword
            [('8efg', '9ghi'), ('_0', '_1')],                              # field starts with digit
            [('abc', '_efg'), ('abc', '_1')],                              # field with leading underscore
            [('abc', 'efg', 'efg', 'ghi'), ('abc', 'efg', '_2', 'ghi')],   # duplicate field
            [('abc', '', 'x'), ('abc', '_1', 'x')],                        # fieldname is a space
        ]:
            self.assertEqual(namedtuple('NT', spec, rename=True)._fields, renamed)

    def test_instance(self):
        Point = namedtuple('Point', 'x y')
        p = Point(11, 22)
        # All positional/keyword call styles must produce equal instances.
        self.assertEqual(p, Point(x=11, y=22))
        self.assertEqual(p, Point(11, y=22))
        self.assertEqual(p, Point(y=22, x=11))
        self.assertEqual(p, Point(*(11, 22)))
        self.assertEqual(p, Point(**dict(x=11, y=22)))
        self.assertRaises(TypeError, Point, 1)          # too few args
        self.assertRaises(TypeError, Point, 1, 2, 3)    # too many args
        self.assertRaises(TypeError, eval, 'Point(XXX=1, y=2)', locals())   # wrong keyword argument
        self.assertRaises(TypeError, eval, 'Point(x=1)', locals())          # missing keyword argument
        self.assertEqual(repr(p), 'Point(x=11, y=22)')
        self.assertNotIn('__weakref__', dir(p))
        self.assertEqual(p, Point._make([11, 22]))      # test _make classmethod
        self.assertEqual(p._fields, ('x', 'y'))         # test _fields attribute
        self.assertEqual(p._replace(x=1), (1, 22))      # test _replace method
        self.assertEqual(p._asdict(), dict(x=11, y=22)) # test _asdict method
        self.assertEqual(vars(p), p._asdict())          # verify that vars() works

        try:
            p._replace(x=1, error=2)
        except ValueError:
            pass
        else:
            self._fail('Did not detect an incorrect fieldname')

        # verify that field string can have commas
        Point = namedtuple('Point', 'x, y')
        p = Point(x=11, y=22)
        self.assertEqual(repr(p), 'Point(x=11, y=22)')

        # verify that fieldspec can be a non-string sequence
        Point = namedtuple('Point', ('x', 'y'))
        p = Point(x=11, y=22)
        self.assertEqual(repr(p), 'Point(x=11, y=22)')

    def test_tupleness(self):
        # A namedtuple must behave exactly like a plain tuple.
        Point = namedtuple('Point', 'x y')
        p = Point(11, 22)

        self.assertIsInstance(p, tuple)
        self.assertEqual(p, (11, 22))                                  # matches a real tuple
        self.assertEqual(tuple(p), (11, 22))                           # coercable to a real tuple
        self.assertEqual(list(p), [11, 22])                            # coercable to a list
        self.assertEqual(max(p), 22)                                   # iterable
        self.assertEqual(max(*p), 22)                                  # star-able
        x, y = p
        self.assertEqual(p, (x, y))                                    # unpacks like a tuple
        self.assertEqual((p[0], p[1]), (11, 22))                       # indexable like a tuple
        self.assertRaises(IndexError, p.__getitem__, 3)

        self.assertEqual(p.x, x)
        self.assertEqual(p.y, y)
        self.assertRaises(AttributeError, eval, 'p.z', locals())

    def test_odd_sizes(self):
        # Edge cases: zero fields, one field, and very many fields.
        Zero = namedtuple('Zero', '')
        self.assertEqual(Zero(), ())
        self.assertEqual(Zero._make([]), ())
        self.assertEqual(repr(Zero()), 'Zero()')
        self.assertEqual(Zero()._asdict(), {})
        self.assertEqual(Zero()._fields, ())

        Dot = namedtuple('Dot', 'd')
        self.assertEqual(Dot(1), (1,))
        self.assertEqual(Dot._make([1]), (1,))
        self.assertEqual(Dot(1).d, 1)
        self.assertEqual(repr(Dot(1)), 'Dot(d=1)')
        self.assertEqual(Dot(1)._asdict(), {'d':1})
        self.assertEqual(Dot(1)._replace(d=999), (999,))
        self.assertEqual(Dot(1)._fields, ('d',))

        n = 5000
        import string, random
        names = list(set(''.join([random.choice(string.ascii_letters)
                                  for j in range(10)]) for i in range(n)))
        n = len(names)
        #XXX: currently Jython's classfile limits are exceeded by Big tests.
        if not test_support.is_jython:
            Big = namedtuple('Big', names)
            b = Big(*range(n))
            self.assertEqual(b, tuple(range(n)))
            self.assertEqual(Big._make(range(n)), tuple(range(n)))
            for pos, name in enumerate(names):
                self.assertEqual(getattr(b, name), pos)
            repr(b)                                 # make sure repr() doesn't blow-up
            d = b._asdict()
            d_expected = dict(zip(names, range(n)))
            self.assertEqual(d, d_expected)
            b2 = b._replace(**dict([(names[1], 999),(names[-5], 42)]))
            b2_expected = range(n)
            b2_expected[1] = 999
            b2_expected[-5] = 42
            self.assertEqual(b2, tuple(b2_expected))
            self.assertEqual(b._fields, tuple(names))

    def test_pickle(self):
        # Round-trip through both pickle implementations, every protocol.
        p = TestNT(x=10, y=20, z=30)
        for module in pickle, cPickle:
            loads = getattr(module, 'loads')
            dumps = getattr(module, 'dumps')
            for protocol in -1, 0, 1, 2:
                q = loads(dumps(p, protocol))
                self.assertEqual(p, q)
                self.assertEqual(p._fields, q._fields)

    def test_copy(self):
        p = TestNT(x=10, y=20, z=30)
        for copier in copy.copy, copy.deepcopy:
            q = copier(p)
            self.assertEqual(p, q)
            self.assertEqual(p._fields, q._fields)

    def test_name_conflicts(self):
        # Some names like "self", "cls", "tuple", "itemgetter", and "property"
        # failed when used as field names.  Test to make sure these now work.
        T = namedtuple('T', 'itemgetter property self cls tuple')
        t = T(1, 2, 3, 4, 5)
        self.assertEqual(t, (1,2,3,4,5))
        newt = t._replace(itemgetter=10, property=20, self=30, cls=40, tuple=50)
        self.assertEqual(newt, (10,20,30,40,50))

        # Broader test of all interesting names in a template
        with test_support.captured_stdout() as template:
            T = namedtuple('T', 'x', verbose=True)
        words = set(re.findall('[A-Za-z]+', template.getvalue()))
        words -= set(keyword.kwlist)
        T = namedtuple('T', words)
        # test __new__
        values = tuple(range(len(words)))
        t = T(*values)
        self.assertEqual(t, values)
        t = T(**dict(zip(T._fields, values)))
        self.assertEqual(t, values)
        # test _make
        t = T._make(values)
        self.assertEqual(t, values)
        # exercise __repr__
        repr(t)
        # test _asdict
        self.assertEqual(t._asdict(), dict(zip(T._fields, values)))
        # test _replace
        t = T._make(values)
        newvalues = tuple(v*10 for v in values)
        newt = t._replace(**dict(zip(T._fields, newvalues)))
        self.assertEqual(newt, newvalues)
        # test _fields
        self.assertEqual(T._fields, tuple(words))
        # test __getnewargs__
        self.assertEqual(t.__getnewargs__(), values)
class ABCTestCase(unittest.TestCase):
    """Shared helpers for validating ABC behavior, used by the concrete
    ABC test classes below."""

    def validate_abstract_methods(self, abc, *names):
        # A concrete subclass stubbing every abstract name must instantiate;
        # dropping any single stub must make instantiation raise TypeError.
        methodstubs = dict.fromkeys(names, lambda s, *args: 0)

        # everything should work when all required methods are present
        C = type('C', (abc,), methodstubs)
        C()

        # instantiation should fail if a required method is missing
        for name in names:
            stubs = methodstubs.copy()
            del stubs[name]
            C = type('C', (abc,), stubs)
            self.assertRaises(TypeError, C, name)

    def validate_isinstance(self, abc, name):
        # Structural (duck-typed) registration: defining the protocol method
        # makes a class pass isinstance/issubclass; __hash__ = None opts out.
        stub = lambda s, *args: 0

        # new-style class
        C = type('C', (object,), {name: stub})
        self.assertIsInstance(C(), abc)
        self.assertTrue(issubclass(C, abc))
        # old-style class
        class C: pass
        # XXX: not working in Jython old style classes. Do we care?
        if not test_support.is_jython:
            setattr(C, name, stub)
            self.assertIsInstance(C(), abc)
            self.assertTrue(issubclass(C, abc))

        # new-style class
        C = type('C', (object,), {'__hash__': None})
        self.assertNotIsInstance(C(), abc)
        self.assertFalse(issubclass(C, abc))
        # old-style class
        class C: pass
        self.assertNotIsInstance(C(), abc)
        self.assertFalse(issubclass(C, abc))

    def validate_comparison(self, instance):
        # Verify that mixed-type comparisons defer to the other operand's
        # reflected operator (the Other instance records being called).
        ops = ['lt', 'gt', 'le', 'ge', 'ne', 'or', 'and', 'xor', 'sub']
        operators = {}
        for op in ops:
            name = '__' + op + '__'
            operators[name] = getattr(operator, name)

        class Other:
            def __init__(self):
                self.right_side = False
            def __eq__(self, other):
                self.right_side = True
                return True
            __lt__ = __eq__
            __gt__ = __eq__
            __le__ = __eq__
            __ge__ = __eq__
            __ne__ = __eq__
            __ror__ = __eq__
            __rand__ = __eq__
            __rxor__ = __eq__
            __rsub__ = __eq__

        for name, op in operators.items():
            if not hasattr(instance, name):
                continue
            other = Other()
            op(instance, other)
            self.assertTrue(other.right_side,'Right side not called for %s.%s'
                            % (type(instance), name))
class TestOneTrickPonyABCs(ABCTestCase):
    """Tests for the single-method ABCs: Hashable, Iterable, Iterator,
    Sized, Container, and Callable."""

    def test_Hashable(self):
        # Check some non-hashables
        non_samples = [list(), set(), dict()]
        for x in non_samples:
            self.assertNotIsInstance(x, Hashable)
            self.assertFalse(issubclass(type(x), Hashable), repr(type(x)))
        # Check some hashables
        samples = [None,
                   int(), float(), complex(),
                   str(),
                   tuple(), frozenset(),
                   int, list, object, type,
                   ]
        for x in samples:
            self.assertIsInstance(x, Hashable)
            self.assertTrue(issubclass(type(x), Hashable), repr(type(x)))
        self.assertRaises(TypeError, Hashable)
        # Check direct subclassing
        class H(Hashable):
            def __hash__(self):
                return super(H, self).__hash__()
            #XXX: Do we need this to work for Jython?
            #__eq__ = Hashable.__eq__ # Silence Py3k warning
        self.assertEqual(hash(H()), 0)
        self.assertFalse(issubclass(int, H))
        self.validate_abstract_methods(Hashable, '__hash__')
        self.validate_isinstance(Hashable, '__hash__')

    def test_Iterable(self):
        # Check some non-iterables
        non_samples = [None, 42, 3.14, 1j]
        for x in non_samples:
            self.assertNotIsInstance(x, Iterable)
            self.assertFalse(issubclass(type(x), Iterable), repr(type(x)))
        # Check some iterables
        samples = [str(),
                   tuple(), list(), set(), frozenset(), dict(),
                   dict().keys(), dict().items(), dict().values(),
                   (lambda: (yield))(),
                   (x for x in []),
                   ]
        for x in samples:
            self.assertIsInstance(x, Iterable)
            self.assertTrue(issubclass(type(x), Iterable), repr(type(x)))
        # Check direct subclassing
        class I(Iterable):
            def __iter__(self):
                return super(I, self).__iter__()
        self.assertEqual(list(I()), [])
        self.assertFalse(issubclass(str, I))
        self.validate_abstract_methods(Iterable, '__iter__')
        self.validate_isinstance(Iterable, '__iter__')

    def test_Iterator(self):
        non_samples = [None, 42, 3.14, 1j, "".encode('ascii'), "", (), [],
                       {}, set()]
        for x in non_samples:
            self.assertNotIsInstance(x, Iterator)
            self.assertFalse(issubclass(type(x), Iterator), repr(type(x)))
        samples = [iter(str()),
                   iter(tuple()), iter(list()), iter(dict()),
                   iter(set()), iter(frozenset()),
                   iter(dict().keys()), iter(dict().items()),
                   iter(dict().values()),
                   (lambda: (yield))(),
                   (x for x in []),
                   ]
        for x in samples:
            self.assertIsInstance(x, Iterator)
            self.assertTrue(issubclass(type(x), Iterator), repr(type(x)))
        self.validate_abstract_methods(Iterator, 'next', '__iter__')

        # Issue 10565: classes defining only __next__ (the Py3 protocol)
        # must NOT register as (Py2) Iterators.
        class NextOnly:
            def __next__(self):
                yield 1
                raise StopIteration
        self.assertNotIsInstance(NextOnly(), Iterator)
        class NextOnlyNew(object):
            def __next__(self):
                yield 1
                raise StopIteration
        self.assertNotIsInstance(NextOnlyNew(), Iterator)

    def test_Sized(self):
        non_samples = [None, 42, 3.14, 1j,
                       (lambda: (yield))(),
                       (x for x in []),
                       ]
        for x in non_samples:
            self.assertNotIsInstance(x, Sized)
            self.assertFalse(issubclass(type(x), Sized), repr(type(x)))
        samples = [str(),
                   tuple(), list(), set(), frozenset(), dict(),
                   dict().keys(), dict().items(), dict().values(),
                   ]
        for x in samples:
            self.assertIsInstance(x, Sized)
            self.assertTrue(issubclass(type(x), Sized), repr(type(x)))
        self.validate_abstract_methods(Sized, '__len__')
        self.validate_isinstance(Sized, '__len__')

    def test_Container(self):
        non_samples = [None, 42, 3.14, 1j,
                       (lambda: (yield))(),
                       (x for x in []),
                       ]
        for x in non_samples:
            self.assertNotIsInstance(x, Container)
            self.assertFalse(issubclass(type(x), Container), repr(type(x)))
        samples = [str(),
                   tuple(), list(), set(), frozenset(), dict(),
                   dict().keys(), dict().items(),
                   ]
        for x in samples:
            self.assertIsInstance(x, Container)
            self.assertTrue(issubclass(type(x), Container), repr(type(x)))
        self.validate_abstract_methods(Container, '__contains__')
        self.validate_isinstance(Container, '__contains__')

    def test_Callable(self):
        non_samples = [None, 42, 3.14, 1j,
                       "", "".encode('ascii'), (), [], {}, set(),
                       (lambda: (yield))(),
                       (x for x in []),
                       ]
        for x in non_samples:
            self.assertNotIsInstance(x, Callable)
            self.assertFalse(issubclass(type(x), Callable), repr(type(x)))
        samples = [lambda: None,
                   type, int, object,
                   len,
                   list.append, [].append,
                   ]
        for x in samples:
            self.assertIsInstance(x, Callable)
            self.assertTrue(issubclass(type(x), Callable), repr(type(x)))
        self.validate_abstract_methods(Callable, '__call__')
        self.validate_isinstance(Callable, '__call__')

    def test_direct_subclassing(self):
        for B in Hashable, Iterable, Iterator, Sized, Container, Callable:
            class C(B):
                pass
            self.assertTrue(issubclass(C, B))
            self.assertFalse(issubclass(int, C))

    def test_registration(self):
        # Explicit ABC registration must work even for unhashable classes.
        for B in Hashable, Iterable, Iterator, Sized, Container, Callable:
            class C:
                __metaclass__ = type
                __hash__ = None  # Make sure it isn't hashable by default
            self.assertFalse(issubclass(C, B), B.__name__)
            B.register(C)
            self.assertTrue(issubclass(C, B))
class WithSet(MutableSet):
    """Minimal concrete MutableSet backed by a real set.

    Implements only the five abstract methods; everything else (the in-place
    operators, pop, etc.) comes from the MutableSet mixins, which is exactly
    what the regression tests below exercise.
    """

    def __init__(self, it=()):
        self.data = set(it)

    def add(self, item):
        self.data.add(item)

    def discard(self, item):
        self.data.discard(item)

    def __contains__(self, item):
        return item in self.data

    def __iter__(self):
        return iter(self.data)

    def __len__(self):
        return len(self.data)
class TestCollectionABCs(ABCTestCase):
# XXX For now, we only test some virtual inheritance properties.
# We should also test the proper behavior of the collection ABCs
# as real base classes or mix-in classes.
def test_Set(self):
for sample in [set, frozenset]:
self.assertIsInstance(sample(), Set)
self.assertTrue(issubclass(sample, Set))
self.validate_abstract_methods(Set, '__contains__', '__iter__', '__len__')
class MySet(Set):
def __contains__(self, x):
return False
def __len__(self):
return 0
def __iter__(self):
return iter([])
self.validate_comparison(MySet())
def test_hash_Set(self):
class OneTwoThreeSet(Set):
def __init__(self):
self.contents = [1, 2, 3]
def __contains__(self, x):
return x in self.contents
def __len__(self):
return len(self.contents)
def __iter__(self):
return iter(self.contents)
def __hash__(self):
return self._hash()
a, b = OneTwoThreeSet(), OneTwoThreeSet()
self.assertTrue(hash(a) == hash(b))
def test_MutableSet(self):
self.assertIsInstance(set(), MutableSet)
self.assertTrue(issubclass(set, MutableSet))
self.assertNotIsInstance(frozenset(), MutableSet)
self.assertFalse(issubclass(frozenset, MutableSet))
self.validate_abstract_methods(MutableSet, '__contains__', '__iter__', '__len__',
'add', 'discard')
def test_issue_5647(self):
# MutableSet.__iand__ mutated the set during iteration
s = WithSet('abcd')
s &= WithSet('cdef') # This used to fail
self.assertEqual(set(s), set('cd'))
def test_issue_4920(self):
class MySet(collections.MutableSet):
__slots__=['__s']
def __init__(self,items=None):
if items is None:
items=[]
self.__s=set(items)
def __contains__(self,v):
return v in self.__s
def __iter__(self):
return iter(self.__s)
def __len__(self):
return len(self.__s)
def add(self,v):
result=v not in self.__s
self.__s.add(v)
return result
def discard(self,v):
result=v in self.__s
self.__s.discard(v)
return result
def __repr__(self):
return "MySet(%s)" % repr(list(self))
values = [5,43,2,1]
s = MySet(values)
self.assertIn(s.pop(), values)
def test_issue8750(self):
empty = WithSet()
full = WithSet(range(10))
s = WithSet(full)
s -= s
self.assertEqual(s, empty)
s = WithSet(full)
s ^= s
self.assertEqual(s, empty)
s = WithSet(full)
s &= s
self.assertEqual(s, full)
s |= s
self.assertEqual(s, full)
def test_Mapping(self):
for sample in [dict]:
self.assertIsInstance(sample(), Mapping)
self.assertTrue(issubclass(sample, Mapping))
self.validate_abstract_methods(Mapping, '__contains__', '__iter__', '__len__',
'__getitem__')
class MyMapping(collections.Mapping):
def __len__(self):
return 0
def __getitem__(self, i):
raise IndexError
def __iter__(self):
return iter(())
self.validate_comparison(MyMapping())
def test_MutableMapping(self):
for sample in [dict]:
self.assertIsInstance(sample(), MutableMapping)
self.assertTrue(issubclass(sample, MutableMapping))
self.validate_abstract_methods(MutableMapping, '__contains__', '__iter__', '__len__',
'__getitem__', '__setitem__', '__delitem__')
def test_Sequence(self):
for sample in [tuple, list, str]:
self.assertIsInstance(sample(), Sequence)
self.assertTrue(issubclass(sample, Sequence))
self.assertTrue(issubclass(basestring, Sequence))
self.assertIsInstance(range(10), Sequence)
self.assertTrue(issubclass(xrange, Sequence))
self.assertTrue(issubclass(str, Sequence))
self.validate_abstract_methods(Sequence, '__contains__', '__iter__', '__len__',
'__getitem__')
    def test_MutableSequence(self):
        # Immutable sequence types must NOT satisfy MutableSequence...
        for sample in [tuple, str]:
            self.assertNotIsInstance(sample(), MutableSequence)
            self.assertFalse(issubclass(sample, MutableSequence))
        # ...but list must.
        for sample in [list]:
            self.assertIsInstance(sample(), MutableSequence)
            self.assertTrue(issubclass(sample, MutableSequence))
        self.assertFalse(issubclass(basestring, MutableSequence))
        self.validate_abstract_methods(MutableSequence, '__contains__', '__iter__',
                                       '__len__', '__getitem__', '__setitem__', '__delitem__', 'insert')
class TestCounter(unittest.TestCase):
    """Tests for collections.Counter."""

    def test_basics(self):
        c = Counter('abcaba')
        self.assertEqual(c, Counter({'a': 3, 'b': 2, 'c': 1}))
        self.assertEqual(c, Counter(a=3, b=2, c=1))
        self.assertIsInstance(c, dict)
        self.assertIsInstance(c, Mapping)
        self.assertTrue(issubclass(Counter, dict))
        self.assertTrue(issubclass(Counter, Mapping))
        self.assertEqual(len(c), 3)
        self.assertEqual(sum(c.values()), 6)
        self.assertEqual(sorted(c.values()), [1, 2, 3])
        self.assertEqual(sorted(c.keys()), ['a', 'b', 'c'])
        self.assertEqual(sorted(c), ['a', 'b', 'c'])
        self.assertEqual(sorted(c.items()),
                         [('a', 3), ('b', 2), ('c', 1)])
        self.assertEqual(c['b'], 2)
        # Missing elements read as zero rather than raising KeyError.
        self.assertEqual(c['z'], 0)
        with test_support.check_py3k_warnings():
            self.assertEqual(c.has_key('c'), True)
            self.assertEqual(c.has_key('z'), False)
        self.assertEqual(c.__contains__('c'), True)
        self.assertEqual(c.__contains__('z'), False)
        self.assertEqual(c.get('b', 10), 2)
        self.assertEqual(c.get('z', 10), 10)
        self.assertEqual(c, dict(a=3, b=2, c=1))
        self.assertEqual(repr(c), "Counter({'a': 3, 'b': 2, 'c': 1})")
        self.assertEqual(c.most_common(), [('a', 3), ('b', 2), ('c', 1)])
        for i in range(5):
            self.assertEqual(c.most_common(i),
                             [('a', 3), ('b', 2), ('c', 1)][:i])
        self.assertEqual(''.join(sorted(c.elements())), 'aaabbc')
        c['a'] += 1         # increment an existing value
        c['b'] -= 2         # sub existing value to zero
        del c['c']          # remove an entry
        del c['c']          # make sure that del doesn't raise KeyError
        c['d'] -= 2         # sub from a missing value
        c['e'] = -5         # directly assign a missing value
        c['f'] += 4         # add to a missing value
        self.assertEqual(c, dict(a=4, b=0, d=-2, e=-5, f=4))
        # elements() skips zero and negative counts.
        self.assertEqual(''.join(sorted(c.elements())), 'aaaaffff')
        self.assertEqual(c.pop('f'), 4)
        self.assertNotIn('f', c)
        for i in range(3):
            elem, cnt = c.popitem()
            self.assertNotIn(elem, c)
        c.clear()
        self.assertEqual(c, {})
        self.assertEqual(repr(c), 'Counter()')
        self.assertRaises(NotImplementedError, Counter.fromkeys, 'abc')
        self.assertRaises(TypeError, hash, c)
        c.update(dict(a=5, b=3))
        c.update(c=1)
        c.update(Counter('a' * 50 + 'b' * 30))
        c.update()          # test case with no args
        # __init__ on an existing Counter adds counts rather than resetting.
        c.__init__('a' * 500 + 'b' * 300)
        c.__init__('cdc')
        c.__init__()
        self.assertEqual(c, dict(a=555, b=333, c=3, d=1))
        self.assertEqual(c.setdefault('d', 5), 1)
        self.assertEqual(c['d'], 1)
        self.assertEqual(c.setdefault('e', 5), 5)
        self.assertEqual(c['e'], 5)

    def test_copying(self):
        # Check that counters are copyable, deepcopyable, picklable, and
        # have a repr/eval round-trip.
        words = Counter('which witch had which witches wrist watch'.split())
        update_test = Counter()
        update_test.update(words)
        for i, dup in enumerate([
                words.copy(),
                copy.copy(words),
                copy.deepcopy(words),
                pickle.loads(pickle.dumps(words, 0)),
                pickle.loads(pickle.dumps(words, 1)),
                pickle.loads(pickle.dumps(words, 2)),
                pickle.loads(pickle.dumps(words, -1)),
                cPickle.loads(cPickle.dumps(words, 0)),
                cPickle.loads(cPickle.dumps(words, 1)),
                cPickle.loads(cPickle.dumps(words, 2)),
                cPickle.loads(cPickle.dumps(words, -1)),
                eval(repr(words)),
                update_test,
                Counter(words),
                ]):
            # (The unused `msg` tuple from the original has been removed.)
            self.assertTrue(dup is not words)
            self.assertEqual(dup, words)
            self.assertEqual(len(dup), len(words))
            self.assertEqual(type(dup), type(words))

    def test_copy_subclass(self):
        # copy() on a subclass must preserve the subclass type.
        class MyCounter(Counter):
            pass
        c = MyCounter('slartibartfast')
        d = c.copy()
        self.assertEqual(d, c)
        self.assertEqual(len(d), len(c))
        self.assertEqual(type(d), type(c))

    def test_conversions(self):
        # Convert to: set, list, dict
        s = 'she sells sea shells by the sea shore'
        self.assertEqual(sorted(Counter(s).elements()), sorted(s))
        self.assertEqual(sorted(Counter(s)), sorted(set(s)))
        self.assertEqual(dict(Counter(s)), dict(Counter(s).items()))
        self.assertEqual(set(Counter(s)), set(s))

    def test_invariant_for_the_in_operator(self):
        # Every iterated key must also test as contained, even when its
        # count is zero or negative.
        c = Counter(a=10, b=-2, c=0)
        for elem in c:
            self.assertTrue(elem in c)
            self.assertIn(elem, c)

    def test_multiset_operations(self):
        # Verify that adding a zero counter will strip zeros and negatives
        c = Counter(a=10, b=-2, c=0) + Counter()
        self.assertEqual(dict(c), dict(a=10))
        elements = 'abcd'
        for i in range(1000):
            # test random pairs of multisets
            p = Counter(dict((elem, randrange(-2, 4)) for elem in elements))
            p.update(e=1, f=-1, g=0)
            q = Counter(dict((elem, randrange(-2, 4)) for elem in elements))
            q.update(h=1, i=-1, j=0)
            for counterop, numberop in [
                (Counter.__add__, lambda x, y: max(0, x + y)),
                (Counter.__sub__, lambda x, y: max(0, x - y)),
                (Counter.__or__, lambda x, y: max(0, x, y)),
                (Counter.__and__, lambda x, y: max(0, min(x, y))),
            ]:
                result = counterop(p, q)
                for x in elements:
                    self.assertEqual(numberop(p[x], q[x]), result[x],
                                     (counterop, x, p, q))
                # verify that results exclude non-positive counts
                # BUG FIX: the original passed a bare generator expression to
                # assertTrue(), which is always truthy, so the check could
                # never fail.  Wrapping in all() makes it meaningful.
                self.assertTrue(all(x > 0 for x in result.values()))
        elements = 'abcdef'
        for i in range(100):
            # verify that random multisets with no repeats are exactly like sets
            p = Counter(dict((elem, randrange(0, 2)) for elem in elements))
            q = Counter(dict((elem, randrange(0, 2)) for elem in elements))
            for counterop, setop in [
                (Counter.__sub__, set.__sub__),
                (Counter.__or__, set.__or__),
                (Counter.__and__, set.__and__),
            ]:
                counter_result = counterop(p, q)
                set_result = setop(set(p.elements()), set(q.elements()))
                self.assertEqual(counter_result, dict.fromkeys(set_result, 1))

    def test_subtract(self):
        # Unlike the - operator, subtract() keeps zero and negative counts.
        c = Counter(a=-5, b=0, c=5, d=10, e=15, g=40)
        c.subtract(a=1, b=2, c=-3, d=10, e=20, f=30, h=-50)
        self.assertEqual(c, Counter(a=-6, b=-2, c=8, d=0, e=-5, f=-30, g=40, h=50))
        c = Counter(a=-5, b=0, c=5, d=10, e=15, g=40)
        c.subtract(Counter(a=1, b=2, c=-3, d=10, e=20, f=30, h=-50))
        self.assertEqual(c, Counter(a=-6, b=-2, c=8, d=0, e=-5, f=-30, g=40, h=50))
        c = Counter('aaabbcd')
        c.subtract('aaaabbcce')
        self.assertEqual(c, Counter(a=-1, b=0, c=-1, d=1, e=-1))
class TestOrderedDict(unittest.TestCase):
    """Tests for collections.OrderedDict (Python 2 semantics: keys()/values()/
    items() return lists, and the view methods are spelled viewkeys() etc.)."""

    def test_init(self):
        with self.assertRaises(TypeError):
            OrderedDict([('a', 1), ('b', 2)], None)                                 # too many args
        pairs = [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5)]
        self.assertEqual(sorted(OrderedDict(dict(pairs)).items()), pairs)           # dict input
        self.assertEqual(sorted(OrderedDict(**dict(pairs)).items()), pairs)         # kwds input
        self.assertEqual(list(OrderedDict(pairs).items()), pairs)                   # pairs input
        self.assertEqual(list(OrderedDict([('a', 1), ('b', 2), ('c', 9), ('d', 4)],
                                          c=3, e=5).items()), pairs)                # mixed input
        # make sure no positional args conflict with possible kwdargs
        self.assertEqual(inspect.getargspec(OrderedDict.__dict__['__init__']).args,
                         ['self'])
        # Make sure that direct calls to __init__ do not clear previous contents
        d = OrderedDict([('a', 1), ('b', 2), ('c', 3), ('d', 44), ('e', 55)])
        d.__init__([('e', 5), ('f', 6)], g=7, d=4)
        self.assertEqual(list(d.items()),
                         [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5), ('f', 6), ('g', 7)])

    def test_update(self):
        with self.assertRaises(TypeError):
            OrderedDict().update([('a', 1), ('b', 2)], None)                        # too many args
        pairs = [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5)]
        od = OrderedDict()
        od.update(dict(pairs))
        self.assertEqual(sorted(od.items()), pairs)                                 # dict input
        od = OrderedDict()
        od.update(**dict(pairs))
        self.assertEqual(sorted(od.items()), pairs)                                 # kwds input
        od = OrderedDict()
        od.update(pairs)
        self.assertEqual(list(od.items()), pairs)                                   # pairs input
        od = OrderedDict()
        od.update([('a', 1), ('b', 2), ('c', 9), ('d', 4)], c=3, e=5)
        self.assertEqual(list(od.items()), pairs)                                   # mixed input
        # Issue 9137: Named argument called 'other' or 'self'
        # shouldn't be treated specially.
        od = OrderedDict()
        od.update(self=23)
        self.assertEqual(list(od.items()), [('self', 23)])
        od = OrderedDict()
        od.update(other={})
        self.assertEqual(list(od.items()), [('other', {})])
        od = OrderedDict()
        od.update(red=5, blue=6, other=7, self=8)
        self.assertEqual(sorted(list(od.items())),
                         [('blue', 6), ('other', 7), ('red', 5), ('self', 8)])
        # Make sure that direct calls to update do not clear previous contents
        # and that updated items are not moved to the end
        d = OrderedDict([('a', 1), ('b', 2), ('c', 3), ('d', 44), ('e', 55)])
        d.update([('e', 5), ('f', 6)], g=7, d=4)
        self.assertEqual(list(d.items()),
                         [('a', 1), ('b', 2), ('c', 3), ('d', 4), ('e', 5), ('f', 6), ('g', 7)])

    def test_abc(self):
        # OrderedDict participates in the MutableMapping ABC.
        self.assertIsInstance(OrderedDict(), MutableMapping)
        self.assertTrue(issubclass(OrderedDict, MutableMapping))

    def test_clear(self):
        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
        shuffle(pairs)
        od = OrderedDict(pairs)
        self.assertEqual(len(od), len(pairs))
        od.clear()
        self.assertEqual(len(od), 0)

    def test_delitem(self):
        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
        od = OrderedDict(pairs)
        del od['a']
        self.assertNotIn('a', od)
        with self.assertRaises(KeyError):
            del od['a']
        self.assertEqual(list(od.items()), pairs[:2] + pairs[3:])

    def test_setitem(self):
        od = OrderedDict([('d', 1), ('b', 2), ('c', 3), ('a', 4), ('e', 5)])
        od['c'] = 10            # existing element keeps its position
        od['f'] = 20            # new element goes to the end
        self.assertEqual(list(od.items()),
                         [('d', 1), ('b', 2), ('c', 10), ('a', 4), ('e', 5), ('f', 20)])

    def test_iterators(self):
        # All seven iteration protocols must agree on insertion order.
        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
        shuffle(pairs)
        od = OrderedDict(pairs)
        self.assertEqual(list(od), [t[0] for t in pairs])
        self.assertEqual(od.keys()[:], [t[0] for t in pairs])
        self.assertEqual(od.values()[:], [t[1] for t in pairs])
        self.assertEqual(od.items()[:], pairs)
        self.assertEqual(list(od.iterkeys()), [t[0] for t in pairs])
        self.assertEqual(list(od.itervalues()), [t[1] for t in pairs])
        self.assertEqual(list(od.iteritems()), pairs)
        self.assertEqual(list(reversed(od)),
                         [t[0] for t in reversed(pairs)])

    def test_popitem(self):
        # popitem() with no argument removes from the end (LIFO).
        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
        shuffle(pairs)
        od = OrderedDict(pairs)
        while pairs:
            self.assertEqual(od.popitem(), pairs.pop())
        with self.assertRaises(KeyError):
            od.popitem()
        self.assertEqual(len(od), 0)

    def test_pop(self):
        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
        shuffle(pairs)
        od = OrderedDict(pairs)
        shuffle(pairs)
        while pairs:
            k, v = pairs.pop()
            self.assertEqual(od.pop(k), v)
        with self.assertRaises(KeyError):
            od.pop('xyz')
        self.assertEqual(len(od), 0)
        self.assertEqual(od.pop(k, 12345), 12345)
        # make sure pop still works when __missing__ is defined
        class Missing(OrderedDict):
            def __missing__(self, key):
                return 0
        m = Missing(a=1)
        self.assertEqual(m.pop('b', 5), 5)
        self.assertEqual(m.pop('a', 6), 1)
        self.assertEqual(m.pop('a', 6), 6)
        with self.assertRaises(KeyError):
            m.pop('a')

    def test_equality(self):
        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
        shuffle(pairs)
        od1 = OrderedDict(pairs)
        od2 = OrderedDict(pairs)
        self.assertEqual(od1, od2)          # same order implies equality
        pairs = pairs[2:] + pairs[:2]
        od2 = OrderedDict(pairs)
        self.assertNotEqual(od1, od2)       # different order implies inequality
        # comparison to regular dict is not order sensitive
        self.assertEqual(od1, dict(od2))
        self.assertEqual(dict(od2), od1)
        # different length implied inequality
        self.assertNotEqual(od1, OrderedDict(pairs[:-1]))

    def test_copying(self):
        # Check that ordered dicts are copyable, deepcopyable, picklable,
        # and have a repr/eval round-trip
        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
        od = OrderedDict(pairs)
        update_test = OrderedDict()
        update_test.update(od)
        for i, dup in enumerate([
                od.copy(),
                copy.copy(od),
                copy.deepcopy(od),
                pickle.loads(pickle.dumps(od, 0)),
                pickle.loads(pickle.dumps(od, 1)),
                pickle.loads(pickle.dumps(od, 2)),
                pickle.loads(pickle.dumps(od, -1)),
                eval(repr(od)),
                update_test,
                OrderedDict(od),
                ]):
            self.assertTrue(dup is not od)
            self.assertEqual(dup, od)
            self.assertEqual(list(dup.items()), list(od.items()))
            self.assertEqual(len(dup), len(od))
            self.assertEqual(type(dup), type(od))

    def test_yaml_linkage(self):
        # Verify that __reduce__ is setup in a way that supports PyYAML's dump() feature.
        # In yaml, lists are native but tuples are not.
        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
        od = OrderedDict(pairs)
        # yaml.dump(od) -->
        # '!!python/object/apply:__main__.OrderedDict\n- - [a, 1]\n  - [b, 2]\n'
        self.assertTrue(all(type(pair) == list for pair in od.__reduce__()[1]))

    def test_reduce_not_too_fat(self):
        # do not save instance dictionary if not needed
        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
        od = OrderedDict(pairs)
        self.assertEqual(len(od.__reduce__()), 2)
        od.x = 10
        self.assertEqual(len(od.__reduce__()), 3)

    def test_repr(self):
        od = OrderedDict([('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)])
        self.assertEqual(repr(od),
                         "OrderedDict([('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)])")
        self.assertEqual(eval(repr(od)), od)
        self.assertEqual(repr(OrderedDict()), "OrderedDict()")

    def test_repr_recursive(self):
        # See issue #9826: a self-referential value must render as '...'
        # instead of recursing forever.
        od = OrderedDict.fromkeys('abc')
        od['x'] = od
        self.assertEqual(repr(od),
                         "OrderedDict([('a', None), ('b', None), ('c', None), ('x', ...)])")

    def test_setdefault(self):
        pairs = [('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]
        shuffle(pairs)
        od = OrderedDict(pairs)
        pair_order = list(od.items())
        self.assertEqual(od.setdefault('a', 10), 3)
        # make sure order didn't change
        self.assertEqual(list(od.items()), pair_order)
        self.assertEqual(od.setdefault('x', 10), 10)
        # make sure 'x' is added to the end
        self.assertEqual(list(od.items())[-1], ('x', 10))
        # make sure setdefault still works when __missing__ is defined
        class Missing(OrderedDict):
            def __missing__(self, key):
                return 0
        self.assertEqual(Missing().setdefault(5, 9), 9)

    def test_reinsert(self):
        # Given insert a, insert b, delete a, re-insert a,
        # verify that a is now later than b.
        od = OrderedDict()
        od['a'] = 1
        od['b'] = 2
        del od['a']
        od['a'] = 1
        self.assertEqual(list(od.items()), [('b', 2), ('a', 1)])

    def test_views(self):
        # The view objects must iterate in insertion order.
        s = 'the quick brown fox jumped over a lazy dog yesterday before dawn'.split()
        od = OrderedDict.fromkeys(s)
        self.assertEqual(list(od.viewkeys()), s)
        self.assertEqual(list(od.viewvalues()), [None for k in s])
        self.assertEqual(list(od.viewitems()), [(k, None) for k in s])

    def test_override_update(self):
        # Verify that subclasses can override update() without breaking __init__()
        class MyOD(OrderedDict):
            def update(self, *args, **kwds):
                raise Exception()
        items = [('a', 1), ('c', 3), ('b', 2)]
        self.assertEqual(list(MyOD(items).items()), items)
class GeneralMappingTests(mapping_tests.BasicTestMappingProtocol):
    """Exercise the generic mapping-protocol test battery on OrderedDict."""

    type2test = OrderedDict

    def test_popitem(self):
        # popitem() on an empty mapping must raise KeyError.
        mapping = self._empty_mapping()
        with self.assertRaises(KeyError):
            mapping.popitem()
class MyOrderedDict(OrderedDict):
    # Trivial subclass: used to verify that the generic mapping-protocol
    # tests also pass for user-defined OrderedDict subclasses.
    pass
class SubclassMappingTests(mapping_tests.BasicTestMappingProtocol):
    """Re-run the generic mapping-protocol tests on an OrderedDict subclass."""

    type2test = MyOrderedDict

    def test_popitem(self):
        # popitem() on an empty mapping must raise KeyError.
        mapping = self._empty_mapping()
        with self.assertRaises(KeyError):
            mapping.popitem()
import collections
def test_main(verbose=None):
    # Run the unittest classes plus the collections module's doctests.
    NamedTupleDocs = doctest.DocTestSuite(module=collections)
    test_classes = [TestNamedTuple, NamedTupleDocs, TestOneTrickPonyABCs,
                    TestCollectionABCs, TestCounter,
                    TestOrderedDict, GeneralMappingTests, SubclassMappingTests]
    test_support.run_unittest(*test_classes)
    test_support.run_doctest(collections, verbose)
if __name__ == "__main__":
    # Run the full suite verbosely when executed as a script.
    test_main(verbose=True)
| apache-2.0 |
theo-l/django | django/db/models/indexes.py | 15 | 5245 | from django.db.backends.utils import names_digest, split_identifier
from django.db.models.query_utils import Q
from django.db.models.sql import Query
__all__ = ['Index']
class Index:
    """Database index declaration for a model (used via Meta.indexes)."""

    suffix = 'idx'
    # The max length of the name of the index (restricted to 30 for
    # cross-database compatibility with Oracle)
    max_name_length = 30

    def __init__(self, *, fields=(), name=None, db_tablespace=None, opclasses=(), condition=None):
        # Validate the option combinations up front so errors surface at
        # model-definition time rather than during migration generation.
        if opclasses and not name:
            raise ValueError('An index must be named to use opclasses.')
        if not isinstance(condition, (type(None), Q)):
            raise ValueError('Index.condition must be a Q instance.')
        if condition and not name:
            raise ValueError('An index must be named to use condition.')
        if not isinstance(fields, (list, tuple)):
            raise ValueError('Index.fields must be a list or tuple.')
        if not isinstance(opclasses, (list, tuple)):
            raise ValueError('Index.opclasses must be a list or tuple.')
        if opclasses and len(fields) != len(opclasses):
            raise ValueError('Index.fields and Index.opclasses must have the same number of elements.')
        if not fields:
            raise ValueError('At least one field is required to define an index.')
        self.fields = list(fields)
        # A list of 2-tuple with the field name and ordering ('' or 'DESC').
        # A leading '-' on a field name requests descending order.
        self.fields_orders = [
            (field_name[1:], 'DESC') if field_name.startswith('-') else (field_name, '')
            for field_name in self.fields
        ]
        self.name = name or ''
        self.db_tablespace = db_tablespace
        self.opclasses = opclasses
        self.condition = condition

    def _get_condition_sql(self, model, schema_editor):
        # Compile self.condition (a Q object) into a SQL WHERE fragment with
        # literal, quoted parameter values; return None when no condition.
        if self.condition is None:
            return None
        query = Query(model=model, alias_cols=False)
        where = query.build_where(self.condition)
        compiler = query.get_compiler(connection=schema_editor.connection)
        sql, params = where.as_sql(compiler, schema_editor.connection)
        return sql % tuple(schema_editor.quote_value(p) for p in params)

    def create_sql(self, model, schema_editor, using='', **kwargs):
        """Return the schema-editor statement that creates this index."""
        fields = [model._meta.get_field(field_name) for field_name, _ in self.fields_orders]
        col_suffixes = [order[1] for order in self.fields_orders]
        condition = self._get_condition_sql(model, schema_editor)
        return schema_editor._create_index_sql(
            model, fields, name=self.name, using=using, db_tablespace=self.db_tablespace,
            col_suffixes=col_suffixes, opclasses=self.opclasses, condition=condition,
            **kwargs,
        )

    def remove_sql(self, model, schema_editor, **kwargs):
        """Return the schema-editor statement that drops this index."""
        return schema_editor._delete_index_sql(model, self.name, **kwargs)

    def deconstruct(self):
        # Return a (path, args, kwargs) triple for migration serialization.
        # The path is rewritten so migrations reference the public
        # django.db.models location rather than the private module.
        path = '%s.%s' % (self.__class__.__module__, self.__class__.__name__)
        path = path.replace('django.db.models.indexes', 'django.db.models')
        kwargs = {'fields': self.fields, 'name': self.name}
        if self.db_tablespace is not None:
            kwargs['db_tablespace'] = self.db_tablespace
        if self.opclasses:
            kwargs['opclasses'] = self.opclasses
        if self.condition:
            kwargs['condition'] = self.condition
        return (path, (), kwargs)

    def clone(self):
        """Create a copy of this Index."""
        _, _, kwargs = self.deconstruct()
        return self.__class__(**kwargs)

    def set_name_with_model(self, model):
        """
        Generate a unique name for the index.

        The name is divided into 3 parts - table name (12 chars), field name
        (8 chars) and unique hash + suffix (10 chars). Each part is made to
        fit its size by truncating the excess length.
        """
        _, table_name = split_identifier(model._meta.db_table)
        column_names = [model._meta.get_field(field_name).column for field_name, order in self.fields_orders]
        column_names_with_order = [
            (('-%s' if order else '%s') % column_name)
            for column_name, (field_name, order) in zip(column_names, self.fields_orders)
        ]
        # The length of the parts of the name is based on the default max
        # length of 30 characters.
        hash_data = [table_name] + column_names_with_order + [self.suffix]
        self.name = '%s_%s_%s' % (
            table_name[:11],
            column_names[0][:7],
            '%s_%s' % (names_digest(*hash_data, length=6), self.suffix),
        )
        assert len(self.name) <= self.max_name_length, (
            'Index too long for multiple database support. Is self.suffix '
            'longer than 3 characters?'
        )
        # Index names cannot start with an underscore or a digit on some
        # backends; prepend 'D' to keep the name valid.
        if self.name[0] == '_' or self.name[0].isdigit():
            self.name = 'D%s' % self.name[1:]

    def __repr__(self):
        return "<%s: fields='%s'%s>" % (
            self.__class__.__name__, ', '.join(self.fields),
            '' if self.condition is None else ', condition=%s' % self.condition,
        )

    def __eq__(self, other):
        # Two indexes are equal when their deconstructed forms match; this is
        # what migration autodetection compares.
        if self.__class__ == other.__class__:
            return self.deconstruct() == other.deconstruct()
        return NotImplemented
| bsd-3-clause |
cypreess/csvkit | tests/test_utilities/test_csvcut.py | 4 | 3586 | #!/usr/bin/env python
import StringIO
import unittest
from csvkit import CSVKitReader
from csvkit.utilities.csvcut import CSVCut
from csvkit.exceptions import ColumnIdentifierError, RequiredHeaderError
class TestCSVCut(unittest.TestCase):
    """Tests for the csvcut command-line utility.

    Each test runs CSVCut against a fixture file and inspects the captured
    output.  The private helpers below factor out the StringIO/utility
    wiring that every original test duplicated verbatim.
    """

    def _make_utility(self, args):
        # Build a CSVCut utility writing to an in-memory buffer; return both.
        output_file = StringIO.StringIO()
        return CSVCut(args, output_file), output_file

    def _run(self, args):
        # Run csvcut with *args*; return its raw output as a file-like
        # object positioned at the start.
        utility, output_file = self._make_utility(args)
        utility.main()
        return StringIO.StringIO(output_file.getvalue())

    def _run_rows(self, args):
        # Run csvcut with *args*; return a CSV reader over its output.
        return CSVKitReader(self._run(args))

    def test_simple(self):
        reader = self._run_rows(['-c', '1,3', 'examples/dummy.csv'])
        self.assertEqual(reader.next(), ['a', 'c'])
        self.assertEqual(reader.next(), ['1', '3'])

    def test_names(self):
        # -n prints a numbered column listing instead of CSV data.
        input_file = self._run(['-n', 'examples/dummy.csv'])
        self.assertEqual(input_file.next(), ' 1: a\n')
        self.assertEqual(input_file.next(), ' 2: b\n')
        self.assertEqual(input_file.next(), ' 3: c\n')

    def test_with_gzip(self):
        # Compressed inputs are decompressed transparently.
        reader = self._run_rows(['-c', '1,3', 'examples/dummy.csv.gz'])
        self.assertEqual(reader.next(), ['a', 'c'])
        self.assertEqual(reader.next(), ['1', '3'])

    def test_with_bzip2(self):
        reader = self._run_rows(['-c', '1,3', 'examples/dummy.csv.bz2'])
        self.assertEqual(reader.next(), ['a', 'c'])
        self.assertEqual(reader.next(), ['1', '3'])

    def test_exclude(self):
        reader = self._run_rows(['-C', '1,3', 'examples/dummy.csv'])
        self.assertEqual(reader.next(), ['b'])
        self.assertEqual(reader.next(), ['2'])

    def test_include_and_exclude(self):
        # Exclusion (-C) is applied after inclusion (-c).
        reader = self._run_rows(['-c', '1,3', '-C', '3', 'examples/dummy.csv'])
        self.assertEqual(reader.next(), ['a'])
        self.assertEqual(reader.next(), ['1'])

    def test_invalid_column(self):
        # Column identifiers are 1-based, so 0 must be rejected.
        utility, _ = self._make_utility(['-c', '0', 'examples/dummy.csv'])
        self.assertRaises(ColumnIdentifierError, utility.main)

    def test_invalid_options(self):
        # -n needs a header row, so combining it with --no-header-row fails.
        utility, _ = self._make_utility(['-n', '--no-header-row', 'examples/dummy.csv'])
        self.assertRaises(RequiredHeaderError, utility.main)

    def test_no_header_row(self):
        # Without a header row, generated names like 'column2' are used.
        reader = self._run_rows(['-c', '2', '--no-header-row', 'examples/no_header_row.csv'])
        self.assertEqual(reader.next(), ['column2'])
        self.assertEqual(reader.next(), ['2'])
| mit |
ToonBoxEntertainment/rez | src/rezgui/dialogs/ProcessDialog.py | 4 | 3642 | from rezgui.qt import QtCore, QtGui
from rezgui.util import create_pane
from rezgui.mixins.StoreSizeMixin import StoreSizeMixin
from rezgui.widgets.StreamableTextEdit import StreamableTextEdit
from rezgui.objects.App import app
from threading import Thread, Lock
class ProcessDialog(QtGui.QDialog, StoreSizeMixin):
    """A dialog that monitors a process and captures its output.

    Note that in order to capture the process's output, you need to have piped
    its stdout and stderr to subprocess.PIPE.
    """
    def __init__(self, process, command_string, parent=None):
        # *process* is a Popen-like object with .stdout/.stderr pipes and
        # .poll()/.returncode; *command_string* is only used in the title.
        config_key = "layout/window/process"
        super(ProcessDialog, self).__init__(parent)
        StoreSizeMixin.__init__(self, app.config, config_key)
        self.setWindowTitle("Running: %s" % command_string)
        self.proc = process
        self.ended = False            # set once the process has exited
        self.output_ended = False     # set once both reader threads drained
        self.capture_output = True    # cleared on close to release the readers
        self.buffer = []              # pending output lines, guarded by self.lock
        self.bar = QtGui.QProgressBar()
        self.bar.setRange(0, 0)       # range (0, 0) = indeterminate "busy" bar
        self.edit = StreamableTextEdit()
        close_btn = QtGui.QPushButton("Close")
        btn_pane = create_pane([None, close_btn], True)
        create_pane([self.bar, self.edit, btn_pane], False, parent_widget=self)
        # One reader thread per stream; reading in the GUI thread would block.
        self.lock = Lock()
        self.stdout_thread = Thread(target=self._read_output, args=(self.proc.stdout,))
        self.stderr_thread = Thread(target=self._read_output, args=(self.proc.stderr,))
        # Poll every 100ms from the GUI thread to flush buffered output.
        self.timer = QtCore.QTimer()
        self.timer.setInterval(100)
        self.timer.timeout.connect(self._update)
        self.timer.start()
        self.stdout_thread.start()
        self.stderr_thread.start()
        close_btn.clicked.connect(self.close)

    def closeEvent(self, event):
        # Tell the reader threads to exit at the next line boundary.  The
        # process itself is not terminated here.
        self.capture_output = False

    def _read_output(self, buf):
        # Worker-thread loop: append each line from *buf* into self.buffer.
        for line in buf:
            try:
                self.lock.acquire()
                self.buffer.append(line)
            finally:
                self.lock.release()
            if not self.capture_output:
                break

    def _update(self):
        # Timer callback (GUI thread): flush buffered output into the text
        # edit and track process termination.
        if not self.output_ended \
                and not self.stdout_thread.is_alive() \
                and not self.stderr_thread.is_alive() \
                and self.proc.poll() is not None:
            self.output_ended = True
            self.buffer.append("\nProcess ended with returncode %d\n"
                               % self.proc.returncode)
        # NOTE(review): this truthiness check and the append above read/write
        # self.buffer without holding self.lock — presumably relying on
        # CPython list-op atomicity under the GIL; confirm.
        if self.buffer:
            try:
                self.lock.acquire()
                buf = self.buffer
                self.buffer = []
            finally:
                self.lock.release()
            txt = ''.join(buf)
            # Python 2 print-to-file syntax; StreamableTextEdit acts as a file.
            print >> self.edit, txt
        if not self.ended and self.proc.poll() is not None:
            # Swap the busy bar for a filled, determinate bar.
            self.bar.setMaximum(10)
            self.bar.setValue(10)
            self.ended = True
        if self.ended and self.output_ended:
            self.timer.stop()
# Copyright 2013-2016 Allan Johns.
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
| lgpl-3.0 |
40223114/final | static/Brython3.1.1-20150328-091302/Lib/collections/abc.py | 739 | 16026 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) for collections, according to PEP 3119.
Unit tests are in test_collections.
"""
from abc import ABCMeta, abstractmethod
import sys
__all__ = ["Hashable", "Iterable", "Iterator",
"Sized", "Container", "Callable",
"Set", "MutableSet",
"Mapping", "MutableMapping",
"MappingView", "KeysView", "ItemsView", "ValuesView",
"Sequence", "MutableSequence",
"ByteString",
]
# Private list of types that we want to register with the various ABCs
# so that they will pass tests like:
#     it = iter(somebytearray)
#     assert isinstance(it, Iterable)
# Note: in other implementations, these types may not be distinct
# and they may have their own implementation-specific types that
# are not included on this list.
bytes_iterator = type(iter(b''))
bytearray_iterator = type(iter(bytearray()))
#callable_iterator = ???
dict_keyiterator = type(iter({}.keys()))
dict_valueiterator = type(iter({}.values()))
dict_itemiterator = type(iter({}.items()))
list_iterator = type(iter([]))
list_reverseiterator = type(iter(reversed([])))
range_iterator = type(iter(range(0)))
set_iterator = type(iter(set()))
str_iterator = type(iter(""))
tuple_iterator = type(iter(()))
zip_iterator = type(iter(zip()))

## views ##
dict_keys = type({}.keys())
dict_values = type({}.values())
dict_items = type({}.items())

## misc ##
mappingproxy = type(type.__dict__)
### ONE-TRICK PONIES ###
class Hashable(metaclass=ABCMeta):
    """ABC for objects that provide a working __hash__."""

    __slots__ = ()

    @abstractmethod
    def __hash__(self):
        return 0

    @classmethod
    def __subclasshook__(cls, C):
        # Honour only the first __hash__ found along the MRO: a class that
        # sets __hash__ = None is explicitly unhashable, and that setting
        # must not be overridden by a hashable ancestor further up.
        if cls is Hashable:
            for base in C.__mro__:
                if "__hash__" not in base.__dict__:
                    continue
                if base.__dict__["__hash__"]:
                    return True
                break
        return NotImplemented
class Iterable(metaclass=ABCMeta):
    """ABC for classes that provide __iter__."""

    __slots__ = ()

    @abstractmethod
    def __iter__(self):
        # Abstract generator: yields nothing but marks implementations as
        # expected to return an iterator.
        while False:
            yield None

    @classmethod
    def __subclasshook__(cls, C):
        # Any class defining __iter__ anywhere in its MRO counts.
        if cls is Iterable and any("__iter__" in base.__dict__ for base in C.__mro__):
            return True
        return NotImplemented
class Iterator(Iterable):
    """ABC for iterators: __next__ plus an __iter__ that returns self."""

    __slots__ = ()

    @abstractmethod
    def __next__(self):
        raise StopIteration

    def __iter__(self):
        return self

    @classmethod
    def __subclasshook__(cls, C):
        # An iterator must supply both halves of the protocol.
        if cls is Iterator:
            mro_dicts = [base.__dict__ for base in C.__mro__]
            has_next = any("__next__" in d for d in mro_dicts)
            has_iter = any("__iter__" in d for d in mro_dicts)
            if has_next and has_iter:
                return True
        return NotImplemented
# Register the interpreter's concrete iterator types so that iterators the
# runtime creates internally answer True to isinstance(..., Iterator).
Iterator.register(bytes_iterator)
Iterator.register(bytearray_iterator)
#Iterator.register(callable_iterator)
Iterator.register(dict_keyiterator)
Iterator.register(dict_valueiterator)
Iterator.register(dict_itemiterator)
Iterator.register(list_iterator)
Iterator.register(list_reverseiterator)
Iterator.register(range_iterator)
Iterator.register(set_iterator)
Iterator.register(str_iterator)
Iterator.register(tuple_iterator)
Iterator.register(zip_iterator)
class Sized(metaclass=ABCMeta):
    """ABC for classes that define __len__."""

    __slots__ = ()

    @abstractmethod
    def __len__(self):
        return 0

    @classmethod
    def __subclasshook__(cls, C):
        # Any class defining __len__ anywhere in its MRO counts.
        if cls is Sized and any("__len__" in base.__dict__ for base in C.__mro__):
            return True
        return NotImplemented
class Container(metaclass=ABCMeta):
    """ABC for classes that define __contains__."""

    __slots__ = ()

    @abstractmethod
    def __contains__(self, x):
        return False

    @classmethod
    def __subclasshook__(cls, C):
        # Any class defining __contains__ anywhere in its MRO counts.
        if cls is Container and any("__contains__" in base.__dict__ for base in C.__mro__):
            return True
        return NotImplemented
class Callable(metaclass=ABCMeta):
    """ABC for classes that define __call__."""

    __slots__ = ()

    @abstractmethod
    def __call__(self, *args, **kwds):
        return False

    @classmethod
    def __subclasshook__(cls, C):
        # Any class defining __call__ anywhere in its MRO counts.
        if cls is Callable and any("__call__" in base.__dict__ for base in C.__mro__):
            return True
        return NotImplemented
### SETS ###
class Set(Sized, Iterable, Container):
"""A set is a finite, iterable container.
This class provides concrete generic implementations of all
methods except for __contains__, __iter__ and __len__.
To override the comparisons (presumably for speed, as the
semantics are fixed), all you have to do is redefine __le__ and
then the other operations will automatically follow suit.
"""
__slots__ = ()
def __le__(self, other):
if not isinstance(other, Set):
return NotImplemented
if len(self) > len(other):
return False
for elem in self:
if elem not in other:
return False
return True
def __lt__(self, other):
if not isinstance(other, Set):
return NotImplemented
return len(self) < len(other) and self.__le__(other)
def __gt__(self, other):
if not isinstance(other, Set):
return NotImplemented
return other < self
def __ge__(self, other):
if not isinstance(other, Set):
return NotImplemented
return other <= self
def __eq__(self, other):
if not isinstance(other, Set):
return NotImplemented
return len(self) == len(other) and self.__le__(other)
def __ne__(self, other):
return not (self == other)
@classmethod
def _from_iterable(cls, it):
'''Construct an instance of the class from any iterable input.
Must override this method if the class constructor signature
does not accept an iterable for an input.
'''
return cls(it)
def __and__(self, other):
if not isinstance(other, Iterable):
return NotImplemented
return self._from_iterable(value for value in other if value in self)
def isdisjoint(self, other):
for value in other:
if value in self:
return False
return True
def __or__(self, other):
if not isinstance(other, Iterable):
return NotImplemented
chain = (e for s in (self, other) for e in s)
return self._from_iterable(chain)
def __sub__(self, other):
if not isinstance(other, Set):
if not isinstance(other, Iterable):
return NotImplemented
other = self._from_iterable(other)
return self._from_iterable(value for value in self
if value not in other)
def __xor__(self, other):
if not isinstance(other, Set):
if not isinstance(other, Iterable):
return NotImplemented
other = self._from_iterable(other)
return (self - other) | (other - self)
    def _hash(self):
        """Compute the hash value of a set.
        Note that we don't define __hash__: not all sets are hashable.
        But if you define a hashable set type, its __hash__ should
        call this function.
        This must be compatible with __eq__.
        All sets ought to compare equal if they contain the same
        elements, regardless of how they are implemented, and
        regardless of the order of the elements; so there's not much
        freedom for __eq__ or __hash__. We match the algorithm used
        by the built-in frozenset type.
        """
        MAX = sys.maxsize
        MASK = 2 * MAX + 1            # all-ones mask of the platform word
        n = len(self)
        h = 1927868237 * (n + 1)      # seed depends only on the cardinality
        h &= MASK
        for x in self:
            hx = hash(x)
            # XOR-combine per element so the result is independent of
            # iteration order.
            h ^= (hx ^ (hx << 16) ^ 89869747)  * 3644798167
            h &= MASK
        h = h * 69069 + 907133923     # final avalanche/scramble step
        h &= MASK
        if h > MAX:
            h -= MASK + 1             # fold back into the signed range
        if h == -1:
            h = 590923713             # -1 is reserved as an error value in CPython
        return h
Set.register(frozenset)
class MutableSet(Set):
    """Mutable variant of Set.

    Concrete subclasses must provide __contains__, __iter__, __len__,
    plus add() and discard(); everything else is derived from those.
    """
    __slots__ = ()
    @abstractmethod
    def add(self, value):
        """Add an element."""
        raise NotImplementedError
    @abstractmethod
    def discard(self, value):
        """Remove an element.  Do not raise an exception if absent."""
        raise NotImplementedError
    def remove(self, value):
        """Remove an element. If not a member, raise a KeyError."""
        if value not in self:
            raise KeyError(value)
        self.discard(value)
    def pop(self):
        """Return the popped value.  Raise KeyError if empty."""
        it = iter(self)
        try:
            value = next(it)
        except StopIteration:
            raise KeyError
        self.discard(value)
        return value
    def clear(self):
        """This is slow (creates N new iterators!) but effective."""
        try:
            while True:
                self.pop()
        except KeyError:
            pass
    def __ior__(self, it):
        # In-place union.
        for value in it:
            self.add(value)
        return self
    def __iand__(self, it):
        # In-place intersection: drop whatever is not also in *it*.
        for value in (self - it):
            self.discard(value)
        return self
    def __ixor__(self, it):
        # In-place symmetric difference.  ``s ^= s`` must empty the set, so
        # the aliasing case is handled explicitly before mutation starts.
        if it is self:
            self.clear()
        else:
            if not isinstance(it, Set):
                it = self._from_iterable(it)
            for value in it:
                if value in self:
                    self.discard(value)
                else:
                    self.add(value)
        return self
    def __isub__(self, it):
        # In-place difference; ``s -= s`` likewise empties the set.
        if it is self:
            self.clear()
        else:
            for value in it:
                self.discard(value)
        return self
# set supports the full MutableSet API.
MutableSet.register(set)
### MAPPINGS ###
class Mapping(Sized, Iterable, Container):
    """Read-only mapping ABC.

    Concrete subclasses must provide __getitem__, __len__ and __iter__;
    the remaining dict-like helpers are derived from those.
    """
    __slots__ = ()
    @abstractmethod
    def __getitem__(self, key):
        raise KeyError
    def get(self, key, default=None):
        """Return self[key], or *default* when the key is absent."""
        try:
            return self[key]
        except KeyError:
            return default
    def __contains__(self, key):
        # Membership is defined by whether the lookup raises KeyError.
        try:
            self[key]
        except KeyError:
            return False
        else:
            return True
    def keys(self):
        """Return a new view of the mapping's keys."""
        return KeysView(self)
    def items(self):
        """Return a new view of the mapping's (key, value) pairs."""
        return ItemsView(self)
    def values(self):
        """Return a new view of the mapping's values."""
        return ValuesView(self)
    def __eq__(self, other):
        # Equality compares key/value content, materialised as plain dicts.
        if not isinstance(other, Mapping):
            return NotImplemented
        return dict(self.items()) == dict(other.items())
    def __ne__(self, other):
        return not (self == other)
# The read-only namespace proxy type behaves like a Mapping.
Mapping.register(mappingproxy)
class MappingView(Sized):
    """Common base of the keys/items/values view classes.

    A view keeps a reference to its mapping and reflects its live size.
    """
    def __init__(self, mapping):
        self._mapping = mapping
    def __len__(self):
        return len(self._mapping)
    def __repr__(self):
        return '{0.__class__.__name__}({0._mapping!r})'.format(self)
class KeysView(MappingView, Set):
    """Set-like, read-only view over a mapping's keys."""
    @classmethod
    def _from_iterable(cls, it):
        # Set operations on views produce ordinary built-in sets.
        return set(it)
    def __contains__(self, key):
        return key in self._mapping
    def __iter__(self):
        yield from self._mapping
# The concrete view type returned by dict.keys().
KeysView.register(dict_keys)
class ItemsView(MappingView, Set):
    """Set-like, read-only view over a mapping's (key, value) pairs."""
    @classmethod
    def _from_iterable(cls, it):
        # Set operations on views produce ordinary built-in sets.
        return set(it)
    def __contains__(self, item):
        key, expected = item
        try:
            stored = self._mapping[key]
        except KeyError:
            return False
        else:
            return stored == expected
    def __iter__(self):
        for key in self._mapping:
            yield (key, self._mapping[key])
# The concrete view type returned by dict.items().
ItemsView.register(dict_items)
class ValuesView(MappingView):
    """Read-only view over a mapping's values (values carry no set semantics)."""
    def __contains__(self, value):
        # Linear scan: values are not hashed and duplicates are possible.
        return any(value == self._mapping[key] for key in self._mapping)
    def __iter__(self):
        for key in self._mapping:
            yield self._mapping[key]
# The concrete view type returned by dict.values().
ValuesView.register(dict_values)
class MutableMapping(Mapping):
    """Mutable mapping ABC.

    Concrete subclasses must additionally provide __setitem__ and
    __delitem__; pop/popitem/clear/update/setdefault are derived.
    """
    __slots__ = ()
    @abstractmethod
    def __setitem__(self, key, value):
        raise KeyError
    @abstractmethod
    def __delitem__(self, key):
        raise KeyError
    # Private sentinel letting pop() distinguish "no default given"
    # from an explicit default of None.
    __marker = object()
    def pop(self, key, default=__marker):
        """Remove *key* and return its value, or *default* when absent."""
        try:
            value = self[key]
        except KeyError:
            if default is self.__marker:
                raise
            return default
        else:
            del self[key]
            return value
    def popitem(self):
        """Remove and return an arbitrary (key, value) pair."""
        try:
            key = next(iter(self))
        except StopIteration:
            raise KeyError
        value = self[key]
        del self[key]
        return key, value
    def clear(self):
        """Empty the mapping one popitem() at a time."""
        try:
            while True:
                self.popitem()
        except KeyError:
            pass
    def update(*args, **kwds):
        # *args instead of an explicit self so that a key literally named
        # 'self' can still be passed through **kwds.
        if len(args) > 2:
            raise TypeError("update() takes at most 2 positional "
                            "arguments ({} given)".format(len(args)))
        elif not args:
            raise TypeError("update() takes at least 1 argument (0 given)")
        self = args[0]
        other = args[1] if len(args) >= 2 else ()
        if isinstance(other, Mapping):
            for key in other:
                self[key] = other[key]
        elif hasattr(other, "keys"):
            # Duck-typed mapping: anything exposing keys().
            for key in other.keys():
                self[key] = other[key]
        else:
            # Otherwise treat it as an iterable of (key, value) pairs.
            for key, value in other:
                self[key] = value
        for key, value in kwds.items():
            self[key] = value
    def setdefault(self, key, default=None):
        """Return self[key], inserting *default* first when the key is absent."""
        try:
            return self[key]
        except KeyError:
            self[key] = default
        return default
MutableMapping.register(dict)
### SEQUENCES ###
class Sequence(Sized, Iterable, Container):
    """All the operations on a read-only sequence.
    Concrete subclasses must override __new__ or __init__,
    __getitem__, and __len__.
    """
    __slots__ = ()
    @abstractmethod
    def __getitem__(self, index):
        raise IndexError
    def __iter__(self):
        # Iterate by probing increasing indexes until IndexError.
        i = 0
        try:
            while True:
                v = self[i]
                yield v
                i += 1
        except IndexError:
            return
    def __contains__(self, value):
        # Linear membership scan.
        for v in self:
            if v == value:
                return True
        return False
    def __reversed__(self):
        for i in reversed(range(len(self))):
            yield self[i]
    def index(self, value):
        """Return the first index of *value*; raise ValueError when absent."""
        for i, v in enumerate(self):
            if v == value:
                return i
        raise ValueError
    def count(self, value):
        """Return the number of occurrences of *value*."""
        return sum(1 for v in self if v == value)
# Built-in immutable sequence types.
Sequence.register(tuple)
Sequence.register(str)
Sequence.register(range)
class ByteString(Sequence):
    """Common supertype unifying bytes and bytearray.
    XXX Should add all their methods.
    """
    __slots__ = ()
# Both built-in byte sequence types are ByteStrings.
ByteString.register(bytes)
ByteString.register(bytearray)
class MutableSequence(Sequence):
    """Mutable sequence ABC.

    Concrete subclasses must additionally provide __setitem__,
    __delitem__ and insert(); the rest is derived from those.
    """
    __slots__ = ()
    @abstractmethod
    def __setitem__(self, index, value):
        raise IndexError
    @abstractmethod
    def __delitem__(self, index):
        raise IndexError
    @abstractmethod
    def insert(self, index, value):
        """Insert *value* before *index*."""
        raise IndexError
    def append(self, value):
        """Append *value* at the end of the sequence."""
        self.insert(len(self), value)
    def clear(self):
        # Pop from the tail until empty.
        try:
            while True:
                self.pop()
        except IndexError:
            pass
    def reverse(self):
        """Reverse the sequence in place by swapping mirrored pairs."""
        n = len(self)
        for i in range(n//2):
            self[i], self[n-i-1] = self[n-i-1], self[i]
    def extend(self, values):
        """Append each element of *values* in order."""
        for v in values:
            self.append(v)
    def pop(self, index=-1):
        """Remove and return the item at *index* (default: last item)."""
        v = self[index]
        del self[index]
        return v
    def remove(self, value):
        """Remove the first occurrence of *value* (ValueError when absent)."""
        del self[self.index(value)]
    def __iadd__(self, values):
        self.extend(values)
        return self
MutableSequence.register(list)
MutableSequence.register(bytearray)  # Multiply inheriting, see ByteString
| gpl-3.0 |
pymedusa/SickRage | medusa/clients/torrent/mlnet.py | 2 | 1427 | # coding=utf-8
"""MLDonkey Client."""
from __future__ import unicode_literals
from medusa.clients.torrent.generic import GenericClient
class MLNetAPI(GenericClient):
    """MLDonkey (mlnet) torrent client API wrapper."""
    def __init__(self, host=None, username=None, password=None):
        """Constructor.
        :param host: base URL of the MLDonkey web UI
        :type host: string
        :param username:
        :type username: string
        :param password:
        :type password: string
        """
        super(MLNetAPI, self).__init__('mlnet', host, username, password)
        self.url = self.host
        # self.session.auth = HTTPDigestAuth(self.username, self.password);
    def _get_auth(self):
        # Probes the web UI root; any non-404 response body is treated as
        # the auth token.  NOTE(review): verify=False disables TLS
        # certificate validation -- confirm this is intended.
        try:
            self.response = self.session.get(self.host, verify=False)
            self.auth = self.response.content
        except Exception:
            return None
        return self.auth if not self.response.status_code == 404 else None
    def _add_torrent_uri(self, result):
        # MLDonkey accepts downloads via its /submit endpoint using the
        # console "dllink" command.
        self.url = '{host}submit'.format(host=self.host)
        params = {
            'q': 'dllink {url}'.format(url=result.url),
        }
        return self._request(method='get', params=params)
    def _add_torrent_file(self, result):
        # NOTE(review): identical to _add_torrent_uri -- it submits
        # result.url instead of uploading the .torrent payload; confirm
        # this is the intended behaviour for file-based results.
        self.url = '{host}submit'.format(host=self.host)
        params = {
            'q': 'dllink {url}'.format(url=result.url),
        }
        return self._request(method='get', params=params)
# Export name expected by the client factory.
api = MLNetAPI
| gpl-3.0 |
frivoal/reftest-converter | ref-conv.py | 1 | 1182 | #!/usr/bin/python
import sys
def generate_tag(ref, xml=False):
    """Build a reftest <link> tag for *ref* = (href, is_match).

    *xml* selects an XHTML self-closing tag (" />") over plain HTML (">").
    """
    ref_name, match = ref
    rel = "match" if match else "mismatch"
    close = " />" if xml else ">"
    return "<link rel='" + rel + "' href='" + ref_name + "'" + close
def find_refs(reftest_list):
    """Parse reftest.list lines into {test: [(reference, is_match), ...]}.

    Blank lines and lines whose first token contains '#' are skipped;
    '==' marks a match reference, anything else a mismatch.
    """
    tests = {}
    for entry in reftest_list:
        if entry.strip() == '':
            continue
        fields = entry.split()
        if "#" in fields[0]:
            continue  # comment line
        name, ref = fields[1], fields[2]
        tests.setdefault(name, []).append((ref, fields[0] == '=='))
    return tests
# --- command-line driver (Python 2: uses print statements) ---
if len(sys.argv) != 2:
    print "Error: this script should be called with a path as argument"
    exit(1)
# The argument may be the directory or the reftest.list path itself.
path = sys.argv[1].replace("reftest.list","")
print "processing: " + path
entries= [line.strip() for line in open(path + 'reftest.list')]
tests= find_refs(entries)
for test in tests:
    # Emit XHTML self-closing tags when the test file looks like XHTML.
    xml = False
    if 'XHTML' in open(path + test).read():
        xml = True
    f = open(path + test, "r")
    contents = f.readlines()
    f.close()
    # Insert the generated <link> lines just before the first bare <style>
    # line, then rewrite the file in place.
    for i, j in enumerate(contents):
        if j.strip() == '<style>':
            contents.insert(i, "\n".join(map(lambda t: generate_tag(t,xml), tests[test])) + "\n")
            break
    f = open(path + test, "w")
    contents = "".join(contents)
    f.write(contents)
    f.close()
| mit |
shannonjlove/namebench | nb_third_party/dns/inet.py | 248 | 3236 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Generic Internet address helper functions."""
import socket
import dns.ipv4
import dns.ipv6
# We assume that AF_INET is always defined.
AF_INET = socket.AF_INET
# AF_INET6 might not be defined in the socket module, but we need it.
# We'll try to use the socket module's value, and if it doesn't work,
# we'll use our own value.
try:
    AF_INET6 = socket.AF_INET6
except AttributeError:
    AF_INET6 = 9999  # arbitrary sentinel on IPv6-less builds
def inet_pton(family, text):
    """Convert the textual form of a network address into its binary form.
    @param family: the address family
    @type family: int
    @param text: the textual address
    @type text: string
    @raises NotImplementedError: the address family specified is not
    implemented.
    @rtype: string
    """
    if family == AF_INET:
        return dns.ipv4.inet_aton(text)
    if family == AF_INET6:
        return dns.ipv6.inet_aton(text)
    raise NotImplementedError
def inet_ntop(family, address):
    """Convert the binary form of a network address into its textual form.
    @param family: the address family
    @type family: int
    @param address: the binary address
    @type address: string
    @raises NotImplementedError: the address family specified is not
    implemented.
    @rtype: string
    """
    if family == AF_INET:
        return dns.ipv4.inet_ntoa(address)
    if family == AF_INET6:
        return dns.ipv6.inet_ntoa(address)
    raise NotImplementedError
def af_for_address(text):
    """Determine the address family of a textual-form network address.
    @param text: the textual address
    @type text: string
    @raises ValueError: the address family cannot be determined from the input.
    @rtype: int
    """
    # Try IPv4 first, then IPv6; the parsers signal failure by raising.
    # The original used bare ``except:`` clauses, which also swallowed
    # KeyboardInterrupt/SystemExit -- narrowed to Exception here.
    try:
        dns.ipv4.inet_aton(text)
        return AF_INET
    except Exception:
        try:
            dns.ipv6.inet_aton(text)
            return AF_INET6
        except Exception:
            raise ValueError
def is_multicast(text):
    """Is the textual-form network address a multicast address?
    @param text: the textual address
    @raises ValueError: the address family cannot be determined from the input.
    @rtype: bool
    """
    # Bare ``except:`` narrowed to Exception (the original also caught
    # KeyboardInterrupt/SystemExit).  Behaviour otherwise unchanged.
    try:
        first = ord(dns.ipv4.inet_aton(text)[0])
        return (first >= 224 and first <= 239)  # IPv4 class D: 224.0.0.0/4
    except Exception:
        try:
            first = ord(dns.ipv6.inet_aton(text)[0])
            return (first == 255)  # IPv6 multicast: ff00::/8
        except Exception:
            raise ValueError
| apache-2.0 |
okunishinishi/python-stringcase | stringcase.py | 1 | 4629 | """
String convert functions
"""
import re
def camelcase(string):
    """Convert *string* into camelCase.

    A single leading '-', '_' or '.' is dropped; every '-', '_', '.'
    or whitespace followed by a letter starts a new capitalised word.

    Args:
        string: String to convert.
    Returns:
        string: Camel case string.
    """
    text = re.sub(r"^[\-_\.]", '', str(string).lower())
    if not text:
        return text
    tail = re.sub(r"[\-_\.\s]([a-z])",
                  lambda matched: matched.group(1).upper(),
                  text[1:])
    return text[0].lower() + tail
def capitalcase(string):
    """Convert *string* into capital case (first letter upper-cased).

    Args:
        string: String to convert.
    Returns:
        string: Capital case string.
    """
    text = str(string)
    # Slicing keeps the empty string a no-op.
    return text[:1].upper() + text[1:]
def constcase(string):
    """Convert *string* into CONST_CASE (upper snake case).

    Args:
        string: String to convert.
    Returns:
        string: Const cased string.
    """
    return snakecase(string).upper()
def lowercase(string):
    """Coerce *string* to str and fold it to lower case.

    Args:
        string: String to convert.
    Returns:
        string: Lowercase string.
    """
    text = str(string)
    return text.lower()
def pascalcase(string):
    """Convert *string* into PascalCase (capitalised camel case).

    Args:
        string: String to convert.
    Returns:
        string: Pascal case string.
    """
    camel = camelcase(string)
    return capitalcase(camel)
def pathcase(string):
    """Convert *string* into path case (snake words joined with '/').

    Args:
        string: String to convert.
    Returns:
        string: Path cased string.
    """
    snake = snakecase(string)
    if not snake:
        return snake
    return snake.replace("_", "/")
def backslashcase(string):
    """Convert *string* into backslash case (snake words joined with '\\').

    Args:
        string: String to convert.
    Returns:
        string: Backslash cased string.
    """
    return snakecase(string).replace("_", "\\")
def sentencecase(string):
    """Convert *string* into sentence case.

    Punctuation separators become spaces, camel humps are split on a
    space, the result is trimmed and its first letter capitalised.

    Args:
        string: String to convert.
    Returns:
        string: Sentence cased string.
    """
    text = re.sub(r"[\-_\.\s]", ' ', str(string))
    if not text:
        return text
    spaced = re.sub(r"[A-Z]",
                    lambda matched: ' ' + matched.group(0).lower(),
                    text)
    trimmed = spaced.strip()
    return trimmed[:1].upper() + trimmed[1:]
def snakecase(string):
    """Convert *string* into snake_case.

    '-', '.' and whitespace become underscores; each upper-case letter
    is lower-cased and prefixed with an underscore.

    Args:
        string: String to convert.
    Returns:
        string: Snake cased string.
    """
    text = re.sub(r"[\-\.\s]", '_', str(string))
    if not text:
        return text
    head = text[0].lower()
    tail = re.sub(r"[A-Z]",
                  lambda matched: '_' + matched.group(0).lower(),
                  text[1:])
    return head + tail
def spinalcase(string):
    """Convert *string* into spinal case (snake words joined with '-').

    Args:
        string: String to convert.
    Returns:
        string: Spinal cased string.
    """
    return snakecase(string).replace("_", "-")
def dotcase(string):
    """Convert *string* into dot case (snake words joined with '.').

    Args:
        string: String to convert.
    Returns:
        string: Dot cased string.
    """
    return snakecase(string).replace("_", ".")
def titlecase(string):
    """Convert *string* into Title Case (each snake word capitalised,
    joined with a space).

    Args:
        string: String to convert.
    Returns:
        string: Title cased string.
    """
    words = snakecase(string).split("_")
    return ' '.join(word[:1].upper() + word[1:] for word in words)
def trimcase(string):
    """Coerce *string* to str and strip surrounding whitespace.

    Args:
        string: String to convert.
    Returns:
        string: Trimmed string.
    """
    text = str(string)
    return text.strip()
def uppercase(string):
    """Coerce *string* to str and fold it to upper case.

    Args:
        string: String to convert.
    Returns:
        string: Uppercase string.
    """
    text = str(string)
    return text.upper()
def alphanumcase(string):
    """Keep only the alphanumeric characters of *string* (per str.isalnum).

    Args:
        string: String to convert.
    Returns:
        string: String with non-alphanumeric characters removed.
    """
    return ''.join(ch for ch in str(string) if ch.isalnum())
| mit |
indrajitr/ansible | test/units/module_utils/common/parameters/test_list_deprecations.py | 37 | 1535 | # -*- coding: utf-8 -*-
# Copyright (c) 2019 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from ansible.module_utils.common.parameters import list_deprecations
@pytest.fixture
def params():
    # Sample module parameters.  NOTE(review): this fixture is not used by
    # test_list_deprecations below; it appears to be leftover scaffolding.
    return {
        'name': 'bob',
        'dest': '/etc/hosts',
        'state': 'present',
        'value': 5,
    }
def test_list_deprecations():
    """list_deprecations must report deprecated params at the top level,
    inside dict suboptions, and inside list-of-dict suboptions."""
    # NOTE(review): 'foo.old' uses a float removed_in_version (1.0) while
    # the others use strings -- presumably intentional, to cover both forms.
    argument_spec = {
        'old': {'type': 'str', 'removed_in_version': '2.5'},
        'foo': {'type': 'dict', 'options': {'old': {'type': 'str', 'removed_in_version': 1.0}}},
        'bar': {'type': 'list', 'elements': 'dict', 'options': {'old': {'type': 'str', 'removed_in_version': '2.10'}}},
    }
    params = {
        'name': 'rod',
        'old': 'option',
        'foo': {'old': 'value'},
        'bar': [{'old': 'value'}, {}],
    }
    result = list_deprecations(argument_spec, params)
    assert len(result) == 3
    # Sort for a deterministic order before asserting individual entries.
    result.sort(key=lambda entry: entry['msg'])
    assert result[0]['msg'] == """Param 'bar["old"]' is deprecated. See the module docs for more information"""
    assert result[0]['version'] == '2.10'
    assert result[1]['msg'] == """Param 'foo["old"]' is deprecated. See the module docs for more information"""
    assert result[1]['version'] == 1.0
    assert result[2]['msg'] == "Param 'old' is deprecated. See the module docs for more information"
    assert result[2]['version'] == '2.5'
| gpl-3.0 |
airmonitor/home_air_monitor | micropython/main.py | 1 | 6163 | from random import random
import connect_wifi
import ujson
import urequests
import utime
from boot import (
API_KEY,
API_URL,
LAT,
LONG,
PARTICLE_SENSOR,
SSID,
TEMP_HUM_PRESS_SENSOR,
TVOC_CO2_SENSOR,
WIFI_PASSWORD,
)
from i2c import I2CAdapter
from lib import logging
from machine import Pin, reset
# Sensor drivers are imported lazily, based on the hardware configured
# in boot.py, to keep the MicroPython memory footprint small.
if TEMP_HUM_PRESS_SENSOR == "BME680":
    import bme680
elif TEMP_HUM_PRESS_SENSOR == "BME280":
    import bme280
if PARTICLE_SENSOR in ["SDS011", "SDS021"]:
    import sds011
    SDS = sds011.SDS011(uart=2)  # SDS dust sensors are wired to UART2
elif PARTICLE_SENSOR == "PMS7003":
    from pms7003 import PassivePms7003
    from pms7003 import UartError
if TVOC_CO2_SENSOR == "CCS811":
    import CCS811
logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger(__name__)
# Counts main-loop iterations; the board soft-resets periodically (see below).
LOOP_COUNTER = 0
def sds_measurements():
    """Read PM2.5/PM10 from an SDS011/SDS021 dust sensor.

    Returns {"pm25": .., "pm10": ..} on the first non-zero reading,
    None if ten reads stay zero, or False on a bus error.

    Fix: the sensor is now always put back to sleep in a ``finally``
    block -- the original only slept it when all ten reads were zero,
    leaving the fan and laser running after every successful read
    (pms7003_measurements already used this finally pattern).
    """
    try:
        SDS.wake()
        utime.sleep(10)  # let the fan flush the chamber before sampling
        for _ in range(10):
            SDS.read()
            if SDS.pm25 != 0 and SDS.pm10 != 0:
                return {"pm25": SDS.pm25, "pm10": SDS.pm10}
    except OSError:
        return False
    finally:
        try:
            SDS.sleep()
        except OSError:
            # Best effort: a failed sleep must not mask the real result.
            pass
def pms7003_measurements():
    """Read one sample from a PMS7003 over UART2.

    Returns the parsed frame dict, or {} on any read/parse failure.
    The sensor is woken for the read and put back to sleep afterwards.
    """
    try:
        pms = PassivePms7003(uart=2)
        pms.wakeup()
        utime.sleep(10)  # let the fan stabilise the airflow before sampling
        return pms.read()
    except (OSError, UartError, TypeError):
        return {}
    finally:
        try:
            pms.sleep()
        except (OSError, UartError, TypeError, NameError):
            # NameError covers the case where the constructor itself failed,
            # leaving 'pms' unbound.
            pass
def send_measurements(data):
    """POST *data* as JSON to the AirMonitor API.

    Returns True after a successful send, None when *data* is empty
    (falls through without sending), or False on IndexError while
    handling the response.
    """
    LOG.info("Sending data to API %s", data)
    try:
        if data:
            post_data = ujson.dumps(data)
            res = urequests.post(
                API_URL,
                headers={"X-Api-Key": API_KEY, "Content-Type": "application/json"},
                data=post_data,
            ).json()
            LOG.info("API response %s", res)
            blink_api_response(message=res)
            return True
    except IndexError:
        return False
def get_particle_measurements():
    """Poll the configured particle sensor.

    Returns a dict of rounded PM readings, or {} when no sensor is
    configured or the read failed.
    """
    data = {}
    if PARTICLE_SENSOR == "PMS7003":
        particle_data = pms7003_measurements()
        if particle_data:
            data = {
                "pm1": round(particle_data["PM1_0_ATM"]),
                "pm25": round(particle_data["PM2_5_ATM"]),
                "pm10": round(particle_data["PM10_0_ATM"]),
            }
    elif PARTICLE_SENSOR in ["SDS011", "SDS021"]:
        particle_data = sds_measurements()
        try:
            data = {
                "pm25": round(particle_data["pm25"]),
                "pm10": round(particle_data["pm10"]),
            }
        except TypeError:
            # sds_measurements() returns False/None on failure, which is
            # not subscriptable -- keep the empty dict in that case.
            pass
    return data
def get_tvoc_co2():
    """Read eCO2/TVOC from a CCS811, if one is configured.

    Returns a dict on success, False on a sensor error, or None when the
    sensor is absent or has no data ready yet.  Relies on the module-level
    i2c_dev created in the __main__ block.
    """
    if TVOC_CO2_SENSOR == "CCS811":
        try:
            sensor = CCS811.CCS811(i2c=i2c_dev, addr=90)  # 90 = 0x5A, default address
            if sensor.data_ready():
                return {"co2": sensor.eCO2, "tvoc": sensor.tVOC}
        except (OSError, RuntimeError):
            return False
def get_temp_humid_pressure_measurements():
    """Read the configured BME sensor over I2C.

    Returns a dict of readings, False on a sensor error, or None when no
    supported sensor is configured.  Relies on the module-level i2c_dev
    created in the __main__ block.
    """
    if TEMP_HUM_PRESS_SENSOR == "BME680":
        try:
            sensor = bme680.BME680(i2c_device=i2c_dev)
            # Oversampling/filter settings trade read latency for less noise.
            sensor.set_humidity_oversample(bme680.OS_2X)
            sensor.set_pressure_oversample(bme680.OS_4X)
            sensor.set_temperature_oversample(bme680.OS_8X)
            sensor.set_filter(bme680.FILTER_SIZE_3)
            if sensor.get_sensor_data():
                return {
                    "temperature": sensor.data.temperature,
                    "humidity": sensor.data.humidity,
                    "pressure": sensor.data.pressure,
                    "gas_resistance": sensor.data.gas_resistance,
                }
        except (OSError, RuntimeError):
            return False
    elif TEMP_HUM_PRESS_SENSOR == "BME280":
        try:
            bme = bme280.BME280(i2c=i2c_dev)
            if bme.values:
                LOG.info("BME280 readings %s", bme.values)
                return {
                    "temperature": bme.values["temperature"],
                    "humidity": bme.values["humidity"],
                    "pressure": bme.values["pressure"],
                }
        except (OSError, RuntimeError):
            return False
def augment_data(measurements, sensor_name):
    """Round sensor readings and attach location/sensor metadata for the API.

    Returns {} when *measurements* is falsy (failed read), so callers can
    pass the result straight to send_measurements().
    NOTE(review): assumes every measurement value is numeric (round());
    confirm the BME280 driver returns numbers rather than formatted strings.
    """
    data = {}
    if measurements:
        for k, v in measurements.items():
            data[k] = round(v)
        data["lat"] = LAT
        data["long"] = LONG
        data["sensor"] = sensor_name
    return data
def blink():
    """Pulse the on-board LED (GPIO2) once for roughly 100 ms."""
    indicator = Pin(2, Pin.OUT)
    indicator.value(1)
    utime.sleep(0.1)
    indicator.value(0)
def blink_api_response(message):
    """Blink the LED twice when the API reports the metric was stored."""
    if message.get("status") != "Metric saved":
        return
    blink()
    utime.sleep(0.1)
    blink()
if __name__ == "__main__":
    if TEMP_HUM_PRESS_SENSOR:
        # ESP32 default I2C pins: SCL=GPIO22, SDA=GPIO21.
        # Fix: the original wrote Pin(022)/Pin(021); leading-zero integer
        # literals are a SyntaxError in Python 3 / MicroPython, so the
        # intended decimal pin numbers are used here.
        i2c_dev = I2CAdapter(scl=Pin(22), sda=Pin(21), freq=100000)
    while True:
        try:
            connect_wifi.connect(ssid=SSID, password=WIFI_PASSWORD)
            utime.sleep(10)
            # PARTICLE_SENSOR
            if PARTICLE_SENSOR:
                parsed_values = augment_data(
                    measurements=get_particle_measurements(),
                    sensor_name=PARTICLE_SENSOR,
                )
                send_measurements(data=parsed_values)
                utime.sleep(1)
            # TEMP_HUM_PRESS SENSOR
            if TEMP_HUM_PRESS_SENSOR:
                parsed_values = augment_data(
                    measurements=get_temp_humid_pressure_measurements(),
                    sensor_name=TEMP_HUM_PRESS_SENSOR,
                )
                send_measurements(data=parsed_values)
                utime.sleep(1)
            # CO2 TVOC SENSOR
            if TVOC_CO2_SENSOR:
                parsed_values = augment_data(
                    measurements=get_tvoc_co2(), sensor_name=TVOC_CO2_SENSOR
                )
                send_measurements(data=parsed_values)
            LOOP_COUNTER += 1
            LOG.info("Increasing loop_counter, actual value %s", LOOP_COUNTER)
            if LOOP_COUNTER == 47:
                # Periodic soft reset keeps long-running boards healthy.
                reset()
            # Randomised sleep (~25-35 min) staggers uploads across devices.
            utime.sleep(int(random() * 600 + 1500))
        except Exception as error:
            LOG.info("Caught exception %s", error)
            reset()
| gpl-3.0 |
def __boot():
    """Locate and load the real 'site' module, then replay PYTHONPATH
    entries through addsitedir() and re-order sys.path so user entries
    keep their intended position relative to the system entries."""
    import sys, os, os.path
    PYTHONPATH = os.environ.get('PYTHONPATH')
    if PYTHONPATH is None or (sys.platform=='win32' and not PYTHONPATH):
        PYTHONPATH = []
    else:
        PYTHONPATH = PYTHONPATH.split(os.pathsep)
    pic = getattr(sys,'path_importer_cache',{})
    # Entries after the PYTHONPATH prefix are the stdlib/system entries.
    stdpath = sys.path[len(PYTHONPATH):]
    mydir = os.path.dirname(__file__)
    #print "searching",stdpath,sys.path
    for item in stdpath:
        if item==mydir or not item:
            continue # skip if current dir. on Windows, or my own directory
        importer = pic.get(item)
        if importer is not None:
            loader = importer.find_module('site')
            if loader is not None:
                # This should actually reload the current module
                loader.load_module('site')
                break
        else:
            try:
                import imp # Avoid import loop in Python >= 3.3
                stream, path, descr = imp.find_module('site',[item])
            except ImportError:
                continue
            if stream is None:
                continue
            try:
                # This should actually reload the current module
                imp.load_module('site',stream,path,descr)
            finally:
                stream.close()
            break
    else:
        raise ImportError("Couldn't find the real 'site' module")
    #print "loaded", __file__
    known_paths = dict([(makepath(item)[1],1) for item in sys.path]) # 2.2 comp
    oldpos = getattr(sys,'__egginsert',0) # save old insertion position
    sys.__egginsert = 0 # and reset the current one
    for item in PYTHONPATH:
        addsitedir(item)
    sys.__egginsert += oldpos # restore effective old position
    d,nd = makepath(stdpath[0])
    insert_at = None
    new_path = []
    for item in sys.path:
        p,np = makepath(item)
        if np==nd and insert_at is None:
            # We've hit the first 'system' path entry, so added entries go here
            insert_at = len(new_path)
        if np in known_paths or insert_at is None:
            new_path.append(item)
        else:
            # new path after the insert point, back-insert it
            new_path.insert(insert_at, item)
            insert_at += 1
    sys.path[:] = new_path
# Run the bootstrap only when this stub is imported as the real 'site'
# module, then drop the helper from the namespace.
if __name__=='site':
    __boot()
    del __boot
| mit |
adrianholovaty/django | django/views/defaults.py | 95 | 2430 | from django import http
from django.template import (Context, RequestContext,
loader, TemplateDoesNotExist)
from django.views.decorators.csrf import requires_csrf_token
# This can be called when CsrfViewMiddleware.process_view has not run,
# therefore need @requires_csrf_token in case the template needs
# {% csrf_token %}.
@requires_csrf_token
def page_not_found(request, template_name='404.html'):
    """
    Default 404 handler.
    Templates: `404.html`
    Context:
        request_path
            The path of the requested URL (e.g., '/app/pages/bad_page/')
    """
    template = loader.get_template(template_name)  # a 404.html template must exist
    context = RequestContext(request, {'request_path': request.path})
    return http.HttpResponseNotFound(template.render(context))
@requires_csrf_token
def server_error(request, template_name='500.html'):
    """
    500 error handler.
    Templates: `500.html`
    Context: None
    """
    template = loader.get_template(template_name)  # a 500.html template must exist
    return http.HttpResponseServerError(template.render(Context({})))
# This can be called when CsrfViewMiddleware.process_view has not run,
# therefore need @requires_csrf_token in case the template needs
# {% csrf_token %}.
@requires_csrf_token
def permission_denied(request, template_name='403.html'):
    """
    Permission denied (403) handler.
    Templates: `403.html`
    Context: None
    If the template does not exist, an Http403 response containing the text
    "403 Forbidden" (as per RFC 2616) will be returned.
    """
    try:
        template = loader.get_template(template_name)
    except TemplateDoesNotExist:
        return http.HttpResponseForbidden('<h1>403 Forbidden</h1>')
    else:
        return http.HttpResponseForbidden(template.render(RequestContext(request)))
def shortcut(request, content_type_id, object_id):
    """Redirect to an object's URL via its content type (legacy wrapper)."""
    # TODO: Remove this in Django 2.0.
    # This is a legacy view that depends on the contenttypes framework.
    # The core logic was moved to django.contrib.contenttypes.views after
    # Django 1.0, but this remains here for backwards compatibility.
    # Note that the import is *within* this function, rather than being at
    # module level, because we don't want to assume people have contenttypes
    # installed.
    from django.contrib.contenttypes.views import shortcut as real_shortcut
    return real_shortcut(request, content_type_id, object_id)
| bsd-3-clause |
danielkitta/libsigrokdecode | decoders/sdcard_sd/lists.py | 3 | 5992 | ##
## This file is part of the sigrok project.
##
## Copyright (C) 2015 Uwe Hermann <uwe@hermann-uwe.de>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
##
# Lookup tables mapping SD-card command/status bit numbers to their names,
# as defined by the SD Physical Layer Simplified Specification.
# Normal commands (CMD)
# Unlisted items are 'Reserved' as per SD spec. The 'Unknown' items don't
# seem to be mentioned in the spec, but aren't marked as reserved either.
cmd_names = {
    0: 'GO_IDLE_STATE',
    # 1: Reserved
    2: 'ALL_SEND_CID',
    3: 'SEND_RELATIVE_ADDR',
    4: 'SET_DSR',
    5: 'IO_SEND_OP_COND', # SDIO-only
    6: 'SWITCH_FUNC', # New since spec 1.10
    7: 'SELECT/DESELECT_CARD',
    8: 'SEND_IF_COND',
    9: 'SEND_CSD',
    10: 'SEND_CID',
    11: 'VOLTAGE_SWITCH',
    12: 'STOP_TRANSMISSION',
    13: 'SEND_STATUS',
    # 14: Reserved
    15: 'GO_INACTIVE_STATE',
    16: 'SET_BLOCKLEN',
    17: 'READ_SINGLE_BLOCK',
    18: 'READ_MULTIPLE_BLOCK',
    19: 'SEND_TUNING_BLOCK',
    20: 'SPEED_CLASS_CONTROL',
    # 21-22: Reserved
    23: 'SET_BLOCK_COUNT',
    24: 'WRITE_BLOCK',
    25: 'WRITE_MULTIPLE_BLOCK',
    26: 'Reserved for manufacturer',
    27: 'PROGRAM_CSD',
    28: 'SET_WRITE_PROT',
    29: 'CLR_WRITE_PROT',
    30: 'SEND_WRITE_PROT',
    # 31: Reserved
    32: 'ERASE_WR_BLK_START',
    33: 'ERASE_WR_BLK_END',
    34: 'Reserved for CMD6', # New since spec 1.10
    35: 'Reserved for CMD6', # New since spec 1.10
    36: 'Reserved for CMD6', # New since spec 1.10
    37: 'Reserved for CMD6', # New since spec 1.10
    38: 'ERASE',
    # 39: Reserved
    40: 'Reserved for security specification',
    # 41: Reserved
    42: 'LOCK_UNLOCK',
    # 43-49: Reserved
    50: 'Reserved for CMD6', # New since spec 1.10
    # 51: Reserved
    52: 'IO_RW_DIRECT', # SDIO-only
    53: 'IO_RW_EXTENDED', # SDIO-only
    54: 'Unknown',
    55: 'APP_CMD',
    56: 'GEN_CMD',
    57: 'Reserved for CMD6', # New since spec 1.10
    # 58-59: Reserved
    60: 'Reserved for manufacturer',
    61: 'Reserved for manufacturer',
    62: 'Reserved for manufacturer',
    63: 'Reserved for manufacturer',
}
# Application-specific commands (ACMD)
# Unlisted items are 'Reserved' as per SD spec. The 'Unknown' items don't
# seem to be mentioned in the spec, but aren't marked as reserved either.
acmd_names = {
    # 1-5: Reserved
    6: 'SET_BUS_WIDTH',
    # 7-12: Reserved
    13: 'SD_STATUS',
    14: 'Reserved for Security Application',
    15: 'Reserved for Security Application',
    16: 'Reserved for Security Application',
    # 17: Reserved
    18: 'Reserved for SD security applications',
    # 19-21: Reserved
    22: 'SEND_NUM_WR_BLOCKS',
    23: 'SET_WR_BLK_ERASE_COUNT',
    # 24: Reserved
    25: 'Reserved for SD security applications',
    26: 'Reserved for SD security applications',
    27: 'Reserved for security specification',
    28: 'Reserved for security specification',
    # 29: Reserved
    30: 'Reserved for security specification',
    31: 'Reserved for security specification',
    32: 'Reserved for security specification',
    33: 'Reserved for security specification',
    34: 'Reserved for security specification',
    35: 'Reserved for security specification',
    # 36-37: Reserved
    38: 'Reserved for SD security applications',
    # 39-40: Reserved
    41: 'SD_SEND_OP_COND',
    42: 'SET_CLR_CARD_DETECT',
    43: 'Reserved for SD security applications',
    44: 'Reserved for SD security applications',
    45: 'Reserved for SD security applications',
    46: 'Reserved for SD security applications',
    47: 'Reserved for SD security applications',
    48: 'Reserved for SD security applications',
    49: 'Reserved for SD security applications',
    50: 'Unknown',
    51: 'SEND_SCR',
    52: 'Reserved for security specification',
    53: 'Reserved for security specification',
    54: 'Reserved for security specification',
    55: 'Non-existant', # Doesn't exist (equivalent to CMD55)
    56: 'Reserved for security specification',
    57: 'Reserved for security specification',
    58: 'Reserved for security specification',
    59: 'Reserved for security specification',
    60: 'Unknown',
    61: 'Unknown',
    62: 'Unknown',
    63: 'Unknown',
}
# Voltage-range bits from the CMD8 (SEND_IF_COND) argument/response.
accepted_voltages = {
    0b0001: '2.7-3.6V',
    0b0010: 'reserved for low voltage range',
    0b0100: 'reserved',
    0b1000: 'reserved',
    # All other values: "not defined".
}
# Card status field: bit number -> flag name (R1 response format).
card_status = {
    0: 'Reserved for manufacturer test mode',
    1: 'Reserved for manufacturer test mode',
    2: 'Reserved for application specific commands',
    3: 'AKE_SEQ_ERROR',
    4: 'Reserved for SDIO card',
    5: 'APP_CMD',
    6: 'Unknown',
    7: 'Unknown',
    8: 'READY_FOR_DATA',
    9: 'CURRENT_STATE', # CURRENT_STATE is a 4-bit value (decimal: 0..15).
    10: 'CURRENT_STATE',
    11: 'CURRENT_STATE',
    12: 'CURRENT_STATE',
    13: 'ERASE_RESET',
    14: 'CARD_ECC_DISABLED',
    15: 'WP_ERASE_SKIP',
    16: 'CSD_OVERWRITE',
    17: 'Reserved for DEFERRED_RESPONSE', # See eSD addendum
    18: 'Reserved',
    19: 'ERROR',
    20: 'CC_ERROR',
    21: 'CARD_ECC_FAILED',
    22: 'ILLEGAL_COMMAND',
    23: 'COM_CRC_ERROR',
    24: 'LOCK_UNLOCK_FAILED',
    25: 'CARD_IS_LOCKED',
    26: 'WP_VIOLATION',
    27: 'ERASE_PARAM',
    28: 'ERASE_SEQ_ERROR',
    29: 'BLOCK_LEN_ERROR',
    30: 'ADDRESS_ERROR',
    31: 'OUT_OF_RANGE',
}
# SD status (ACMD13) field names; currently no named fields are decoded.
sd_status = {
    # 311:0: Reserved for manufacturer
    # 391:312: Reserved
}
| gpl-3.0 |
turbomanage/training-data-analyst | courses/machine_learning/deepdive2/structured/solutions/serving/application/lib/pyasn1/type/opentype.py | 15 | 2848 | #
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pyasn1/license.html
#
__all__ = ['OpenType']
class OpenType(object):
    """Map of value -> ASN.1 type, used to decode untyped fields.

    An *OpenType* object models an untyped field of a constructed ASN.1
    type -- in ASN.1 syntax the `ANY DEFINED BY` clause for scalars, or
    `SET OF ANY DEFINED BY` / `SEQUENCE OF ANY DEFINED BY` for container
    types.  It is typically used together with the
    :class:`~pyasn1.type.univ.Any` object.

    OpenType objects duck-type a read-only Python :class:`dict`.  The
    passed `typeMap` is stored by reference, not copied, so mutating it
    at run time is reflected in the *OpenType* object's behavior.

    Parameters
    ----------
    name: :py:class:`str`
        Field name
    typeMap: :py:class:`dict`
        A map of value->ASN.1 type. It's stored by reference and can be
        mutated later to register new mappings.

    Examples
    --------
    For untyped scalars:

    .. code-block:: python

        openType = OpenType(
            'id', {1: Integer(),
                   2: OctetString()}
        )

        Sequence(
            componentType=NamedTypes(
                NamedType('id', Integer()),
                NamedType('blob', Any(), openType=openType)
            )
        )

    For untyped `SET OF` or `SEQUENCE OF` vectors:

    .. code-block:: python

        openType = OpenType(
            'id', {1: Integer(),
                   2: OctetString()}
        )

        Sequence(
            componentType=NamedTypes(
                NamedType('id', Integer()),
                NamedType('blob', SetOf(componentType=Any()),
                          openType=openType)
            )
        )
    """

    def __init__(self, name, typeMap=None):
        self.__name = name
        self.__typeMap = {} if typeMap is None else typeMap

    @property
    def name(self):
        """Field name this open type is keyed on."""
        return self.__name

    # Read-only Python dict protocol, delegated to the underlying type map.

    def __contains__(self, key):
        return key in self.__typeMap

    def __getitem__(self, key):
        return self.__typeMap[key]

    def __iter__(self):
        return iter(self.__typeMap)

    def keys(self):
        return self.__typeMap.keys()

    def values(self):
        return self.__typeMap.values()

    def items(self):
        return self.__typeMap.items()
| apache-2.0 |
kvar/ansible | lib/ansible/plugins/doc_fragments/proxysql.py | 44 | 1391 | # -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
    """Reusable documentation fragments for ProxySQL modules.

    Each class attribute below is a YAML snippet that Ansible merges into
    the DOCUMENTATION of modules declaring the corresponding
    ``extends_documentation_fragment``.
    """

    # Documentation fragment for ProxySQL connectivity
    CONNECTIVITY = r'''
options:
  login_user:
    description:
      - The username used to authenticate to ProxySQL admin interface.
    type: str
  login_password:
    description:
      - The password used to authenticate to ProxySQL admin interface.
    type: str
  login_host:
    description:
      - The host used to connect to ProxySQL admin interface.
    type: str
    default: '127.0.0.1'
  login_port:
    description:
      - The port used to connect to ProxySQL admin interface.
    type: int
    default: 6032
  config_file:
    description:
      - Specify a config file from which I(login_user) and I(login_password)
        are to be read.
    type: path
    default: ''
requirements:
  - PyMySQL (Python 2.7 and Python 3.X), or
  - MySQLdb (Python 2.x)
'''

    # Documentation fragment for managing ProxySQL configuration
    MANAGING_CONFIG = r'''
options:
  save_to_disk:
    description:
      - Save config to sqlite db on disk to persist the configuration.
    type: bool
    default: 'yes'
  load_to_runtime:
    description:
      - Dynamically load config to runtime memory.
    type: bool
    default: 'yes'
'''
| gpl-3.0 |
mementum/backtrader | backtrader/filters/renko.py | 1 | 4627 | #!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015-2020 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from . import Filter
__all__ = ['Renko']
class Renko(Filter):
    '''Modify the data stream to draw Renko bars (or bricks)

    Params:

      - ``hilo`` (default: *False*) Use high and low instead of close to decide
        if a new brick is needed

      - ``size`` (default: *None*) The size to consider for each brick

      - ``autosize`` (default: *20.0*) If *size* is *None*, this will be used
        to autocalculate the size of the bricks (simply dividing the current
        price by the given value)

      - ``dynamic`` (default: *False*) If *True* and using *autosize*, the size
        of the bricks will be recalculated when moving to a new brick. This
        will of course eliminate the perfect alignment of Renko bricks.

      - ``align`` (default: *1.0*) Factor use to align the price boundaries of
        the bricks. If the price is for example *3563.25* and *align* is
        *10.0*, the resulting aligned price will be *3560*. The calculation:

          - 3563.25 / 10.0 = 356.325
          - round it and remove the decimals -> 356
          - 356 * 10.0 -> 3560

      - ``roundstart`` (default: *True*) If *True*, round the initial start
        value to int. Else keep the original value, which should aid when
        backtesting penny stocks

    See:
      - http://stockcharts.com/school/doku.php?id=chart_school:chart_analysis:renko
    '''
    params = (
        ('hilo', False),
        ('size', None),
        ('autosize', 20.0),
        ('dynamic', False),
        ('align', 1.0),
        ('roundstart', True),
    )

    def nextstart(self, data):
        # Seed the brick boundaries (_top/_bot) around the aligned open
        # price of the very first bar.
        o = data.open[0]
        o = round(o / self.p.align, 0) * self.p.align  # aligned
        # Brick size: explicit ``size`` param, or a fraction of the start price.
        self._size = self.p.size or float(o // self.p.autosize)
        if self.p.roundstart:
            o = int(o)

        self._top = o + self._size
        self._bot = o - self._size

    def next(self, data):
        # Rewrite the current bar in place as a brick when the reference
        # price crosses a boundary; otherwise remove the bar from the stream.
        c = data.close[0]
        h = data.high[0]
        l = data.low[0]

        # Reference price(s) used to detect a boundary cross.
        if self.p.hilo:
            hiprice = h
            loprice = l
        else:
            hiprice = loprice = c

        if hiprice >= self._top:
            # deliver a renko brick from top -> top + size
            self._bot = bot = self._top

            if self.p.size is None and self.p.dynamic:
                # Recompute the brick size from the current price.
                self._size = float(c // self.p.autosize)
                top = bot + self._size
                top = round(top / self.p.align, 0) * self.p.align  # aligned
            else:
                top = bot + self._size

            self._top = top

            data.open[0] = bot
            data.low[0] = bot
            data.high[0] = top
            data.close[0] = top
            data.volume[0] = 0.0
            data.openinterest[0] = 0.0
            return False  # length of data stream is unaltered

        elif loprice <= self._bot:
            # deliver a renko brick from bot -> bot - size
            self._top = top = self._bot

            if self.p.size is None and self.p.dynamic:
                # Recompute the brick size from the current price.
                self._size = float(c // self.p.autosize)
                bot = top - self._size
                bot = round(bot / self.p.align, 0) * self.p.align  # aligned
            else:
                bot = top - self._size

            self._bot = bot

            # NOTE(review): in this down-brick branch ``high`` receives the
            # lower value (bot) and ``low`` the higher one (top) -- the
            # mirror image of the up-brick branch. Confirm this inversion is
            # intended before changing it.
            data.open[0] = top
            data.low[0] = top
            data.high[0] = bot
            data.close[0] = bot
            data.volume[0] = 0.0
            data.openinterest[0] = 0.0
            return False  # length of data stream is unaltered

        # No boundary crossed: drop this bar from the stream.
        data.backwards()
        return True  # length of stream was changed, get new bar
| gpl-3.0 |
krishna-pandey-git/django | tests/view_tests/models.py | 281 | 1329 | """
Regression tests for Django built-in views.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Author(models.Model):
    # Minimal author model with a hand-built absolute URL.
    name = models.CharField(max_length=100)

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        # URL is derived from the primary key.
        return '/authors/%s/' % self.id
@python_2_unicode_compatible
class BaseArticle(models.Model):
    """
    An abstract article Model so that we can create article models with and
    without a get_absolute_url method (for create_update generic views tests).
    """
    title = models.CharField(max_length=100)
    slug = models.SlugField()
    author = models.ForeignKey(Author, models.CASCADE)

    class Meta:
        # Abstract: concrete subclasses below supply their own table.
        abstract = True

    def __str__(self):
        return self.title
class Article(BaseArticle):
    # Concrete article *without* a get_absolute_url method.
    date_created = models.DateTimeField()
class UrlArticle(BaseArticle):
    """
    An Article class with a get_absolute_url defined.
    """
    date_created = models.DateTimeField()

    def get_absolute_url(self):
        return '/urlarticles/%s/' % self.slug
    # Extra function attribute, inspected by the tests.
    get_absolute_url.purge = True
class DateArticle(BaseArticle):
    """
    An article Model with a DateField instead of DateTimeField,
    for testing #7602
    """
    date_created = models.DateField()
| bsd-3-clause |
untom/scikit-learn | examples/linear_model/plot_sgd_loss_functions.py | 249 | 1095 | """
==========================
SGD: convex loss functions
==========================
A plot that compares the various convex loss functions supported by
:class:`sklearn.linear_model.SGDClassifier` .
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def modified_huber_loss(y_true, y_pred):
    """Elementwise modified Huber loss of the margin ``y_true * y_pred``.

    Piecewise in the margin z: quadratic ``(1 - z)**2`` for z in [-1, 1),
    linear ``-4 * z`` for z < -1, and exactly 0 once z >= 1.
    """
    margin = y_true * y_pred
    quadratic = (1.0 - margin) ** 2
    return np.where(margin >= 1.0, 0.0,
                    np.where(margin >= -1.0, quadratic, -4.0 * margin))
# Plot each convex surrogate loss over decision-function values in [-4, 4].
xmin, xmax = -4, 4
xx = np.linspace(xmin, xmax, 100)
# Zero-one loss drawn as an explicit step.
plt.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], 'k-',
         label="Zero-one loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0), 'g-',
         label="Hinge loss")
plt.plot(xx, -np.minimum(xx, 0), 'm-',
         label="Perceptron loss")
plt.plot(xx, np.log2(1 + np.exp(-xx)), 'r-',
         label="Log loss")
plt.plot(xx, np.where(xx < 1, 1 - xx, 0) ** 2, 'b-',
         label="Squared hinge loss")
plt.plot(xx, modified_huber_loss(xx, 1), 'y--',
         label="Modified Huber loss")
plt.ylim((0, 8))
plt.legend(loc="upper right")
plt.xlabel(r"Decision function $f(x)$")
plt.ylabel("$L(y, f(x))$")
plt.show()
| bsd-3-clause |
pratikmallya/hue | desktop/core/ext-py/Django-1.6.10/tests/inspectdb/models.py | 53 | 2394 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
class People(models.Model):
    # CharField plus a self-referential ForeignKey.
    name = models.CharField(max_length=255)
    parent = models.ForeignKey('self')
class Message(models.Model):
    # Field name differs from the underlying database column name.
    from_field = models.ForeignKey(People, db_column='from_id')
class PeopleData(models.Model):
    # ForeignKey doubling as the primary key.
    people_pk = models.ForeignKey(People, primary_key=True)
    ssn = models.CharField(max_length=11)
class PeopleMoreData(models.Model):
    # Unique ForeignKey (one-to-one at the database level).
    people_unique = models.ForeignKey(People, unique=True)
    # Field deliberately named after a builtin/SQL keyword.
    license = models.CharField(max_length=255)
class DigitsInColumnName(models.Model):
    # Column names that start with, or consist entirely of, digits.
    all_digits = models.CharField(max_length=11, db_column='123')
    leading_digit = models.CharField(max_length=11, db_column='4extra')
    leading_digits = models.CharField(max_length=11, db_column='45extra')
class SpecialColumnName(models.Model):
    # Column names with underscores, punctuation and non-ASCII characters.
    field = models.IntegerField(db_column='field')
    # Underscores
    field_field_0 = models.IntegerField(db_column='Field_')
    field_field_1 = models.IntegerField(db_column='Field__')
    field_field_2 = models.IntegerField(db_column='__field')
    # Other chars
    prc_x = models.IntegerField(db_column='prc(%) x')
    non_ascii = models.IntegerField(db_column='tamaño')
class ColumnTypes(models.Model):
    # One field per core model field type.
    id = models.AutoField(primary_key=True)
    big_int_field = models.BigIntegerField()
    bool_field = models.BooleanField(default=False)
    null_bool_field = models.NullBooleanField()
    char_field = models.CharField(max_length=10)
    comma_separated_int_field = models.CommaSeparatedIntegerField(max_length=99)
    date_field = models.DateField()
    date_time_field = models.DateTimeField()
    decimal_field = models.DecimalField(max_digits=6, decimal_places=1)
    email_field = models.EmailField()
    file_field = models.FileField(upload_to="unused")
    file_path_field = models.FilePathField()
    float_field = models.FloatField()
    int_field = models.IntegerField()
    ip_address_field = models.IPAddressField()
    gen_ip_adress_field = models.GenericIPAddressField(protocol="ipv4")
    pos_int_field = models.PositiveIntegerField()
    pos_small_int_field = models.PositiveSmallIntegerField()
    slug_field = models.SlugField()
    small_int_field = models.SmallIntegerField()
    text_field = models.TextField()
    time_field = models.TimeField()
    url_field = models.URLField()
| apache-2.0 |
ai-course-project/shrdlite-course-project | cgi-bin/physics.py | 1 | 2349 | """make sure that we don't break the physics!"""
def check_physics_all(preds, objects):
    """True iff every predicate obeys the physics and placements are unique."""
    each_ok = all(check_physics(pred, objects) for pred in preds)
    return each_ok and check_uniqueness(preds)
def check_uniqueness(preds):
    """Check that we dont want to put two things inside a box or one thing
    inside two boxes.

    Returns False when two *distinct* 'inside'/'ontop' predicates share
    either the same top object or the same bottom object; True otherwise.
    """
    # Materialize the filtered predicates into a list. Under Python 3,
    # filter() returns a one-shot iterator, so the original nested loops
    # exhausted it on the first outer pass and silently missed conflicts
    # between later predicate pairs.
    ontop = [p for p in preds if p[0] in {'inside', 'ontop'}]
    for a in ontop:
        for b in ontop:
            (_, at, ab) = a
            (_, bt, bb) = b
            if a != b and (at == bt or ab == bb):
                return False
    return True
def check_physics(pred, objects):
    """True iff a single (relation, x, y) predicate is physically possible."""
    (rel, x, y) = pred
    # An object can never be related to itself.
    if x == y:
        return False
    # Only 'ontop'/'inside' carry physical constraints; other relations pass.
    checker = {'ontop': check_ontop, 'inside': check_ontop}.get(rel)
    return True if checker is None else checker(x, y, objects)
def check_ontop(t, b, objects):
    """True iff object *t* may rest directly on (or in) object *b*."""
    top = objects[t]
    bot = objects[b]
    # Balls must be in boxes or on the floor, otherwise they roll away.
    if is_ball(top) and not is_form(bot, {'box', 'floor'}):
        return False
    # Balls cannot support anything.
    if is_ball(bot):
        return False
    # Small objects cannot support large objects.
    if is_small(bot) and is_large(top):
        return False
    # Boxes cannot contain pyramids, planks or boxes of the same size.
    if (is_box(bot)
            and is_form(top, {'pyramid', 'plank', 'box'})
            and is_same(top, bot, 'size')):
        return False
    # Small boxes cannot be supported by small bricks or pyramids.
    if (is_small(top) and is_box(top)
            and is_small(bot)
            and is_form(bot, {'brick', 'pyramid'})):
        return False
    # Large boxes cannot be supported by large pyramids.
    if (is_large(top) and is_box(top)
            and is_large(bot) and is_pyramid(bot)):
        return False
    return True
def is_form(o, s):
    """True when object *o*'s 'form' is one of the forms in the set *s*."""
    return o['form'] in s

def _matches(o, prop, value):
    """True when *o*[*prop*] equals *value* (shared helper)."""
    return o[prop] == value

def is_brick(o):
    return _matches(o, 'form', 'brick')

def is_plank(o):
    return _matches(o, 'form', 'plank')

def is_ball(o):
    return _matches(o, 'form', 'ball')

def is_pyramid(o):
    return _matches(o, 'form', 'pyramid')

def is_box(o):
    return _matches(o, 'form', 'box')

def is_table(o):
    return _matches(o, 'form', 'table')

def is_floor(o):
    return _matches(o, 'form', 'floor')

def is_small(o):
    return _matches(o, 'size', 'small')

def is_large(o):
    return _matches(o, 'size', 'large')

def is_same(a, b, prop):
    """True when objects *a* and *b* have equal values for *prop*."""
    return a[prop] == b[prop]
| gpl-3.0 |
nicolas-petit/website | website_event_register_free/controllers/website_event.py | 16 | 3356 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2015 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import http
from openerp.addons.website_event.controllers.main import website_event
class WebsiteEvent(website_event):
    """Website controller adding a free (no-payment) event registration."""

    def _validate(self, name, post, force_check=False):
        # Validate one form field from *post*. Only checks the field when it
        # is present in post, unless force_check is set.
        if name in post or force_check:
            if name == 'name' and not post.get('name', '').strip():
                return False
            if name == 'email' and not post.get('email', '').strip():
                return False
            # tickets must be a positive integer string.
            if name == 'tickets' and (
                    not post.get('tickets', '').isdigit() or
                    int(post.get('tickets')) <= 0):
                return False
        return True

    @http.route(['/event/<model("event.event"):event>/register/register_free'],
                type='http', auth="public", website=True)
    def event_register_free(self, event, **post):
        # Handle the free registration form: build registration values from
        # either the logged-in user or the submitted name/email fields.
        def validate(name, force_check=False):
            return self._validate(name, post, force_check=force_check)
        reg_obj = http.request.env['event.registration']
        registration_vals = {}
        if (http.request.env.ref('base.public_user') !=
                http.request.env.user and
                validate('tickets', force_check=True)):
            # if logged in, use that info
            registration_vals = reg_obj._prepare_registration(
                event, post, http.request.env.user.id,
                partner=http.request.env.user.partner_id)
        # NOTE(review): this is a plain ``if``, not ``elif`` -- when name,
        # email and tickets all validate it overwrites the logged-in
        # registration_vals above (dropping the partner). Confirm intended.
        if all(map(lambda f: validate(f, force_check=True),
                   ['name', 'email', 'tickets'])):
            # otherwise, create a simple registration
            registration_vals = reg_obj._prepare_registration(
                event, post, http.request.env.user.id)
        if registration_vals:
            # sudo: public visitors are allowed to create their registration.
            registration = reg_obj.sudo().create(registration_vals)
            if registration.partner_id:
                registration._onchange_partner()
            registration.registration_open()
            return http.request.render(
                'website_event_register_free.partner_register_confirm',
                {'registration': registration})
        # Validation failed (or first visit): re-render the form.
        values = {
            'event': event,
            'range': range,
            'tickets': post.get('tickets', 1),
            'validate': validate,
            'post': post,
        }
        return http.request.render(
            'website_event_register_free.partner_register_form', values)
| agpl-3.0 |
zhaobin19918183/zhaobinCode | zhaobin/mysite/mysite/settings.py | 1 | 4649 | # -*- coding:UTF-8 -*- #必须在第一行或者第二行
# -*- coding:gb2312 -*- #必须在第一行或者第二行
# -*- coding:GBK -*- #必须在第一行或者第二行
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.9.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
from django.conf import settings
from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS as TCP

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = '!l!1vdls4$u7$kl8o^%=jv_@j3xvq+)y3!2jj5$z(!sof_nug#'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition
INSTALLED_APPS = [
    # 'django_admin_bootstrapped',
    'suit',
    'polls',
    'avatar',
    'django.contrib.admin',
    'django.contrib.auth',
    'users',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'booklist',
    'filebrowser',
    'photologue',
]

######
######
# Admin site title and menu layout (django-suit admin theme).
SUIT_CONFIG = {
    # header
    'ADMIN_NAME': u'网站名',
    'MENU': (
        'sites',
        {'app': 'polls', 'label': u'列表'},
        {'app': 'booklist', 'label': u'汽车'},
    ),
}

# REST framework: paginated API responses, 10 items per page.
REST_FRAMEWORK = {
    # 'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.IsAdminUser',),
    # 'PAGINATE_BY': 10
    'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
    'PAGE_SIZE': 10
}

MIDDLEWARE_CLASSES = [
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.locale.LocaleMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'mysite.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'mysite.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.sqlite3',
#         'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
#     }
# }
# NOTE(review): database credentials are hard-coded and committed; move them
# to environment variables or a non-versioned secrets file.
DATABASES = {
    'default': {
        'ENGINE':'django.db.backends.postgresql_psycopg2',
        'NAME': 'zhao',
        'USER': 'admin1',
        'PASSWORD': 'zb123456',
        'HOST': '127.0.0.1',
        'PORT': '5432',
    }
}

# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
LANGUAGE_CODE = 'zh_hans'
FILE_CHARSET = 'utf-8'
DEFAULT_CHARSET = 'utf-8'
USE_I18N = True
USE_L10N = True
# https://docs.djangoproject.com/en/1.9/topics/i18n/
TIME_ZONE = 'Asia/Shanghai'
USE_TZ = True

# static
STATIC_URL = '/static/'
MEDIA_URL ='/templates/'
# NOTE(review): absolute, machine-specific media path -- consider deriving
# it from BASE_DIR so the project runs on other machines.
MEDIA_ROOT = '/Users/zhaobin/Desktop/zhaobin/mysite/polls/templates'

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
| gpl-3.0 |
vmarkovtsev/django | django/contrib/gis/db/backends/postgis/operations.py | 168 | 15689 | import re
from django.conf import settings
from django.contrib.gis.db.backends.base.operations import \
BaseSpatialOperations
from django.contrib.gis.db.backends.postgis.adapter import PostGISAdapter
from django.contrib.gis.db.backends.postgis.pgraster import (
from_pgraster, to_pgraster,
)
from django.contrib.gis.db.backends.utils import SpatialOperator
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Distance
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.postgresql.operations import DatabaseOperations
from django.db.utils import ProgrammingError
from django.utils.functional import cached_property
from .models import PostGISGeometryColumns, PostGISSpatialRefSys
from .pgraster import get_pgraster_srid
class PostGISOperator(SpatialOperator):
    """Spatial operator that tracks whether it works on geography columns."""

    def __init__(self, geography=False, **kwargs):
        # Only a subset of the operators and functions are available
        # for the geography type.
        self.geography = geography
        super(PostGISOperator, self).__init__(**kwargs)

    def as_sql(self, connection, lookup, *args):
        # Refuse to build SQL for a geography column unless this operator
        # explicitly supports geography.
        if lookup.lhs.output_field.geography and not self.geography:
            raise ValueError('PostGIS geography does not support the "%s" '
                             'function/operator.' % (self.func or self.op,))
        return super(PostGISOperator, self).as_sql(connection, lookup, *args)
class PostGISDistanceOperator(PostGISOperator):
    """Distance comparison that swaps in sphere/spheroid functions for
    geodetic (angular-unit) geometry columns."""
    sql_template = '%(func)s(%(lhs)s, %(rhs)s) %(op)s %%s'

    def as_sql(self, connection, lookup, template_params, sql_params):
        # Geodetic geometry (not geography) columns cannot use the plain
        # distance function; pick the spheroid or sphere variant instead.
        if not lookup.lhs.output_field.geography and lookup.lhs.output_field.geodetic(connection):
            sql_template = self.sql_template
            if len(lookup.rhs) == 3 and lookup.rhs[-1] == 'spheroid':
                # 'spheroid' option: extra parameter slot for the spheroid.
                template_params.update({'op': self.op, 'func': 'ST_Distance_Spheroid'})
                sql_template = '%(func)s(%(lhs)s, %(rhs)s, %%s) %(op)s %%s'
            else:
                template_params.update({'op': self.op, 'func': 'ST_Distance_Sphere'})
            return sql_template % template_params, sql_params
        return super(PostGISDistanceOperator, self).as_sql(connection, lookup, template_params, sql_params)
class PostGISOperations(BaseSpatialOperations, DatabaseOperations):
name = 'postgis'
postgis = True
geography = True
geom_func_prefix = 'ST_'
version_regex = re.compile(r'^(?P<major>\d)\.(?P<minor1>\d)\.(?P<minor2>\d+)')
Adapter = PostGISAdapter
Adaptor = Adapter # Backwards-compatibility alias.
gis_operators = {
'bbcontains': PostGISOperator(op='~'),
'bboverlaps': PostGISOperator(op='&&', geography=True),
'contained': PostGISOperator(op='@'),
'contains': PostGISOperator(func='ST_Contains'),
'overlaps_left': PostGISOperator(op='&<'),
'overlaps_right': PostGISOperator(op='&>'),
'overlaps_below': PostGISOperator(op='&<|'),
'overlaps_above': PostGISOperator(op='|&>'),
'left': PostGISOperator(op='<<'),
'right': PostGISOperator(op='>>'),
'strictly_below': PostGISOperator(op='<<|'),
'stricly_above': PostGISOperator(op='|>>'),
'same_as': PostGISOperator(op='~='),
'exact': PostGISOperator(op='~='), # alias of same_as
'contains_properly': PostGISOperator(func='ST_ContainsProperly'),
'coveredby': PostGISOperator(func='ST_CoveredBy', geography=True),
'covers': PostGISOperator(func='ST_Covers', geography=True),
'crosses': PostGISOperator(func='ST_Crosses'),
'disjoint': PostGISOperator(func='ST_Disjoint'),
'equals': PostGISOperator(func='ST_Equals'),
'intersects': PostGISOperator(func='ST_Intersects', geography=True),
'overlaps': PostGISOperator(func='ST_Overlaps'),
'relate': PostGISOperator(func='ST_Relate'),
'touches': PostGISOperator(func='ST_Touches'),
'within': PostGISOperator(func='ST_Within'),
'dwithin': PostGISOperator(func='ST_DWithin', geography=True),
'distance_gt': PostGISDistanceOperator(func='ST_Distance', op='>', geography=True),
'distance_gte': PostGISDistanceOperator(func='ST_Distance', op='>=', geography=True),
'distance_lt': PostGISDistanceOperator(func='ST_Distance', op='<', geography=True),
'distance_lte': PostGISDistanceOperator(func='ST_Distance', op='<=', geography=True),
}
unsupported_functions = set()
function_names = {
'BoundingCircle': 'ST_MinimumBoundingCircle',
'MemSize': 'ST_Mem_Size',
'NumPoints': 'ST_NPoints',
}
def __init__(self, connection):
super(PostGISOperations, self).__init__(connection)
prefix = self.geom_func_prefix
self.area = prefix + 'Area'
self.bounding_circle = prefix + 'MinimumBoundingCircle'
self.centroid = prefix + 'Centroid'
self.collect = prefix + 'Collect'
self.difference = prefix + 'Difference'
self.distance = prefix + 'Distance'
self.distance_sphere = prefix + 'distance_sphere'
self.distance_spheroid = prefix + 'distance_spheroid'
self.envelope = prefix + 'Envelope'
self.extent = prefix + 'Extent'
self.extent3d = prefix + '3DExtent'
self.force_rhr = prefix + 'ForceRHR'
self.geohash = prefix + 'GeoHash'
self.geojson = prefix + 'AsGeoJson'
self.gml = prefix + 'AsGML'
self.intersection = prefix + 'Intersection'
self.kml = prefix + 'AsKML'
self.length = prefix + 'Length'
self.length3d = prefix + '3DLength'
self.length_spheroid = prefix + 'length_spheroid'
self.makeline = prefix + 'MakeLine'
self.mem_size = prefix + 'mem_size'
self.num_geom = prefix + 'NumGeometries'
self.num_points = prefix + 'npoints'
self.perimeter = prefix + 'Perimeter'
self.perimeter3d = prefix + '3DPerimeter'
self.point_on_surface = prefix + 'PointOnSurface'
self.polygonize = prefix + 'Polygonize'
self.reverse = prefix + 'Reverse'
self.scale = prefix + 'Scale'
self.snap_to_grid = prefix + 'SnapToGrid'
self.svg = prefix + 'AsSVG'
self.sym_difference = prefix + 'SymDifference'
self.transform = prefix + 'Transform'
self.translate = prefix + 'Translate'
self.union = prefix + 'Union'
self.unionagg = prefix + 'Union'
@cached_property
def spatial_version(self):
"""Determine the version of the PostGIS library."""
# Trying to get the PostGIS version because the function
# signatures will depend on the version used. The cost
# here is a database query to determine the version, which
# can be mitigated by setting `POSTGIS_VERSION` with a 3-tuple
# comprising user-supplied values for the major, minor, and
# subminor revision of PostGIS.
if hasattr(settings, 'POSTGIS_VERSION'):
version = settings.POSTGIS_VERSION
else:
# Run a basic query to check the status of the connection so we're
# sure we only raise the error below if the problem comes from
# PostGIS and not from PostgreSQL itself (see #24862).
self._get_postgis_func('version')
try:
vtup = self.postgis_version_tuple()
except ProgrammingError:
raise ImproperlyConfigured(
'Cannot determine PostGIS version for database "%s" '
'using command "SELECT postgis_lib_version()". '
'GeoDjango requires at least PostGIS version 2.0. '
'Was the database created from a spatial database '
'template?' % self.connection.settings_dict['NAME']
)
version = vtup[1:]
return version
def convert_extent(self, box, srid):
"""
Returns a 4-tuple extent for the `Extent` aggregate by converting
the bounding box text returned by PostGIS (`box` argument), for
example: "BOX(-90.0 30.0, -85.0 40.0)".
"""
if box is None:
return None
ll, ur = box[4:-1].split(',')
xmin, ymin = map(float, ll.split())
xmax, ymax = map(float, ur.split())
return (xmin, ymin, xmax, ymax)
def convert_extent3d(self, box3d, srid):
"""
Returns a 6-tuple extent for the `Extent3D` aggregate by converting
the 3d bounding-box text returned by PostGIS (`box3d` argument), for
example: "BOX3D(-90.0 30.0 1, -85.0 40.0 2)".
"""
if box3d is None:
return None
ll, ur = box3d[6:-1].split(',')
xmin, ymin, zmin = map(float, ll.split())
xmax, ymax, zmax = map(float, ur.split())
return (xmin, ymin, zmin, xmax, ymax, zmax)
def convert_geom(self, hex, geo_field):
"""
Converts the geometry returned from PostGIS aggretates.
"""
if hex:
return Geometry(hex, srid=geo_field.srid)
else:
return None
def geo_db_type(self, f):
"""
Return the database field type for the given spatial field.
"""
if f.geom_type == 'RASTER':
return 'raster'
elif f.geography:
if f.srid != 4326:
raise NotImplementedError('PostGIS only supports geography columns with an SRID of 4326.')
return 'geography(%s,%d)' % (f.geom_type, f.srid)
else:
# Type-based geometries.
# TODO: Support 'M' extension.
if f.dim == 3:
geom_type = f.geom_type + 'Z'
else:
geom_type = f.geom_type
return 'geometry(%s,%d)' % (geom_type, f.srid)
def get_distance(self, f, dist_val, lookup_type):
"""
Retrieve the distance parameters for the given geometry field,
distance lookup value, and the distance lookup type.
This is the most complex implementation of the spatial backends due to
what is supported on geodetic geometry columns vs. what's available on
projected geometry columns. In addition, it has to take into account
the geography column type.
"""
# Getting the distance parameter and any options.
if len(dist_val) == 1:
value, option = dist_val[0], None
else:
value, option = dist_val
# Shorthand boolean flags.
geodetic = f.geodetic(self.connection)
geography = f.geography
if isinstance(value, Distance):
if geography:
dist_param = value.m
elif geodetic:
if lookup_type == 'dwithin':
raise ValueError('Only numeric values of degree units are '
'allowed on geographic DWithin queries.')
dist_param = value.m
else:
dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))
else:
# Assuming the distance is in the units of the field.
dist_param = value
if (not geography and geodetic and lookup_type != 'dwithin'
and option == 'spheroid'):
# using distance_spheroid requires the spheroid of the field as
# a parameter.
return [f._spheroid, dist_param]
else:
return [dist_param]
def get_geom_placeholder(self, f, value, compiler):
"""
Provides a proper substitution value for Geometries that are not in the
SRID of the field. Specifically, this routine will substitute in the
ST_Transform() function call.
"""
# Get the srid for this object
if value is None:
value_srid = None
elif f.geom_type == 'RASTER':
value_srid = get_pgraster_srid(value)
else:
value_srid = value.srid
# Adding Transform() to the SQL placeholder if the value srid
# is not equal to the field srid.
if value_srid is None or value_srid == f.srid:
placeholder = '%s'
elif f.geom_type == 'RASTER':
placeholder = '%s((%%s)::raster, %s)' % (self.transform, f.srid)
else:
placeholder = '%s(%%s, %s)' % (self.transform, f.srid)
if hasattr(value, 'as_sql'):
# If this is an F expression, then we don't really want
# a placeholder and instead substitute in the column
# of the expression.
sql, _ = compiler.compile(value)
placeholder = placeholder % sql
return placeholder
def _get_postgis_func(self, func):
"""
Helper routine for calling PostGIS functions and returning their result.
"""
# Close out the connection. See #9437.
with self.connection.temporary_connection() as cursor:
cursor.execute('SELECT %s()' % func)
return cursor.fetchone()[0]
def postgis_geos_version(self):
"Returns the version of the GEOS library used with PostGIS."
return self._get_postgis_func('postgis_geos_version')
def postgis_lib_version(self):
"Returns the version number of the PostGIS library used with PostgreSQL."
return self._get_postgis_func('postgis_lib_version')
    def postgis_proj_version(self):
        "Returns the version of the PROJ.4 library used with PostGIS."
        # Thin wrapper over the zero-argument SQL function of the same name.
        return self._get_postgis_func('postgis_proj_version')
    def postgis_version(self):
        "Returns PostGIS version number and compile-time options."
        # Thin wrapper over the zero-argument SQL function of the same name.
        return self._get_postgis_func('postgis_version')
    def postgis_full_version(self):
        "Returns the full PostGIS version and build configuration information."
        # Thin wrapper over the zero-argument SQL function of the same name.
        return self._get_postgis_func('postgis_full_version')
def postgis_version_tuple(self):
"""
Returns the PostGIS version as a tuple (version string, major,
minor, subminor).
"""
# Getting the PostGIS version
version = self.postgis_lib_version()
m = self.version_regex.match(version)
if m:
major = int(m.group('major'))
minor1 = int(m.group('minor1'))
minor2 = int(m.group('minor2'))
else:
raise Exception('Could not parse PostGIS version string: %s' % version)
return (version, major, minor1, minor2)
def proj_version_tuple(self):
"""
Return the version of PROJ.4 used by PostGIS as a tuple of the
major, minor, and subminor release numbers.
"""
proj_regex = re.compile(r'(\d+)\.(\d+)\.(\d+)')
proj_ver_str = self.postgis_proj_version()
m = proj_regex.search(proj_ver_str)
if m:
return tuple(map(int, [m.group(1), m.group(2), m.group(3)]))
else:
raise Exception('Could not determine PROJ.4 version from PostGIS.')
def spatial_aggregate_name(self, agg_name):
if agg_name == 'Extent3D':
return self.extent3d
else:
return self.geom_func_prefix + agg_name
    # Routines for getting the OGC-compliant models.
    def geometry_columns(self):
        # Model class mapped to the OGC geometry_columns metadata view.
        return PostGISGeometryColumns
    def spatial_ref_sys(self):
        # Model class mapped to the OGC spatial_ref_sys metadata table.
        return PostGISSpatialRefSys
    # Methods to convert between PostGIS rasters and dicts that are
    # readable by GDALRaster.
    def parse_raster(self, value):
        # Decode a PostGIS raster value into a GDALRaster-compatible dict.
        return from_pgraster(value)
    def deconstruct_raster(self, value):
        # Encode a GDALRaster-compatible value back into PostGIS raster format.
        return to_pgraster(value)
| bsd-3-clause |
arthurlogilab/raven-python | raven/contrib/celery/__init__.py | 2 | 1974 | """
raven.contrib.celery
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import logging
from celery.signals import after_setup_logger, task_failure
from raven.handlers.logging import SentryHandler
class CeleryFilter(logging.Filter):
    """Filter out Celery's own internal error records (``_log_error``)."""

    def filter(self, record):
        data = getattr(record, 'data', {})
        if isinstance(data, dict):
            # Celery 3.x marks internally-generated error records with an
            # explicit 'internal' flag in the record's extra data.
            return data.get('internal', record.funcName != '_log_error')
        # Celery 2.5 fallback: identify internal records by function name.
        return record.funcName != '_log_error'
def register_signal(client):
    """Report Celery task failures to Sentry via the task_failure signal."""
    def _capture_task_failure(sender, task_id, args, kwargs, **kw):
        # The signal fires inside the task's exception handler, so the
        # current exception info is still available for Raven to capture.
        client.captureException(extra={
            'task_id': task_id,
            'task': sender,
            'args': args,
            'kwargs': kwargs,
        })
    task_failure.connect(_capture_task_failure, weak=False)
def register_logger_signal(client, logger=None):
    """Install a Sentry logging handler whenever Celery sets up its logging.

    If *logger* is None the root logger is used.
    """
    celery_filter = CeleryFilter()
    if logger is None:
        logger = logging.getLogger()
    sentry_handler = SentryHandler(client)
    sentry_handler.setLevel(logging.ERROR)
    sentry_handler.addFilter(celery_filter)

    def _install_sentry_handler(sender, logger, loglevel, logfile, format,
                                colorize, **kw):
        # If a SentryHandler is already attached, only make sure it carries
        # the CeleryFilter instead of attaching a second handler.
        for existing in logger.handlers:
            if type(existing) == SentryHandler:
                existing.addFilter(celery_filter)
                return False
        logger.addHandler(sentry_handler)

    after_setup_logger.connect(_install_sentry_handler, weak=False)
| bsd-3-clause |
ysekky/GPy | GPy/likelihoods/bernoulli.py | 2 | 10515 | # Copyright (c) 2012-2014 The GPy authors (see AUTHORS.txt)
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from ..util.univariate_Gaussian import std_norm_pdf, std_norm_cdf, derivLogCdfNormal, logCdfNormal
from . import link_functions
from .likelihood import Likelihood
class Bernoulli(Likelihood):
    """
    Bernoulli likelihood for binary observations.

    .. math::
        p(y_{i}|\\lambda(f_{i})) = \\lambda(f_{i})^{y_{i}}(1-\\lambda(f_{i}))^{1-y_{i}}

    .. Note::
        Y takes values in either {-1, 1} or {0, 1}.
        link function should have the domain [0, 1], e.g. probit (default) or Heaviside

    .. See also::
        likelihood.py, for the parent class
    """
    def __init__(self, gp_link=None):
        if gp_link is None:
            gp_link = link_functions.Probit()
        super(Bernoulli, self).__init__(gp_link, 'Bernoulli')
        if isinstance(gp_link, (link_functions.Heaviside, link_functions.Probit)):
            # Probit/Heaviside inverse links yield a log-concave likelihood,
            # which EP and Laplace approximations can exploit.
            self.log_concave = True

    def to_dict(self):
        """Serialize this likelihood into a JSON-compatible dict."""
        input_dict = super(Bernoulli, self)._to_dict()
        input_dict["class"] = "GPy.likelihoods.Bernoulli"
        return input_dict

    def _preprocess_values(self, Y):
        """
        Check if the values of the observations correspond to the values
        assumed by the likelihood function, and remap {0, 1} to {-1, 1}.

        ..Note:: Binary classification algorithm works better with classes {-1, 1}
        """
        Y_prep = Y.copy()
        Y1 = Y[Y.flatten() == 1].size
        Y2 = Y[Y.flatten() == 0].size
        assert Y1 + Y2 == Y.size, 'Bernoulli likelihood is meant to be used only with outputs in {0, 1}.'
        Y_prep[Y.flatten() == 0] = -1
        return Y_prep

    def moments_match_ep(self, Y_i, tau_i, v_i, Y_metadata_i=None):
        """
        Moments match of the marginal approximation in EP algorithm

        :param Y_i: observation (in {0, 1} or {-1, 1})
        :param tau_i: precision of the cavity distribution (float)
        :param v_i: mean/variance of the cavity distribution (float)
        :returns: (Z_hat, mu_hat, sigma2_hat) of the tilted distribution
        """
        if Y_i == 1:
            sign = 1.
        elif Y_i == 0 or Y_i == -1:
            sign = -1.
        else:
            raise ValueError("bad value for Bernoulli observation (0, 1)")
        if isinstance(self.gp_link, link_functions.Probit):
            z = sign * v_i / np.sqrt(tau_i**2 + tau_i)
            phi_div_Phi = derivLogCdfNormal(z)
            log_Z_hat = logCdfNormal(z)
            mu_hat = v_i / tau_i + sign * phi_div_Phi / np.sqrt(tau_i**2 + tau_i)
            sigma2_hat = 1. / tau_i - (phi_div_Phi / (tau_i**2 + tau_i)) * (z + phi_div_Phi)
        elif isinstance(self.gp_link, link_functions.Heaviside):
            z = sign * v_i / np.sqrt(tau_i)
            phi_div_Phi = derivLogCdfNormal(z)
            log_Z_hat = logCdfNormal(z)
            mu_hat = v_i / tau_i + sign * phi_div_Phi / np.sqrt(tau_i)
            # BUG FIX: this expression previously referenced an undefined
            # name `a` (NameError).  The standard EP second-moment update
            # (Rasmussen & Williams, eq. 3.58) uses z here.
            sigma2_hat = (1. - z * phi_div_Phi - np.square(phi_div_Phi)) / tau_i
        else:
            # TODO: do we want to revert to numerical quadrature here?
            raise ValueError("Exact moment matching not available for link {}".format(self.gp_link.__name__))
        # TODO: Output log_Z_hat instead of Z_hat (needs to be change in all others likelihoods)
        return np.exp(log_Z_hat), mu_hat, sigma2_hat

    def variational_expectations(self, Y, m, v, gh_points=None, Y_metadata=None):
        """
        Gauss-Hermite estimate of E_q[log p(y|f)] and its derivatives with
        respect to the mean and variance of the Gaussian q.  Probit link only.
        """
        if not isinstance(self.gp_link, link_functions.Probit):
            raise NotImplementedError
        if gh_points is None:
            gh_x, gh_w = self._gh_points()
        else:
            gh_x, gh_w = gh_points
        gh_w = gh_w / np.sqrt(np.pi)
        shape = m.shape
        m, v, Y = m.flatten(), v.flatten(), Y.flatten()
        # Map labels to +/-1 so that Phi(Ysign * f) is the success probability.
        Ysign = np.where(Y == 1, 1, -1)
        X = gh_x[None, :] * np.sqrt(2. * v[:, None]) + (m * Ysign)[:, None]
        p = std_norm_cdf(X)
        p = np.clip(p, 1e-9, 1. - 1e-9)  # for numerical stability
        N = std_norm_pdf(X)
        F = np.log(p).dot(gh_w)
        NoverP = N / p
        dF_dm = (NoverP * Ysign[:, None]).dot(gh_w)
        dF_dv = -0.5 * (NoverP**2 + NoverP * X).dot(gh_w)
        return F.reshape(*shape), dF_dm.reshape(*shape), dF_dv.reshape(*shape), None

    def predictive_mean(self, mu, variance, Y_metadata=None):
        """Predictive probability p(y*=1) under a posterior N(mu, variance)."""
        if isinstance(self.gp_link, link_functions.Probit):
            return std_norm_cdf(mu / np.sqrt(1 + variance))
        elif isinstance(self.gp_link, link_functions.Heaviside):
            return std_norm_cdf(mu / np.sqrt(variance))
        else:
            raise NotImplementedError

    def predictive_variance(self, mu, variance, pred_mean, Y_metadata=None):
        """Predictive variance; only defined (as 0) for the Heaviside link."""
        if isinstance(self.gp_link, link_functions.Heaviside):
            return 0.
        else:
            return np.nan

    def pdf_link(self, inv_link_f, y, Y_metadata=None):
        """
        Likelihood function given inverse link of f.

        .. math::
            p(y_{i}|\\lambda(f_{i})) = \\lambda(f_{i})^{y_{i}}(1-\\lambda(f_{i}))^{1-y_{i}}

        :param inv_link_f: latent variables inverse link of f.
        :type inv_link_f: Nx1 array
        :param y: data
        :type y: Nx1 array
        :param Y_metadata: Y_metadata not used in bernoulli
        :returns: likelihood evaluated for this point
        :rtype: float

        .. Note:
            Each y_i must be in {0, 1}
        """
        return np.where(y == 1, inv_link_f, 1. - inv_link_f)

    def logpdf_link(self, inv_link_f, y, Y_metadata=None):
        """
        Log Likelihood function given inverse link of f.

        .. math::
            \\ln p(y_{i}|\\lambda(f_{i})) = y_{i}\\log\\lambda(f_{i}) + (1-y_{i})\\log (1-\\lambda(f_{i}))

        :param inv_link_f: latent variables inverse link of f.
        :type inv_link_f: Nx1 array
        :param y: data
        :type y: Nx1 array
        :param Y_metadata: Y_metadata not used in bernoulli
        :returns: log likelihood evaluated at points inverse link of f.
        :rtype: float
        """
        p = np.where(y == 1, inv_link_f, 1. - inv_link_f)
        # Clip away zeros so the log stays finite.
        return np.log(np.clip(p, 1e-9, np.inf))

    def dlogpdf_dlink(self, inv_link_f, y, Y_metadata=None):
        """
        Gradient of the pdf at y, given inverse link of f w.r.t inverse link of f.

        .. math::
            \\frac{d\\ln p(y_{i}|\\lambda(f_{i}))}{d\\lambda(f)} = \\frac{y_{i}}{\\lambda(f_{i})} - \\frac{(1 - y_{i})}{(1 - \\lambda(f_{i}))}

        :param inv_link_f: latent variables inverse link of f.
        :type inv_link_f: Nx1 array
        :param y: data
        :type y: Nx1 array
        :param Y_metadata: Y_metadata not used in bernoulli
        :returns: gradient of log likelihood evaluated at points inverse link of f.
        :rtype: Nx1 array
        """
        # Clip to the open interval (0, 1) to avoid division by zero.
        ff = np.clip(inv_link_f, 1e-9, 1 - 1e-9)
        denom = np.where(y == 1, ff, -(1 - ff))
        return 1. / denom

    def d2logpdf_dlink2(self, inv_link_f, y, Y_metadata=None):
        """
        Hessian at y, given inv_link_f, w.r.t inv_link_f the hessian will be 0 unless i == j
        i.e. second derivative logpdf at y given inverse link of f_i and inverse link of f_j w.r.t inverse link of f_i and inverse link of f_j.

        .. math::
            \\frac{d^{2}\\ln p(y_{i}|\\lambda(f_{i}))}{d\\lambda(f)^{2}} = \\frac{-y_{i}}{\\lambda(f)^{2}} - \\frac{(1-y_{i})}{(1-\\lambda(f))^{2}}

        :param inv_link_f: latent variables inverse link of f.
        :type inv_link_f: Nx1 array
        :param y: data
        :type y: Nx1 array
        :param Y_metadata: Y_metadata not used in bernoulli
        :returns: Diagonal of log hessian matrix (second derivative of log likelihood evaluated at points inverse link of f.
        :rtype: Nx1 array

        .. Note::
            Will return diagonal of hessian, since every where else it is 0, as the likelihood factorizes over cases
            (the distribution for y_i depends only on inverse link of f_i not on inverse link of f_(j!=i)
        """
        arg = np.where(y == 1, inv_link_f, 1. - inv_link_f)
        ret = -1. / np.square(np.clip(arg, 1e-9, 1e9))
        if np.any(np.isinf(ret)):
            # BUG FIX: this branch previously executed the bare name `stop`,
            # a NameError left over from debugging.  Raise explicitly instead.
            raise ValueError('d2logpdf_dlink2 produced infinite values')
        return ret

    def d3logpdf_dlink3(self, inv_link_f, y, Y_metadata=None):
        """
        Third order derivative log-likelihood function at y given inverse link of f w.r.t inverse link of f

        .. math::
            \\frac{d^{3} \\ln p(y_{i}|\\lambda(f_{i}))}{d^{3}\\lambda(f)} = \\frac{2y_{i}}{\\lambda(f)^{3}} - \\frac{2(1-y_{i}}{(1-\\lambda(f))^{3}}

        :param inv_link_f: latent variables passed through inverse link of f.
        :type inv_link_f: Nx1 array
        :param y: data
        :type y: Nx1 array
        :param Y_metadata: Y_metadata not used in bernoulli
        :returns: third derivative of log likelihood evaluated at points inverse_link(f)
        :rtype: Nx1 array
        """
        assert np.atleast_1d(inv_link_f).shape == np.atleast_1d(y).shape
        # Silence the divide-by-zero warnings this vectorized form can emit.
        state = np.seterr(divide='ignore')
        # TODO check y \in {0, 1} or {-1, 1}
        d3logpdf_dlink3 = np.where(y == 1, 2. / (inv_link_f**3), -2. / ((1. - inv_link_f)**3))
        np.seterr(**state)
        return d3logpdf_dlink3

    def predictive_quantiles(self, mu, var, quantiles, Y_metadata=None):
        """
        Get the "quantiles" of the binary labels (Bernoulli draws). all the
        quantiles must be either 0 or 1, since those are the only values the
        draw can take!
        """
        p = self.predictive_mean(mu, var)
        return [np.asarray(p > (q / 100.), dtype=np.int32) for q in quantiles]

    def samples(self, gp, Y_metadata=None):
        """
        Returns a set of samples of observations based on a given value of the latent variable.

        :param gp: latent variable
        """
        orig_shape = gp.shape
        gp = gp.flatten()
        # One Bernoulli draw per latent value, with success probability
        # given by the inverse link.
        ns = np.ones_like(gp, dtype=int)
        Ysim = np.random.binomial(ns, self.gp_link.transf(gp))
        return Ysim.reshape(orig_shape)

    def exact_inference_gradients(self, dL_dKdiag, Y_metadata=None):
        """No likelihood hyperparameters, so the gradient is all zeros."""
        return np.zeros(self.size)
| bsd-3-clause |
chauhanhardik/populo_2 | cms/djangoapps/contentstore/management/commands/fix_not_found.py | 108 | 1110 | """
Script for fixing the item not found errors in a course
"""
from django.core.management.base import BaseCommand, CommandError
from opaque_keys.edx.keys import CourseKey
from xmodule.modulestore.django import modulestore
from xmodule.modulestore import ModuleStoreEnum
# To run from command line: ./manage.py cms fix_not_found course-v1:org+course+run
class Command(BaseCommand):
    """Management command that repairs a course's ItemNotFound errors."""
    help = "Fix a course's ItemNotFound errors"

    def handle(self, *args, **options):
        "Execute the command"
        if len(args) != 1:
            raise CommandError("requires 1 argument: <course_id>")
        course_key = CourseKey.from_string(args[0])
        # for now only support on split mongo
        # pylint: disable=protected-access
        store = modulestore()._get_modulestore_for_courselike(course_key)
        if not hasattr(store, 'fix_not_found'):
            raise CommandError("The owning modulestore does not support this command.")
        store.fix_not_found(course_key, ModuleStoreEnum.UserID.mgmt_command)
| agpl-3.0 |
adconk/grandmaangieskitchen | node_modules/gulp-sass/node_modules/node-sass/node_modules/pangyp/gyp/tools/pretty_sln.py | 1831 | 5099 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prints the information in a sln file in a diffable way.
It first outputs each projects in alphabetical order with their
dependencies.
Then it outputs a possible build order.
"""
__author__ = 'nsylvain (Nicolas Sylvain)'
import os
import re
import sys
import pretty_vcproj
def BuildProject(project, built, projects, deps):
  """Append `project` to `built` in dependency order, building deps first.

  Prints each project name as it is "built".  Not infinite-recursion proof:
  a dependency cycle will overflow the stack.
  """
  # if all dependencies are done, we can build it, otherwise we try to build
  # the dependency.
  for dep in deps[project]:
    if dep not in built:
      BuildProject(dep, built, projects, deps)
  # Single-argument print() form works identically on Python 2 and 3
  # (the original `print project` statement was Python-2-only).
  print(project)
  built.append(project)
def ParseSolution(solution_file):
  """Parse a Visual Studio .sln file.

  Returns a (projects, dependencies) pair where `projects` maps each project
  name to [path, clsid, original path] and `dependencies` maps each project
  name to a sorted list of the names of the projects it depends on.
  """
  # All projects, their clsid and paths.
  projects = dict()
  # A list of dependencies associated with a project.
  dependencies = dict()
  # Regular expressions that matches the SLN format.
  # The first line of a project definition.
  begin_project = re.compile(r'^Project\("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942'
                             r'}"\) = "(.*)", "(.*)", "(.*)"$')
  # The last line of a project definition.
  end_project = re.compile('^EndProject$')
  # The first line of a dependency list.
  begin_dep = re.compile(
      r'ProjectSection\(ProjectDependencies\) = postProject$')
  # The last line of a dependency list.
  end_dep = re.compile('EndProjectSection$')
  # A line describing a dependency.
  dep_line = re.compile(' *({.*}) = ({.*})$')
  in_deps = False
  # Guard against a stray dependency line before any project definition
  # (previously this was a NameError).
  current_project = None
  # Close the file deterministically (previously the handle leaked).
  with open(solution_file) as solution:
    for line in solution:
      results = begin_project.search(line)
      if results:
        # Hack to remove icu because the diff is too different.
        if results.group(1).find('icu') != -1:
          continue
        # We remove "_gyp" from the names because it helps to diff them.
        current_project = results.group(1).replace('_gyp', '')
        projects[current_project] = [results.group(2).replace('_gyp', ''),
                                     results.group(3),
                                     results.group(2)]
        dependencies[current_project] = []
        continue
      results = end_project.search(line)
      if results:
        current_project = None
        continue
      results = begin_dep.search(line)
      if results:
        in_deps = True
        continue
      results = end_dep.search(line)
      if results:
        in_deps = False
        continue
      results = dep_line.search(line)
      if results and in_deps and current_project:
        dependencies[current_project].append(results.group(1))
        continue
  # Change all dependencies clsid to name instead.
  for project in dependencies:
    # For each dependencies in this project
    new_dep_array = []
    for dep in dependencies[project]:
      # Look for the project name matching this clsid
      for project_info in projects:
        if projects[project_info][1] == dep:
          new_dep_array.append(project_info)
    dependencies[project] = sorted(new_dep_array)
  return (projects, dependencies)
def PrintDependencies(projects, deps):
  """Print each project's path and dependency list, sorted by name."""
  # Single-argument print() calls run identically on Python 2 and 3
  # (the original print statements were Python-2-only).
  print("---------------------------------------")
  print("Dependencies for all projects")
  print("---------------------------------------")
  print("-- --")
  for (project, dep_list) in sorted(deps.items()):
    print("Project : %s" % project)
    print("Path : %s" % projects[project][0])
    if dep_list:
      for dep in dep_list:
        print(" - %s" % dep)
    print("")
  print("-- --")
def PrintBuildOrder(projects, deps):
  """Print one valid sequential build order covering every project."""
  # Single-argument print() calls run identically on Python 2 and 3
  # (the original print statements were Python-2-only).
  print("---------------------------------------")
  print("Build order ")
  print("---------------------------------------")
  print("-- --")
  built = []
  for (project, _) in sorted(deps.items()):
    if project not in built:
      BuildProject(project, built, projects, deps)
  print("-- --")
def PrintVCProj(projects):
  """Pretty-print every project's .vcproj file via the pretty_vcproj tool.

  Reads the solution path from sys.argv[1] and forwards sys.argv[3:] as
  extra arguments to pretty_vcproj.
  """
  for project in projects:
    # Single-argument print() calls run identically on Python 2 and 3
    # (the original print statements were Python-2-only).
    print("-------------------------------------")
    print("-------------------------------------")
    print(project)
    print(project)
    print(project)
    print("-------------------------------------")
    print("-------------------------------------")
    # Resolve the vcproj path relative to the directory of the .sln file.
    project_path = os.path.abspath(os.path.join(os.path.dirname(sys.argv[1]),
                                                projects[project][2]))
    pretty = pretty_vcproj
    argv = [ '',
             project_path,
             '$(SolutionDir)=%s\\' % os.path.dirname(sys.argv[1]),
           ]
    argv.extend(sys.argv[3:])
    pretty.main(argv)
def main():
  """Command-line entry point; returns a process exit status."""
  # check if we have exactly 1 parameter.
  if len(sys.argv) < 2:
    # Single-argument print() form is valid on both Python 2 and 3.
    print('Usage: %s "c:\\path\\to\\project.sln"' % sys.argv[0])
    return 1
  (projects, deps) = ParseSolution(sys.argv[1])
  PrintDependencies(projects, deps)
  PrintBuildOrder(projects, deps)
  if '--recursive' in sys.argv:
    PrintVCProj(projects)
  return 0
# Script entry point: exit with main()'s return value.
if __name__ == '__main__':
  sys.exit(main())
| apache-2.0 |
sol/pygments | pygments/lexers/erlang.py | 25 | 18936 | # -*- coding: utf-8 -*-
"""
pygments.lexers.erlang
~~~~~~~~~~~~~~~~~~~~~~
Lexers for Erlang.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, bygroups, words, do_insertions, \
include, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic
__all__ = ['ErlangLexer', 'ErlangShellLexer', 'ElixirConsoleLexer',
'ElixirLexer']
line_re = re.compile('.*?\n')
class ErlangLexer(RegexLexer):
    """
    For the Erlang functional programming language.
    Blame Jeremy Thurgood (http://jerith.za.net/).
    .. versionadded:: 0.9
    """
    name = 'Erlang'
    aliases = ['erlang']
    filenames = ['*.erl', '*.hrl', '*.es', '*.escript']
    mimetypes = ['text/x-erlang']
    # Reserved words of the language proper.
    keywords = (
        'after', 'begin', 'case', 'catch', 'cond', 'end', 'fun', 'if',
        'let', 'of', 'query', 'receive', 'try', 'when',
    )
    builtins = ( # See erlang(3) man page
        'abs', 'append_element', 'apply', 'atom_to_list', 'binary_to_list',
        'bitstring_to_list', 'binary_to_term', 'bit_size', 'bump_reductions',
        'byte_size', 'cancel_timer', 'check_process_code', 'delete_module',
        'demonitor', 'disconnect_node', 'display', 'element', 'erase', 'exit',
        'float', 'float_to_list', 'fun_info', 'fun_to_list',
        'function_exported', 'garbage_collect', 'get', 'get_keys',
        'group_leader', 'hash', 'hd', 'integer_to_list', 'iolist_to_binary',
        'iolist_size', 'is_atom', 'is_binary', 'is_bitstring', 'is_boolean',
        'is_builtin', 'is_float', 'is_function', 'is_integer', 'is_list',
        'is_number', 'is_pid', 'is_port', 'is_process_alive', 'is_record',
        'is_reference', 'is_tuple', 'length', 'link', 'list_to_atom',
        'list_to_binary', 'list_to_bitstring', 'list_to_existing_atom',
        'list_to_float', 'list_to_integer', 'list_to_pid', 'list_to_tuple',
        'load_module', 'localtime_to_universaltime', 'make_tuple', 'md5',
        'md5_final', 'md5_update', 'memory', 'module_loaded', 'monitor',
        'monitor_node', 'node', 'nodes', 'open_port', 'phash', 'phash2',
        'pid_to_list', 'port_close', 'port_command', 'port_connect',
        'port_control', 'port_call', 'port_info', 'port_to_list',
        'process_display', 'process_flag', 'process_info', 'purge_module',
        'put', 'read_timer', 'ref_to_list', 'register', 'resume_process',
        'round', 'send', 'send_after', 'send_nosuspend', 'set_cookie',
        'setelement', 'size', 'spawn', 'spawn_link', 'spawn_monitor',
        'spawn_opt', 'split_binary', 'start_timer', 'statistics',
        'suspend_process', 'system_flag', 'system_info', 'system_monitor',
        'system_profile', 'term_to_binary', 'tl', 'trace', 'trace_delivered',
        'trace_info', 'trace_pattern', 'trunc', 'tuple_size', 'tuple_to_list',
        'universaltime_to_localtime', 'unlink', 'unregister', 'whereis'
    )
    operators = r'(\+\+?|--?|\*|/|<|>|/=|=:=|=/=|=<|>=|==?|<-|!|\?)'
    word_operators = (
        'and', 'andalso', 'band', 'bnot', 'bor', 'bsl', 'bsr', 'bxor',
        'div', 'not', 'or', 'orelse', 'rem', 'xor'
    )
    # Reusable regex fragments for atoms, variables, escapes and macros,
    # composed into the token rules below.
    atom_re = r"(?:[a-z]\w*|'[^\n']*[^\\]')"
    variable_re = r'(?:[A-Z_]\w*)'
    esc_char_re = r'[bdefnrstv\'"\\]'
    esc_octal_re = r'[0-7][0-7]?[0-7]?'
    esc_hex_re = r'(?:x[0-9a-fA-F]{2}|x\{[0-9a-fA-F]+\})'
    esc_ctrl_re = r'\^[a-zA-Z]'
    escape_re = r'(?:\\(?:'+esc_char_re+r'|'+esc_octal_re+r'|'+esc_hex_re+r'|'+esc_ctrl_re+r'))'
    macro_re = r'(?:'+variable_re+r'|'+atom_re+r')'
    base_re = r'(?:[2-9]|[12][0-9]|3[0-6])'
    # Lexer states: 'root' handles ordinary code; 'string', 'directive'
    # and the map_* states handle nested constructs.
    tokens = {
        'root': [
            (r'\s+', Text),
            (r'%.*\n', Comment),
            (words(keywords, suffix=r'\b'), Keyword),
            (words(builtins, suffix=r'\b'), Name.Builtin),
            (words(word_operators, suffix=r'\b'), Operator.Word),
            (r'^-', Punctuation, 'directive'),
            (operators, Operator),
            (r'"', String, 'string'),
            (r'<<', Name.Label),
            (r'>>', Name.Label),
            ('(' + atom_re + ')(:)', bygroups(Name.Namespace, Punctuation)),
            ('(?:^|(?<=:))(' + atom_re + r')(\s*)(\()',
             bygroups(Name.Function, Text, Punctuation)),
            (r'[+-]?' + base_re + r'#[0-9a-zA-Z]+', Number.Integer),
            (r'[+-]?\d+', Number.Integer),
            (r'[+-]?\d+.\d+', Number.Float),
            (r'[]\[:_@\".{}()|;,]', Punctuation),
            (variable_re, Name.Variable),
            (atom_re, Name),
            (r'\?'+macro_re, Name.Constant),
            (r'\$(?:'+escape_re+r'|\\[ %]|[^\\])', String.Char),
            (r'#'+atom_re+r'(:?\.'+atom_re+r')?', Name.Label),
            # Erlang script shebang
            (r'\A#!.+\n', Comment.Hashbang),
            # EEP 43: Maps
            # http://www.erlang.org/eeps/eep-0043.html
            (r'#\{', Punctuation, 'map_key'),
        ],
        'string': [
            (escape_re, String.Escape),
            (r'"', String, '#pop'),
            (r'~[0-9.*]*[~#+BPWXb-ginpswx]', String.Interpol),
            (r'[^"\\~]+', String),
            (r'~', String),
        ],
        'directive': [
            (r'(define)(\s*)(\()('+macro_re+r')',
             bygroups(Name.Entity, Text, Punctuation, Name.Constant), '#pop'),
            (r'(record)(\s*)(\()('+macro_re+r')',
             bygroups(Name.Entity, Text, Punctuation, Name.Label), '#pop'),
            (atom_re, Name.Entity, '#pop'),
        ],
        'map_key': [
            include('root'),
            (r'=>', Punctuation, 'map_val'),
            (r':=', Punctuation, 'map_val'),
            (r'\}', Punctuation, '#pop'),
        ],
        'map_val': [
            include('root'),
            (r',', Punctuation, '#pop'),
            (r'(?=\})', Punctuation, '#pop'),
        ],
    }
class ErlangShellLexer(Lexer):
    """
    Shell sessions in erl (for Erlang code).
    .. versionadded:: 1.1
    """
    name = 'Erlang erl session'
    aliases = ['erl']
    filenames = ['*.erl-sh']
    mimetypes = ['text/x-erl-shellsession']
    # erl prompts look like "1> ", "2> ", ...
    _prompt_re = re.compile(r'\d+>(?=\s|\Z)')
    def get_tokens_unprocessed(self, text):
        # Prompted lines are accumulated and lexed as Erlang code; lines
        # starting with '*' are tracebacks; everything else is output.
        erlexer = ErlangLexer(**self.options)
        curcode = ''
        insertions = []
        for match in line_re.finditer(text):
            line = match.group()
            m = self._prompt_re.match(line)
            if m is not None:
                end = m.end()
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, line[:end])]))
                curcode += line[end:]
            else:
                if curcode:
                    # Flush accumulated code through the Erlang lexer,
                    # splicing the prompt tokens back in at their offsets.
                    for item in do_insertions(insertions,
                                              erlexer.get_tokens_unprocessed(curcode)):
                        yield item
                    curcode = ''
                    insertions = []
                if line.startswith('*'):
                    yield match.start(), Generic.Traceback, line
                else:
                    yield match.start(), Generic.Output, line
        if curcode:
            # Flush any trailing code at end of input.
            for item in do_insertions(insertions,
                                      erlexer.get_tokens_unprocessed(curcode)):
                yield item
def gen_elixir_string_rules(name, symbol, token):
    """Build the lexer state dict for an Elixir string delimited by *symbol*.

    The generated state handles escapes and #{...} interpolation and pops
    when the closing *symbol* is reached.
    """
    return {
        'string_' + name: [
            (r'[^#%s\\]+' % (symbol,), token),
            include('escapes'),
            (r'\\.', token),
            (r'(%s)' % (symbol,), bygroups(token), "#pop"),
            include('interpol')
        ],
    }
def gen_elixir_sigstr_rules(term, token, interpol=True):
    """Build lexer rules for an Elixir sigil string terminated by *term*.

    When *interpol* is true the string also supports escape sequences and
    #{...} interpolation; otherwise only the backslash-anything rule applies.
    """
    if not interpol:
        return [
            (r'[^%s\\]+' % (term,), token),
            (r'\\.', token),
            (r'%s[a-zA-Z]*' % (term,), token, '#pop'),
        ]
    return [
        (r'[^#%s\\]+' % (term,), token),
        include('escapes'),
        (r'\\.', token),
        (r'%s[a-zA-Z]*' % (term,), token, '#pop'),
        include('interpol')
    ]
class ElixirLexer(RegexLexer):
    """
    For the `Elixir language <http://elixir-lang.org>`_.
    .. versionadded:: 1.5
    """
    name = 'Elixir'
    aliases = ['elixir', 'ex', 'exs']
    filenames = ['*.ex', '*.exs']
    mimetypes = ['text/x-elixir']
    # Name-classification tables consulted by get_tokens_unprocessed() to
    # re-tag plain Name tokens produced by the regex rules below.
    KEYWORD = ('fn', 'do', 'end', 'after', 'else', 'rescue', 'catch')
    KEYWORD_OPERATOR = ('not', 'and', 'or', 'when', 'in')
    BUILTIN = (
        'case', 'cond', 'for', 'if', 'unless', 'try', 'receive', 'raise',
        'quote', 'unquote', 'unquote_splicing', 'throw', 'super',
    )
    BUILTIN_DECLARATION = (
        'def', 'defp', 'defmodule', 'defprotocol', 'defmacro', 'defmacrop',
        'defdelegate', 'defexception', 'defstruct', 'defimpl', 'defcallback',
    )
    BUILTIN_NAMESPACE = ('import', 'require', 'use', 'alias')
    CONSTANT = ('nil', 'true', 'false')
    PSEUDO_VAR = ('_', '__MODULE__', '__DIR__', '__ENV__', '__CALLER__')
    OPERATORS3 = (
        '<<<', '>>>', '|||', '&&&', '^^^', '~~~', '===', '!==',
        '~>>', '<~>', '|~>', '<|>',
    )
    OPERATORS2 = (
        '==', '!=', '<=', '>=', '&&', '||', '<>', '++', '--', '|>', '=~',
        '->', '<-', '|', '.', '=', '~>', '<~',
    )
    OPERATORS1 = ('<', '>', '+', '-', '*', '/', '!', '^', '&')
    PUNCTUATION = (
        '\\\\', '<<', '>>', '=>', '(', ')', ':', ';', ',', '[', ']',
    )
    def get_tokens_unprocessed(self, text):
        # Post-process the regex lexer's output: reclassify bare Name
        # tokens as keywords/builtins/constants using the tables above.
        for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):
            if token is Name:
                if value in self.KEYWORD:
                    yield index, Keyword, value
                elif value in self.KEYWORD_OPERATOR:
                    yield index, Operator.Word, value
                elif value in self.BUILTIN:
                    yield index, Keyword, value
                elif value in self.BUILTIN_DECLARATION:
                    yield index, Keyword.Declaration, value
                elif value in self.BUILTIN_NAMESPACE:
                    yield index, Keyword.Namespace, value
                elif value in self.CONSTANT:
                    yield index, Name.Constant, value
                elif value in self.PSEUDO_VAR:
                    yield index, Name.Builtin.Pseudo, value
                else:
                    yield index, token, value
            else:
                yield index, token, value
    def gen_elixir_sigil_rules():
        # Called once at class-creation time (no self) to build the sigil
        # states merged into `tokens` below.
        # all valid sigil terminators (excluding heredocs)
        terminators = [
            (r'\{', r'\}', 'cb'),
            (r'\[', r'\]', 'sb'),
            (r'\(', r'\)', 'pa'),
            (r'<', r'>', 'ab'),
            (r'/', r'/', 'slas'),
            (r'\|', r'\|', 'pipe'),
            ('"', '"', 'quot'),
            ("'", "'", 'apos'),
        ]
        # heredocs have slightly different rules
        triquotes = [(r'"""', 'triquot'), (r"'''", 'triapos')]
        token = String.Other
        states = {'sigils': []}
        for term, name in triquotes:
            # Lowercase sigils interpolate; uppercase sigils do not.
            states['sigils'] += [
                (r'(~[a-z])(%s)' % (term,), bygroups(token, String.Heredoc),
                 (name + '-end', name + '-intp')),
                (r'(~[A-Z])(%s)' % (term,), bygroups(token, String.Heredoc),
                 (name + '-end', name + '-no-intp')),
            ]
            states[name + '-end'] = [
                (r'[a-zA-Z]+', token, '#pop'),
                default('#pop'),
            ]
            states[name + '-intp'] = [
                (r'^\s*' + term, String.Heredoc, '#pop'),
                include('heredoc_interpol'),
            ]
            states[name + '-no-intp'] = [
                (r'^\s*' + term, String.Heredoc, '#pop'),
                include('heredoc_no_interpol'),
            ]
        for lterm, rterm, name in terminators:
            states['sigils'] += [
                (r'~[a-z]' + lterm, token, name + '-intp'),
                (r'~[A-Z]' + lterm, token, name + '-no-intp'),
            ]
            states[name + '-intp'] = gen_elixir_sigstr_rules(rterm, token)
            states[name + '-no-intp'] = \
                gen_elixir_sigstr_rules(rterm, token, interpol=False)
        return states
    # Regex fragments assembled from the operator/punctuation tables above.
    op3_re = "|".join(re.escape(s) for s in OPERATORS3)
    op2_re = "|".join(re.escape(s) for s in OPERATORS2)
    op1_re = "|".join(re.escape(s) for s in OPERATORS1)
    ops_re = r'(?:%s|%s|%s)' % (op3_re, op2_re, op1_re)
    punctuation_re = "|".join(re.escape(s) for s in PUNCTUATION)
    alnum = '\w'
    name_re = r'(?:\.\.\.|[a-z_]%s*[!?]?)' % alnum
    modname_re = r'[A-Z]%(alnum)s*(?:\.[A-Z]%(alnum)s*)*' % {'alnum': alnum}
    complex_name_re = r'(?:%s|%s|%s)' % (name_re, modname_re, ops_re)
    special_atom_re = r'(?:\.\.\.|<<>>|%\{\}|%|\{\})'
    long_hex_char_re = r'(\\x\{)([\da-fA-F]+)(\})'
    hex_char_re = r'(\\x[\da-fA-F]{1,2})'
    escape_char_re = r'(\\[abdefnrstv])'
    tokens = {
        'root': [
            (r'\s+', Text),
            (r'#.*$', Comment.Single),
            # Various kinds of characters
            (r'(\?)' + long_hex_char_re,
             bygroups(String.Char,
                      String.Escape, Number.Hex, String.Escape)),
            (r'(\?)' + hex_char_re,
             bygroups(String.Char, String.Escape)),
            (r'(\?)' + escape_char_re,
             bygroups(String.Char, String.Escape)),
            (r'\?\\?.', String.Char),
            # '::' has to go before atoms
            (r':::', String.Symbol),
            (r'::', Operator),
            # atoms
            (r':' + special_atom_re, String.Symbol),
            (r':' + complex_name_re, String.Symbol),
            (r':"', String.Symbol, 'string_double_atom'),
            (r":'", String.Symbol, 'string_single_atom'),
            # [keywords: ...]
            (r'(%s|%s)(:)(?=\s|\n)' % (special_atom_re, complex_name_re),
             bygroups(String.Symbol, Punctuation)),
            # @attributes
            (r'@' + name_re, Name.Attribute),
            # identifiers
            (name_re, Name),
            (r'(%%?)(%s)' % (modname_re,), bygroups(Punctuation, Name.Class)),
            # operators and punctuation
            (op3_re, Operator),
            (op2_re, Operator),
            (punctuation_re, Punctuation),
            (r'&\d', Name.Entity),   # anon func arguments
            (op1_re, Operator),
            # numbers
            (r'0b[01]+', Number.Bin),
            (r'0o[0-7]+', Number.Oct),
            (r'0x[\da-fA-F]+', Number.Hex),
            (r'\d(_?\d)*\.\d(_?\d)*([eE][-+]?\d(_?\d)*)?', Number.Float),
            (r'\d(_?\d)*', Number.Integer),
            # strings and heredocs
            (r'"""\s*', String.Heredoc, 'heredoc_double'),
            (r"'''\s*$", String.Heredoc, 'heredoc_single'),
            (r'"', String.Double, 'string_double'),
            (r"'", String.Single, 'string_single'),
            include('sigils'),
            (r'%\{', Punctuation, 'map_key'),
            (r'\{', Punctuation, 'tuple'),
        ],
        'heredoc_double': [
            (r'^\s*"""', String.Heredoc, '#pop'),
            include('heredoc_interpol'),
        ],
        'heredoc_single': [
            (r"^\s*'''", String.Heredoc, '#pop'),
            include('heredoc_interpol'),
        ],
        'heredoc_interpol': [
            (r'[^#\\\n]+', String.Heredoc),
            include('escapes'),
            (r'\\.', String.Heredoc),
            (r'\n+', String.Heredoc),
            include('interpol'),
        ],
        'heredoc_no_interpol': [
            (r'[^\\\n]+', String.Heredoc),
            (r'\\.', String.Heredoc),
            (r'\n+', String.Heredoc),
        ],
        'escapes': [
            (long_hex_char_re,
             bygroups(String.Escape, Number.Hex, String.Escape)),
            (hex_char_re, String.Escape),
            (escape_char_re, String.Escape),
        ],
        'interpol': [
            (r'#\{', String.Interpol, 'interpol_string'),
        ],
        'interpol_string': [
            (r'\}', String.Interpol, "#pop"),
            include('root')
        ],
        'map_key': [
            include('root'),
            (r':', Punctuation, 'map_val'),
            (r'=>', Punctuation, 'map_val'),
            (r'\}', Punctuation, '#pop'),
        ],
        'map_val': [
            include('root'),
            (r',', Punctuation, '#pop'),
            (r'(?=\})', Punctuation, '#pop'),
        ],
        'tuple': [
            include('root'),
            (r'\}', Punctuation, '#pop'),
        ],
    }
    # Merge in the generated string, atom and sigil states.
    tokens.update(gen_elixir_string_rules('double', '"', String.Double))
    tokens.update(gen_elixir_string_rules('single', "'", String.Single))
    tokens.update(gen_elixir_string_rules('double_atom', '"', String.Symbol))
    tokens.update(gen_elixir_string_rules('single_atom', "'", String.Symbol))
    tokens.update(gen_elixir_sigil_rules())
class ElixirConsoleLexer(Lexer):
    """
    For Elixir interactive console (iex) output like:
    .. sourcecode:: iex
        iex> [head | tail] = [1,2,3]
        [1,2,3]
        iex> head
        1
        iex> tail
        [2,3]
        iex> [head | tail]
        [1,2,3]
        iex> length [head | tail]
        3
    .. versionadded:: 1.5
    """
    name = 'Elixir iex session'
    aliases = ['iex']
    mimetypes = ['text/x-elixir-shellsession']
    # Matches "iex> ", "iex(1)> " and continuation "...> " prompts.
    _prompt_re = re.compile('(iex|\.{3})(\(\d+\))?> ')
    def get_tokens_unprocessed(self, text):
        # Prompted lines accumulate as Elixir code; '** ' lines start an
        # error message; anything else is output (or error continuation).
        exlexer = ElixirLexer(**self.options)
        curcode = ''
        in_error = False
        insertions = []
        for match in line_re.finditer(text):
            line = match.group()
            if line.startswith(u'** '):
                in_error = True
                insertions.append((len(curcode),
                                   [(0, Generic.Error, line[:-1])]))
                curcode += line[-1:]
            else:
                m = self._prompt_re.match(line)
                if m is not None:
                    in_error = False
                    end = m.end()
                    insertions.append((len(curcode),
                                       [(0, Generic.Prompt, line[:end])]))
                    curcode += line[end:]
                else:
                    if curcode:
                        # Flush accumulated code through the Elixir lexer,
                        # splicing prompt/error tokens back at their offsets.
                        for item in do_insertions(
                                insertions, exlexer.get_tokens_unprocessed(curcode)):
                            yield item
                        curcode = ''
                        insertions = []
                    token = Generic.Error if in_error else Generic.Output
                    yield match.start(), token, line
        if curcode:
            # Flush any trailing code at end of input.
            for item in do_insertions(
                    insertions, exlexer.get_tokens_unprocessed(curcode)):
                yield item
| bsd-2-clause |
kisna72/django | django/contrib/gis/gdal/libgdal.py | 449 | 3598 | from __future__ import unicode_literals
import logging
import os
import re
from ctypes import CDLL, CFUNCTYPE, c_char_p, c_int
from ctypes.util import find_library
from django.contrib.gis.gdal.error import GDALException
from django.core.exceptions import ImproperlyConfigured
logger = logging.getLogger('django.contrib.gis')
# Custom library path set?  GDAL_LIBRARY_PATH in Django settings overrides
# the automatic search below.
try:
    from django.conf import settings
    lib_path = settings.GDAL_LIBRARY_PATH
except (AttributeError, EnvironmentError,
        ImportError, ImproperlyConfigured):
    # Settings unavailable or the option is unset: fall back to searching.
    lib_path = None
if lib_path:
    lib_names = None
elif os.name == 'nt':
    # Windows NT shared libraries (newest GDAL versions first).
    lib_names = ['gdal111', 'gdal110', 'gdal19', 'gdal18', 'gdal17']
elif os.name == 'posix':
    # *NIX library names (newest GDAL versions first).
    lib_names = ['gdal', 'GDAL', 'gdal1.11.0', 'gdal1.10.0', 'gdal1.9.0',
                 'gdal1.8.0', 'gdal1.7.0']
else:
    raise GDALException('Unsupported OS "%s"' % os.name)
# Using the ctypes `find_library` utility to find the
# path to the GDAL library from the list of library names.
if lib_names:
    for lib_name in lib_names:
        lib_path = find_library(lib_name)
        if lib_path is not None:
            # Stop at the first (i.e. newest) library found.
            break
if lib_path is None:
    raise GDALException('Could not find the GDAL library (tried "%s"). '
                        'Try setting GDAL_LIBRARY_PATH in your settings.' %
                        '", "'.join(lib_names))
# This loads the GDAL/OGR C library
lgdal = CDLL(lib_path)
# On Windows, the GDAL binaries have some OSR routines exported with
# STDCALL, while others are not. Thus, the library will also need to
# be loaded up as WinDLL for said OSR functions that require the
# different calling convention.
if os.name == 'nt':
    from ctypes import WinDLL
    lwingdal = WinDLL(lib_path)
def std_call(func):
    """Look up *func* in the GDAL library with the right calling convention.

    On Win32 some OSR routines are exported with STDCALL, so they must be
    resolved through the WinDLL handle; everywhere else the plain CDLL
    handle is used.
    """
    if os.name != 'nt':
        return lgdal[func]
    return lwingdal[func]
# #### Version-information functions. ####
# Returns GDAL library version information with the given key.
_version_info = std_call('GDALVersionInfo')
_version_info.argtypes = [c_char_p]
_version_info.restype = c_char_p
def gdal_version():
    """Return only the GDAL version number information (as bytes)."""
    key = b'RELEASE_NAME'
    return _version_info(key)
def gdal_full_version():
    "Returns the full GDAL version information."
    # The key must be *bytes*: _version_info's argtype is c_char_p, which
    # rejects a (unicode) str on Python 3 with ctypes.ArgumentError.
    # (Compare gdal_version() above, which already passes bytes.)
    return _version_info(b'')
version_regex = re.compile(r'^(?P<major>\d+)\.(?P<minor>\d+)(\.(?P<subminor>\d+))?')
def gdal_version_info():
    """Parse gdal_version() into a dict with 'major', 'minor', 'subminor' keys.

    Raises GDALException if the reported version string does not match the
    expected "X.Y[.Z]" format.
    """
    ver = gdal_version().decode()
    match = version_regex.match(ver)
    if match is None:
        raise GDALException('Could not parse GDAL version string "%s"' % ver)
    return {part: match.group(part) for part in ('major', 'minor', 'subminor')}
_verinfo = gdal_version_info()
GDAL_MAJOR_VERSION = int(_verinfo['major'])
GDAL_MINOR_VERSION = int(_verinfo['minor'])
GDAL_SUBMINOR_VERSION = _verinfo['subminor'] and int(_verinfo['subminor'])
GDAL_VERSION = (GDAL_MAJOR_VERSION, GDAL_MINOR_VERSION, GDAL_SUBMINOR_VERSION)
del _verinfo
# Set library error handling so as errors are logged
CPLErrorHandler = CFUNCTYPE(None, c_int, c_int, c_char_p)
def err_handler(error_class, error_number, message):
    """CPL error handler: routes GDAL C-library errors into Django's logger.

    Called from C via CPLErrorHandler; `message` arrives as bytes (c_char_p).
    """
    # Pass format args lazily so the string is only built if the record
    # is actually emitted.
    logger.error('GDAL_ERROR %d: %s', error_number, message)
err_handler = CPLErrorHandler(err_handler)
def function(name, args, restype):
    """Bind and return the GDAL C function *name* with the given prototype.

    *args* is the list of ctypes argument types; *restype* is the ctypes
    return type.
    """
    prototype = std_call(name)
    prototype.argtypes = args
    prototype.restype = restype
    return prototype
set_error_handler = function('CPLSetErrorHandler', [CPLErrorHandler], CPLErrorHandler)
set_error_handler(err_handler)
| bsd-3-clause |
sestrella/ansible | test/lib/ansible_test/_internal/sanity/validate_modules.py | 19 | 3400 | """Sanity test using validate-modules."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import os
from .. import types as t
from ..sanity import (
SanitySingleVersion,
SanityMessage,
SanityFailure,
SanitySuccess,
SANITY_ROOT,
)
from ..target import (
TestTarget,
)
from ..util import (
SubprocessError,
display,
find_python,
)
from ..util_common import (
run_command,
)
from ..ansible_util import (
ansible_environment,
)
from ..config import (
SanityConfig,
)
from ..data import (
data_context,
)
class ValidateModulesTest(SanitySingleVersion):
    """Sanity test using validate-modules."""
    @property
    def error_code(self): # type: () -> t.Optional[str]
        """Error code for ansible-test matching the format used by the underlying test program, or None if the program does not use error codes."""
        return 'A100'
    def filter_targets(self, targets): # type: (t.List[TestTarget]) -> t.List[TestTarget]
        """Return the given list of test targets, filtered to include only those relevant for the test."""
        # validate-modules only applies to module files.
        return [target for target in targets if target.module]
    def test(self, args, targets, python_version):
        """Run the external validate-modules tool over the included targets
        and convert its JSON report into a TestResult.
        :type args: SanityConfig
        :type targets: SanityTargets
        :type python_version: str
        :rtype: TestResult
        """
        env = ansible_environment(args, color=False)
        settings = self.load_processor(args)
        paths = [target.path for target in targets.include]
        # Invoke validate-modules with the same Python version under test.
        cmd = [
            find_python(python_version),
            os.path.join(SANITY_ROOT, 'validate-modules', 'validate-modules'),
            '--format', 'json',
            '--arg-spec',
        ] + paths
        if data_context().content.collection:
            cmd.extend(['--collection', data_context().content.collection.directory])
        else:
            # Outside a collection, compare module docs against the base
            # branch when one is known (e.g. in CI).
            if args.base_branch:
                cmd.extend([
                    '--base-branch', args.base_branch,
                ])
            else:
                display.warning('Cannot perform module comparison against the base branch. Base branch not detected when running locally.')
        try:
            stdout, stderr = run_command(args, cmd, env=env, capture=True)
            status = 0
        except SubprocessError as ex:
            stdout = ex.stdout
            stderr = ex.stderr
            status = ex.status
        # Any stderr output is unexpected. Exit status 3 appears to mean
        # "issues found, report on stdout" -- TODO confirm against the
        # validate-modules tool itself.
        if stderr or status not in (0, 3):
            raise SubprocessError(cmd=cmd, status=status, stderr=stderr, stdout=stdout)
        if args.explain:
            # --explain: don't parse output, just report success.
            return SanitySuccess(self.name)
        # stdout is a JSON object keyed by filename, each with an 'errors' list.
        messages = json.loads(stdout)
        errors = []
        for filename in messages:
            output = messages[filename]
            for item in output['errors']:
                errors.append(SanityMessage(
                    path=filename,
                    line=int(item['line']) if 'line' in item else 0,
                    column=int(item['column']) if 'column' in item else 0,
                    level='error',
                    code='%s' % item['code'],
                    message=item['msg'],
                ))
        # Apply ignore/skip processing configured for this test.
        errors = settings.process_errors(errors, paths)
        if errors:
            return SanityFailure(self.name, messages=errors)
        return SanitySuccess(self.name)
| gpl-3.0 |
manipopopo/tensorflow | tensorflow/contrib/model_pruning/python/learning.py | 27 | 8058 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrapper around tf-slim's training code contrib/slim/python/slim/learning.py
to support training of pruned models
*******************************************************************
* A simple working training script with support for model pruning *
*******************************************************************
# Load data and create the model:
images, labels = LoadData(...)
predictions = MyModel(images)
# Define the loss:
slim.losses.log_loss(predictions, labels)
total_loss = slim.losses.get_total_loss()
# Define the optimizer:
optimizer = tf.train.MomentumOptimizer(FLAGS.learning_rate, FLAGS.momentum)
# Create the train_op
train_op = slim.learning.create_train_op(total_loss, optimizer)
# Parse pruning hyperparameters
pruning_hparams = pruning.get_pruning_hparams().parse(FLAGS.pruning_hparams)
# Create a pruning object using the pruning_hparams
p = pruning.Pruning(pruning_hparams)
# Add mask update ops to the graph
mask_update_op = p.conditional_mask_update_op()
# Run training.
learning.train(train_op,
my_log_dir,
mask_update_op)
see contrib/slim/python/slim/learning.py for additional examples
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import slim as _slim
_USE_DEFAULT = 0
train_step = _slim.learning.train_step
def train(train_op,
          logdir,
          mask_update_op,
          train_step_fn=train_step,
          train_step_kwargs=_USE_DEFAULT,
          log_every_n_steps=1,
          graph=None,
          master='',
          is_chief=True,
          global_step=None,
          number_of_steps=None,
          init_op=_USE_DEFAULT,
          init_feed_dict=None,
          local_init_op=_USE_DEFAULT,
          init_fn=None,
          ready_op=_USE_DEFAULT,
          summary_op=_USE_DEFAULT,
          save_summaries_secs=600,
          summary_writer=_USE_DEFAULT,
          startup_delay_steps=0,
          saver=None,
          save_interval_secs=600,
          sync_optimizer=None,
          session_config=None,
          trace_every_n_steps=None):
  """Wrapper around tf-slim's train function that supports model pruning.

  Runs a training loop using a TensorFlow supervisor, executing
  `mask_update_op` after every gradient step so pruning masks and
  thresholds stay current. When the sync_optimizer is supplied, gradient
  updates are applied synchronously. Otherwise, gradient updates are
  applied asynchronous.

  Args:
    train_op: A `Tensor` that, when executed, will apply the gradients and
      return the loss value.
    logdir: The directory where training logs are written to. If None, model
      checkpoints and summaries will not be written.
    mask_update_op: Operation that upon execution updates the weight masks and
      thresholds.
    train_step_fn: The function to call in order to execute a single gradient
      step. The function must take exactly four arguments: the current
      session, the `train_op` `Tensor`, a global step `Tensor` and a
      dictionary.
    train_step_kwargs: A dictionary which is passed to the `train_step_fn`. By
      default, two `Boolean`, scalar ops called "should_stop" and "should_log"
      are provided.
    log_every_n_steps: The frequency, in terms of global steps, that the loss
      and global step and logged.
    graph: The graph to pass to the supervisor. If no graph is supplied the
      default graph is used.
    master: The address of the tensorflow master.
    is_chief: Specifies whether or not the training is being run by the primary
      replica during replica training.
    global_step: The `Tensor` representing the global step. If left as `None`,
      then slim.variables.get_or_create_global_step() is used.
    number_of_steps: The max number of gradient steps to take during training,
      as measured by 'global_step': training will stop if global_step is
      greater than 'number_of_steps'. If the value is left as None, training
      proceeds indefinitely.
    init_op: The initialization operation. If left to its default value, then
      the session is initialized by calling `tf.global_variables_initializer()`.
    init_feed_dict: A feed dictionary to use when executing the `init_op`.
    local_init_op: The local initialization operation. If left to its default
      value, then the session is initialized by calling
      `tf.local_variables_initializer()` and `tf.tables_initializer()`.
    init_fn: An optional callable to be executed after `init_op` is called. The
      callable must accept one argument, the session being initialized.
    ready_op: Operation to check if the model is ready to use. If left to its
      default value, then the session checks for readiness by calling
      `tf.report_uninitialized_variables()`.
    summary_op: The summary operation.
    save_summaries_secs: How often, in seconds, to save summaries.
    summary_writer: `SummaryWriter` to use. Can be `None`
      to indicate that no summaries should be written. If unset, we
      create a SummaryWriter.
    startup_delay_steps: The number of steps to wait for before beginning. Note
      that this must be 0 if a sync_optimizer is supplied.
    saver: Saver to save checkpoints. If None, a default one will be created
      and used.
    save_interval_secs: How often, in seconds, to save the model to `logdir`.
    sync_optimizer: an instance of tf.train.SyncReplicasOptimizer, or a list of
      them. If the argument is supplied, gradient updates will be synchronous.
      If left as `None`, gradient updates will be asynchronous.
    session_config: An instance of `tf.ConfigProto` that will be used to
      configure the `Session`. If left as `None`, the default will be used.
    trace_every_n_steps: produce and save a `Timeline` in Chrome trace format
      and add it to the summaries every `trace_every_n_steps`. If None, no trace
      information will be produced or saved.

  Returns:
    the value of the loss function after training.

  Raises:
    ValueError: if `train_op` is empty or if `startup_delay_steps` is
      non-zero when `sync_optimizer` is supplied, if `number_of_steps` is
      negative, or if `trace_every_n_steps` is not `None` and no `logdir` is
      provided.
  """
  def train_step_with_pruning_fn(sess, train_op, global_step,
                                 train_step_kwargs):
    # Run the user-supplied training step first, then update the pruning
    # masks/thresholds so they take effect before the next step.
    total_loss, should_stop = train_step_fn(sess, train_op, global_step,
                                            train_step_kwargs)
    sess.run(mask_update_op)
    return total_loss, should_stop
  # Delegate everything else to tf-slim, substituting the wrapped step fn.
  total_loss, _ = _slim.learning.train(
      train_op,
      logdir,
      train_step_fn=train_step_with_pruning_fn,
      train_step_kwargs=train_step_kwargs,
      log_every_n_steps=log_every_n_steps,
      graph=graph,
      master=master,
      is_chief=is_chief,
      global_step=global_step,
      number_of_steps=number_of_steps,
      init_op=init_op,
      init_feed_dict=init_feed_dict,
      local_init_op=local_init_op,
      init_fn=init_fn,
      ready_op=ready_op,
      summary_op=summary_op,
      save_summaries_secs=save_summaries_secs,
      summary_writer=summary_writer,
      startup_delay_steps=startup_delay_steps,
      saver=saver,
      save_interval_secs=save_interval_secs,
      sync_optimizer=sync_optimizer,
      session_config=session_config,
      trace_every_n_steps=trace_every_n_steps)
  return total_loss
| apache-2.0 |
MinerKasch/dd-agent | utils/debug.py | 5 | 1832 | # (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
from pprint import pprint
import inspect
import os
import sys
# datadog
from config import get_checksd_path, get_confd_path
from util import get_os
def run_check(name, path=None):
"""
Test custom checks on Windows.
"""
# Read the config file
confd_path = path or os.path.join(get_confd_path(get_os()), '%s.yaml' % name)
try:
f = open(confd_path)
except IOError:
raise Exception('Unable to open configuration at %s' % confd_path)
config_str = f.read()
f.close()
# Run the check
check, instances = get_check(name, config_str)
if not instances:
raise Exception('YAML configuration returned no instances.')
for instance in instances:
check.check(instance)
if check.has_events():
print "Events:\n"
pprint(check.get_events(), indent=4)
print "Metrics:\n"
pprint(check.get_metrics(), indent=4)
def get_check(name, config_str):
    """
    Import the named check module and instantiate its AgentCheck subclass.

    Args:
        name: the check module name (importable from the checksd path).
        config_str: the raw YAML configuration text for the check.

    Returns:
        The (check, instances) tuple produced by ``from_yaml``.

    Raises:
        Exception: if the module defines no class inheriting from AgentCheck.
    """
    from checks import AgentCheck
    checksd_path = get_checksd_path(get_os())
    if checksd_path not in sys.path:
        sys.path.append(checksd_path)
    check_module = __import__(name)
    check_class = None
    classes = inspect.getmembers(check_module, inspect.isclass)
    # BUGFIX: don't reuse `name` as the loop variable. The original code did,
    # clobbering the check name used in the error message below and in the
    # from_yaml(check_name=...) call.
    for cls_name, clsmember in classes:
        if AgentCheck in clsmember.__bases__:
            check_class = clsmember
            break
    if check_class is None:
        raise Exception("Unable to import check %s. Missing a class that inherits AgentCheck" % name)
    agentConfig = {
        'version': '0.1',
        'api_key': 'tota'
    }
    return check_class.from_yaml(yaml_text=config_str, check_name=name,
                                 agentConfig=agentConfig)
| bsd-3-clause |
pombredanne/0install | zeroinstall/injector/packagekit.py | 2 | 13740 | """
PackageKit integration.
"""
# Copyright (C) 2010, Aleksey Lim
# See the README file for details, or visit http://0install.net.
import os, sys
import locale
import logging
from zeroinstall import _, SafeException
from zeroinstall.support import tasks, unicode
from zeroinstall.injector import download, model
_logger_pk = logging.getLogger('0install.packagekit')
#_logger_pk.setLevel(logging.DEBUG)
try:
import dbus
import dbus.mainloop.glib
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
except Exception as ex:
_logger_pk.info("D-BUS not available: %s", ex)
dbus = None
MAX_PACKAGE_KIT_TRANSACTION_SIZE = 100
class PackageKit(object):
	"""Caching client for the system PackageKit D-Bus service.

	Candidate queries are batched (PackageKit handles separate queries
	slowly) and cached in self._candidates so each package is only ever
	resolved once per session.
	"""
	def __init__(self):
		# False = not probed yet; None = probed and unavailable (see `pk`).
		self._pk = False
		self._candidates = {}	# { package_name : [ (version, arch, size) ] | Blocker }
		# PackageKit is really slow at handling separate queries, so we use this to
		# batch them up.
		self._next_batch = set()
	@property
	def available(self):
		# True when the PackageKit D-Bus service could be reached.
		return self.pk is not None
	@property
	def pk(self):
		"""Lazily-created D-Bus proxy for org.freedesktop.PackageKit, or None
		if D-Bus or the service is unavailable. The probe result is cached."""
		if self._pk is False:
			if dbus is None:
				self._pk = None
			else:
				try:
					self._pk = dbus.Interface(dbus.SystemBus().get_object(
								'org.freedesktop.PackageKit',
								'/org/freedesktop/PackageKit', False),
							'org.freedesktop.PackageKit')
					_logger_pk.info(_('PackageKit dbus service found'))
				except Exception as ex:
					_logger_pk.info(_('PackageKit dbus service not found: %s'), ex)
					self._pk = None
		return self._pk
	def get_candidates(self, package_name, factory, prefix):
		"""Add any cached candidates.
		The candidates are those discovered by a previous call to L{fetch_candidates}.
		@param package_name: the distribution's name for the package
		@type package_name: str
		@param factory: a function to add a new implementation to the feed
		@param prefix: the prefix for the implementation's ID
		@type prefix: str"""
		candidates = self._candidates.get(package_name, None)
		if candidates is None:
			return
		if isinstance(candidates, tasks.Blocker):
			return # Fetch still in progress
		for candidate in candidates:
			impl_name = '%s:%s:%s:%s' % (prefix, package_name, candidate['version'], candidate['arch'])
			impl = factory(impl_name, only_if_missing = True, installed = candidate['installed'])
			if impl is None:
				# (checking this way because the cached candidate['installed'] may be stale)
				return # Already installed
			impl.version = model.parse_version(candidate['version'])
			if candidate['arch'] != '*':
				impl.machine = candidate['arch']
			impl.download_sources.append(model.DistributionSource(package_name, candidate['size'], packagekit_id = candidate['packagekit_id']))
	@tasks.async
	def fetch_candidates(self, package_names):
		"""Query PackageKit for candidate versions of the given packages.
		Yields until all related queries have completed; results land in
		self._candidates.
		@type package_names: [str]"""
		assert self.pk
		# Batch requests up
		self._next_batch |= set(package_names)
		yield
		batched_package_names = self._next_batch
		self._next_batch = set()
		# The first fetch_candidates instance will now have all the packages.
		# For the others, batched_package_names will now be empty.
		# Fetch any we're missing.
		self._fetch_batch(list(batched_package_names))
		results = [self._candidates[p] for p in package_names]
		# (use set because a single Blocker may be checking multiple
		# packages and we need to avoid duplicates).
		in_progress = list(set([b for b in results if isinstance(b, tasks.Blocker)]))
		_logger_pk.debug('Currently querying PackageKit for: %s', in_progress)
		while in_progress:
			yield in_progress
			in_progress = [b for b in in_progress if not b.happened]
	def _fetch_batch(self, package_names):
		"""Ensure that each of these packages is in self._candidates.
		Start a new fetch if necessary. Ignore packages that are already downloaded or
		in the process of being downloaded."""
		# (do we need a 'force' argument here?)
		package_names = [n for n in package_names if n not in self._candidates]
		def do_batch(package_names):
			# Resolve the names, then fetch details (size etc) for each
			# resolved packagekit_id; callbacks fill in self._candidates.
			#_logger_pk.info("sending %d packages in batch", len(package_names))
			versions = {}
			blocker = None
			def error_cb(sender):
				# Note: probably just means the package wasn't found
				_logger_pk.info(_('Transaction failed: %s(%s)'), sender.error_code, sender.error_details)
				blocker.trigger()
			def details_cb(sender):
				# The key can be a dbus.String sometimes, so convert to a Python
				# string to be sure we get a match.
				details = {}
				for packagekit_id, d in sender.details.items():
					details[unicode(packagekit_id)] = d
				for packagekit_id in details:
					if packagekit_id not in versions:
						_logger_pk.info("Unexpected package info for '%s'; was expecting one of %r", packagekit_id, list(versions.keys()))
				for packagekit_id, info in versions.items():
					if packagekit_id in details:
						info.update(details[packagekit_id])
						info['packagekit_id'] = packagekit_id
						if (info['name'] not in self._candidates or
						    isinstance(self._candidates[info['name']], tasks.Blocker)):
							self._candidates[info['name']] = [info]
						else:
							self._candidates[info['name']].append(info)
					else:
						_logger_pk.info(_('Empty details for %s'), packagekit_id)
				blocker.trigger()
			def resolve_cb(sender):
				if sender.package:
					_logger_pk.debug(_('Resolved %r'), sender.package)
					for packagekit_id, info in sender.package.items():
						packagekit_id = unicode(packagekit_id) # Can be a dbus.String sometimes
						# Strip any epoch/data suffix from the 4th field
						# (id format: name;version;arch;data).
						parts = packagekit_id.split(';', 3)
						if ':' in parts[3]:
							parts[3] = parts[3].split(':', 1)[0]
						packagekit_id = ';'.join(parts)
						versions[packagekit_id] = info
					tran = _PackageKitTransaction(self.pk, details_cb, error_cb)
					tran.proxy.GetDetails(list(versions.keys()))
				else:
					_logger_pk.info(_('Empty resolve for %s'), package_names)
					blocker.trigger()
			# Send queries
			blocker = tasks.Blocker('PackageKit %s' % package_names)
			for package in package_names:
				self._candidates[package] = blocker
			try:
				_logger_pk.debug(_('Ask for %s'), package_names)
				tran = _PackageKitTransaction(self.pk, resolve_cb, error_cb)
				tran.Resolve(package_names)
			except:
				# Unblock any waiters with the failure before re-raising.
				__, ex, tb = sys.exc_info()
				blocker.trigger((ex, tb))
				raise
		# Now we've collected all the requests together, split them up into chunks
		# that PackageKit can handle ( < 100 per batch )
		#_logger_pk.info("sending %d packages", len(package_names))
		while package_names:
			next_batch = package_names[:MAX_PACKAGE_KIT_TRANSACTION_SIZE]
			package_names = package_names[MAX_PACKAGE_KIT_TRANSACTION_SIZE:]
			do_batch(next_batch)
class PackageKitDownload(object):
	"""A download-like object tracking a PackageKit package installation.

	Mirrors the interface of zeroinstall's download objects (status,
	downloaded blocker, progress accessors) so installs can be reported
	like ordinary downloads.
	"""
	def __init__(self, url, hint, pk, packagekit_id, expected_size):
		"""@type url: str
		@type packagekit_id: str
		@type expected_size: int"""
		self.url = url
		self.status = download.download_fetching
		self.hint = hint
		self.aborted_by_user = False
		# Blocker that triggers when the install finishes (or fails).
		self.downloaded = None
		self.expected_size = expected_size
		self.packagekit_id = packagekit_id
		self._impl = hint
		self._transaction = None
		self.pk = pk
		def error_cb(sender):
			# Install failed: mark the download failed and propagate the error.
			self.status = download.download_failed
			ex = SafeException('PackageKit install failed: %s' % (sender.error_details or sender.error_code))
			self.downloaded.trigger(exception = (ex, None))
		def installed_cb(sender):
			# Install succeeded: update the implementation's state and
			# apply any distribution-specific fixups.
			if hasattr(self._impl, 'installed'):
				assert not self._impl.installed, self._impl
				self._impl.installed = True
			self._impl.distro.installed_fixup(self._impl)
			self.status = download.download_complete
			self.downloaded.trigger()
		def install_packages():
			package_name = self.packagekit_id
			self._transaction = _PackageKitTransaction(self.pk, installed_cb, error_cb)
			self._transaction.InstallPackages([package_name])
		# May need to obtain PolicyKit authorization before installing.
		_auth_wrapper(install_packages)
		self.downloaded = tasks.Blocker('PackageKit install %s' % self.packagekit_id)
	def abort(self):
		"""Cancel the in-progress PackageKit transaction."""
		_logger_pk.debug(_('Cancel transaction'))
		self.aborted_by_user = True
		self._transaction.proxy.Cancel()
		self.status = download.download_failed
		self.downloaded.trigger()
	def get_current_fraction(self):
		"""Fraction of the install completed, or None if unknown.
		@rtype: float"""
		if self._transaction is None:
			return None
		percentage = self._transaction.getPercentage()
		if percentage > 100:
			# PackageKit reports 101 for "unknown".
			return None
		else:
			return float(percentage) / 100.
	def get_bytes_downloaded_so_far(self):
		"""Estimated bytes downloaded, derived from the completed fraction.
		@rtype: int"""
		fraction = self.get_current_fraction()
		if fraction is None:
			return 0
		else:
			if self.expected_size is None:
				return 0
			return int(self.expected_size * fraction)
def _auth_wrapper(method, *args):
	"""Call method(*args), obtaining PolicyKit authorization if refused.

	If the call fails with RefusedByPolicy, ask the session PolicyKit
	authentication agent for the required privilege and retry once.
	Any other D-Bus error is re-raised unchanged.
	"""
	try:
		return method(*args)
	except dbus.exceptions.DBusException as e:
		if e.get_dbus_name() != \
			'org.freedesktop.PackageKit.Transaction.RefusedByPolicy':
			raise
		# The error message has the form "<interface> <auth_...>".
		iface, auth = e.get_dbus_message().split()
		if not auth.startswith('auth_'):
			raise
		_logger_pk.debug(_('Authentication required for %s'), auth)
		pk_auth = dbus.SessionBus().get_object(
			'org.freedesktop.PolicyKit.AuthenticationAgent', '/',
			'org.gnome.PolicyKit.AuthorizationManager.SingleInstance')
		# Prompt the user; give up (re-raise) if authorization is denied.
		if not pk_auth.ObtainAuthorization(iface, dbus.UInt32(0),
			dbus.UInt32(os.getpid()), timeout=300):
			raise
		return method(*args)
class _PackageKitTransaction(object):
	"""Wrapper around a single PackageKit D-Bus transaction.

	Connects the interesting transaction signals to collector callbacks
	and papers over API differences between PackageKit versions (the
	0.8.1 API changed several method signatures) and Ubuntu's aptdaemon
	implementation.
	"""
	def __init__(self, pk, finished_cb=None, error_cb=None):
		self._finished_cb = finished_cb
		self._error_cb = error_cb
		self.error_code = None
		self.error_details = None
		self.package = {}	# {packagekit_id: parsed package info}
		self.details = {}	# {packagekit_id: licence/group/size details}
		self.files = {}
		try:
			# Put this first in case Ubuntu's aptdaemon doesn't like
			# CreateTransaction.
			tid = pk.GetTid()
			self.have_0_8_1_api = False
		except dbus.exceptions.DBusException:
			tid = pk.CreateTransaction()
			self.have_0_8_1_api = True
		self.object = dbus.SystemBus().get_object(
			'org.freedesktop.PackageKit', tid, False)
		self.proxy = dbus.Interface(self.object,
			'org.freedesktop.PackageKit.Transaction')
		self._props = dbus.Interface(self.object, dbus.PROPERTIES_IFACE)
		self._signals = []
		for signal, cb in [('Finished', self.__finished_cb),
				   ('ErrorCode', self.__error_code_cb),
				   ('StatusChanged', self.__status_changed_cb),
				   ('Package', self.__package_cb),
				   ('Details', self.__details_cb),
				   ('Files', self.__files_cb)]:
			self._signals.append(self.proxy.connect_to_signal(signal, cb))
		defaultlocale = locale.getdefaultlocale()[0]
		if defaultlocale is not None:
			# SetHints is the 0.8.1+ spelling; SetLocale the older one.
			self.compat_call([
				('SetHints', ['locale=%s' % defaultlocale]),
				('SetLocale', defaultlocale),
				])
	def getPercentage(self):
		"""Completion percentage of the transaction.
		@rtype: int"""
		result = self.get_prop('Percentage')
		if result is None:
			# Older API exposes progress via GetProgress() instead.
			result, __, __, __ = self.proxy.GetProgress()
		return result
	def get_prop(self, prop, default = None):
		"""Read a transaction property, returning *default* on any error.
		@type prop: str"""
		try:
			return self._props.Get('org.freedesktop.PackageKit.Transaction', prop)
		except:
			return default
	# note: Ubuntu's aptdaemon implementation of PackageKit crashes if passed the wrong
	# arguments (rather than returning InvalidArgs), so always try its API first.
	def compat_call(self, calls):
		"""Try each (method, args...) tuple in order until one is accepted."""
		for call in calls:
			method = call[0]
			args = call[1:]
			try:
				dbus_method = self.proxy.get_dbus_method(method)
				return dbus_method(*args)
			except dbus.exceptions.DBusException as e:
				if e.get_dbus_name() not in (
						'org.freedesktop.DBus.Error.UnknownMethod',
						'org.freedesktop.DBus.Error.InvalidArgs'):
					raise
		raise Exception('Cannot call %r DBus method' % calls)
	def __finished_cb(self, exit, runtime):
		"""Disconnect signals and dispatch to the error or finished callback.
		@type exit: str
		@type runtime: int"""
		_logger_pk.debug(_('Transaction finished: %s'), exit)
		for i in self._signals:
			i.remove()
		if self.error_code is not None:
			self._error_cb(self)
		else:
			self._finished_cb(self)
	def __error_code_cb(self, code, details):
		# Remember the error; it is reported from __finished_cb.
		_logger_pk.info(_('Transaction failed: %s(%s)'), details, code)
		self.error_code = code
		self.error_details = details
	def __package_cb(self, status, id, summary):
		"""Parse a Package signal into self.package[id].
		@type status: str
		@type id: str
		@type summary: str"""
		try:
			from zeroinstall.injector import distro
			# id format: name;version;arch;repo
			package_name, version, arch, repo_ = id.split(';')
			clean_version = distro.try_cleanup_distro_version(version)
			if not clean_version:
				_logger_pk.info(_("Can't parse distribution version '%(version)s' for package '%(package)s'"), {'version': version, 'package': package_name})
				return
			clean_arch = distro.canonical_machine(arch)
			package = {'version': clean_version,
				   'name': package_name,
				   'arch': clean_arch,
				   'installed': (status == 'installed')}
			_logger_pk.debug(_('Package: %s %r'), id, package)
			self.package[str(id)] = package
		except Exception as ex:
			# Don't let one malformed package break the whole transaction.
			_logger_pk.warn("__package_cb(%s, %s, %s): %s", status, id, summary, ex)
	def __details_cb(self, id, licence, group, detail, url, size):
		"""Record a Details signal for a resolved package.
		@type id: str
		@type licence: str
		@type group: str
		@type detail: str
		@type url: str
		@type size: int"""
		details = {'licence': str(licence),
			   'group': str(group),
			   'detail': str(detail),
			   'url': str(url),
			   'size': int(size)}
		_logger_pk.debug(_('Details: %s %r'), id, details)
		self.details[id] = details
	def __files_cb(self, id, files):
		# Files arrive as a single ';'-separated string.
		self.files[id] = files.split(';')
	def __status_changed_cb(self, status):
		"""Status updates are ignored; progress is polled via getPercentage().
		@type status: str"""
		pass
	def Resolve(self, package_names):
		"""Resolve package names, handling the 0.8.1 API signature change.
		@type package_names: [str]"""
		if self.have_0_8_1_api:
			self.proxy.Resolve(dbus.UInt64(0), package_names)
		else:
			self.proxy.Resolve('none', package_names)
	def InstallPackages(self, package_names):
		"""Install packages, handling the 0.8.1 API signature change.
		@type package_names: [str]"""
		if self.have_0_8_1_api:
			self.proxy.InstallPackages(dbus.UInt64(0), package_names)
		else:
			self.compat_call([
				('InstallPackages', False, package_names),
				('InstallPackages', package_names),
				])
| lgpl-2.1 |
googlearchive/storage-getting-started-python | gcs/gcs_error.py | 2 | 1381 | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Storage error class."""
__author__ = 'kbrisbin@google.com (Kathryn Hurley)'
class GcsError(Exception):
  """Raised when a Google Cloud Storage API call returns a non-20x status.

  Attributes:
    status: The string status of the HTTP response.
    message: A string message explaining the error.
  """

  def __init__(self, status, message):
    """Store the HTTP status and the explanatory message.

    Args:
      status: String status of the HTTP response.
      message: A string message explaining the error.
    """
    self.status, self.message = status, message

  def __str__(self):
    """Render the error as <status repr>: <message repr>."""
    return '{0!r}: {1!r}'.format(self.status, self.message)
| apache-2.0 |
hmpf/nav | doc/conf.py | 1 | 9193 | # -*- coding: utf-8 -*-
#
# NAV documentation build configuration file, created by
# sphinx-quickstart on Tue Feb 8 10:54:59 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.join(os.path.abspath('..'), 'python'))
sys.path.insert(0, os.path.abspath("exts"))
from nav import buildconf
from nav import bootstrap
bootstrap.bootstrap_django('doc')
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo', 'sphinx.ext.ifconfig', 'xref']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'contents'
# General information about the project.
project = u'NAV'
copyright = u'2012-2019, Uninett AS'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'dev'
#version = '.'.join(buildconf.VERSION.split('.')[:2])
# The full version, including alpha/beta/rc tags.
# NOTE(review): buildconf must be imported earlier in this file (not visible
# in this chunk) -- confirm.
release = buildconf.VERSION
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
# NOTE(review): exclude_trees was deprecated long ago in favour of
# exclude_patterns; only works if the docs build with an old Sphinx -- confirm.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'bootstrap'
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [os.path.join(os.path.abspath(os.path.dirname(__file__)), "templates")]
html_logo = "templates/bootstrap/static/nav-logo.svg"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    # Navigation bar title. (Default: ``project`` value)
    'navbar_title': " ",
    # Tab name for entire site. (Default: "Site")
    'navbar_site_name': "Contents",
    # A list of tuples containing pages or urls to link to.
    # Valid tuples should be in the following forms:
    # (name, page) # a link to a page
    # (name, "/aa/bb", 1) # a link to an arbitrary relative url
    # (name, "http://example.com", True) # arbitrary absolute url
    # Note the "1" or "True" value above as the third argument to indicate
    # an arbitrary url.
    #'navbar_links': [
    # ("Examples", "examples"),
    # ("Link", "http://example.com", True),
    #],
    # Render the next and previous page links in navbar. (Default: true)
    'navbar_sidebarrel': True,
    # Render the current pages TOC in the navbar. (Default: true)
    'navbar_pagenav': True,
    # Global TOC depth for "site" navbar tab. (Default: 1)
    # Switching to -1 shows all levels.
    'globaltoc_depth': 2,
    # Include hidden TOCs in Site navbar?
    #
    # Note: If this is "false", you cannot have mixed ``:hidden:`` and
    # non-hidden ``toctree`` directives in the same page, or else the build
    # will break.
    #
    # Values: "true" (default) or "false"
    'globaltoc_includehidden': "true",
    # HTML navbar class (Default: "navbar") to attach to <div> element.
    # For black navbar, do "navbar navbar-inverse"
    'navbar_class': "navbar",
    # Fix navigation bar to top of page?
    # Values: "true" (default) or "false"
    'navbar_fixed_top': "true",
    # Location of link to source.
    # Options are "nav" (default), "footer" or anything else to exclude.
    'source_link_position': "navbar-brand",
    # Bootswatch (http://bootswatch.com/) theme.
    #
    # Options are nothing with "" (default) or the name of a valid theme
    # such as "amelia" or "cosmo".
    'bootswatch_theme': "flatly",
    # Choose Bootstrap version.
    # Values: "3" (default) or "2" (in quotes)
    'bootstrap_version': "3",
}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {'**': ['localtoc.html', 'sourcelink.html', 'searchbox.html']}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'NAVdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'NAV.tex', u'NAV Documentation',
     u'Uninett AS', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}
# External links definitions
xref_links = {
    "Graphite": ("Graphite", "https://graphiteapp.org"),
    "PostgreSQL": ("PostgreSQL", "https://www.postgresql.org"),
}
def setup(app):
app.add_stylesheet("custom.css")
| gpl-3.0 |
osstech-jp/samba | third_party/dnspython/dns/rdtypes/IN/IPSECKEY.py | 100 | 5993 | # Copyright (C) 2006, 2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import cStringIO
import struct
import dns.exception
import dns.inet
import dns.name
class IPSECKEY(dns.rdata.Rdata):
    """IPSECKEY record
    @ivar precedence: the precedence for this key data
    @type precedence: int
    @ivar gateway_type: the gateway type
    @type gateway_type: int
    @ivar algorithm: the algorithm to use
    @type algorithm: int
    @ivar gateway: the public key
    @type gateway: None, IPv4 address, IPV6 address, or domain name
    @ivar key: the public key
    @type key: string
    @see: RFC 4025"""
    # NOTE(review): this module is Python 2 only (cStringIO, cmp(),
    # str.decode('base64_codec')) and references dns.rdata, which is not among
    # this file's visible imports -- presumably pulled in by the package
    # machinery; confirm.
    __slots__ = ['precedence', 'gateway_type', 'algorithm', 'gateway', 'key']
    def __init__(self, rdclass, rdtype, precedence, gateway_type, algorithm,
                 gateway, key):
        # Validate the gateway against its declared type before storing
        # (RFC 4025: 0 = no gateway, 1 = IPv4, 2 = IPv6, 3 = domain name).
        super(IPSECKEY, self).__init__(rdclass, rdtype)
        if gateway_type == 0:
            if gateway != '.' and not gateway is None:
                raise SyntaxError('invalid gateway for gateway type 0')
            gateway = None
        elif gateway_type == 1:
            # check that it's OK
            junk = dns.inet.inet_pton(dns.inet.AF_INET, gateway)
        elif gateway_type == 2:
            # check that it's OK
            junk = dns.inet.inet_pton(dns.inet.AF_INET6, gateway)
        elif gateway_type == 3:
            pass
        else:
            # NOTE(review): raises the builtin SyntaxError rather than
            # dns.exception.SyntaxError -- confirm which is intended.
            raise SyntaxError('invalid IPSECKEY gateway type: %d' % gateway_type)
        self.precedence = precedence
        self.gateway_type = gateway_type
        self.algorithm = algorithm
        self.gateway = gateway
        self.key = key
    def to_text(self, origin=None, relativize=True, **kw):
        """Render the rdata in zone-file text form."""
        if self.gateway_type == 0:
            gateway = '.'
        elif self.gateway_type == 1:
            gateway = self.gateway
        elif self.gateway_type == 2:
            gateway = self.gateway
        elif self.gateway_type == 3:
            gateway = str(self.gateway.choose_relativity(origin, relativize))
        else:
            raise ValueError('invalid gateway type')
        return '%d %d %d %s %s' % (self.precedence, self.gateway_type,
                                   self.algorithm, gateway,
                                   dns.rdata._base64ify(self.key))
    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
        """Parse the rdata from zone-file tokens (bound as a classmethod below)."""
        precedence = tok.get_uint8()
        gateway_type = tok.get_uint8()
        algorithm = tok.get_uint8()
        if gateway_type == 3:
            gateway = tok.get_name().choose_relativity(origin, relativize)
        else:
            gateway = tok.get_string()
        # The key is base64 text, possibly split across several tokens.
        chunks = []
        while 1:
            t = tok.get().unescape()
            if t.is_eol_or_eof():
                break
            if not t.is_identifier():
                raise dns.exception.SyntaxError
            chunks.append(t.value)
        b64 = ''.join(chunks)
        key = b64.decode('base64_codec')
        return cls(rdclass, rdtype, precedence, gateway_type, algorithm,
                   gateway, key)
    from_text = classmethod(from_text)
    def to_wire(self, file, compress = None, origin = None):
        """Write the rdata in RFC 4025 wire format to *file*."""
        header = struct.pack("!BBB", self.precedence, self.gateway_type,
                             self.algorithm)
        file.write(header)
        if self.gateway_type == 0:
            pass
        elif self.gateway_type == 1:
            file.write(dns.inet.inet_pton(dns.inet.AF_INET, self.gateway))
        elif self.gateway_type == 2:
            file.write(dns.inet.inet_pton(dns.inet.AF_INET6, self.gateway))
        elif self.gateway_type == 3:
            # Domain-name gateways are written without name compression.
            self.gateway.to_wire(file, None, origin)
        else:
            raise ValueError('invalid gateway type')
        file.write(self.key)
    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
        """Parse the rdata from wire format (bound as a classmethod below)."""
        if rdlen < 3:
            raise dns.exception.FormError
        header = struct.unpack('!BBB', wire[current : current + 3])
        gateway_type = header[1]
        current += 3
        rdlen -= 3
        if gateway_type == 0:
            gateway = None
        elif gateway_type == 1:
            gateway = dns.inet.inet_ntop(dns.inet.AF_INET,
                                         wire[current : current + 4])
            current += 4
            rdlen -= 4
        elif gateway_type == 2:
            gateway = dns.inet.inet_ntop(dns.inet.AF_INET6,
                                         wire[current : current + 16])
            current += 16
            rdlen -= 16
        elif gateway_type == 3:
            (gateway, cused) = dns.name.from_wire(wire[: current + rdlen],
                                                  current)
            current += cused
            rdlen -= cused
        else:
            raise dns.exception.FormError('invalid IPSECKEY gateway type')
        # Whatever remains after the gateway is the raw key material.
        key = wire[current : current + rdlen].unwrap()
        return cls(rdclass, rdtype, header[0], gateway_type, header[2],
                   gateway, key)
    from_wire = classmethod(from_wire)
    def _cmp(self, other):
        """Order two rdatas by comparing their wire-format encodings."""
        f = cStringIO.StringIO()
        self.to_wire(f)
        wire1 = f.getvalue()
        f.seek(0)
        f.truncate()
        other.to_wire(f)
        wire2 = f.getvalue()
        f.close()
        return cmp(wire1, wire2)
| gpl-3.0 |
thnee/ansible | test/units/modules/network/cnos/test_cnos_static_route.py | 38 | 3285 | # (c) 2016 Red Hat Inc.
# Copyright (C) 2017 Lenovo.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.cnos import cnos_static_route
from .cnos_module import TestCnosModule, load_fixture
from units.modules.utils import set_module_args
class TestCnosStaticRouteModule(TestCnosModule):
    """Unit tests for the cnos_static_route Ansible module."""
    module = cnos_static_route
    def setUp(self):
        """Patch the module's network helpers so no device I/O happens."""
        super(TestCnosStaticRouteModule, self).setUp()
        # NOTE(review): this patches exec_command in cnos_banner, not in
        # cnos_static_route -- looks like a copy-paste target; the tests still
        # pass because get_config below is what the module actually uses, but
        # confirm the intended target.
        self.mock_exec_command = patch('ansible.modules.network.cnos.cnos_banner.exec_command')
        self.exec_command = self.mock_exec_command.start()
        self.mock_load_config = patch('ansible.modules.network.cnos.cnos_static_route.load_config')
        self.load_config = self.mock_load_config.start()
        self.mock_get_config = patch('ansible.modules.network.cnos.cnos_static_route.get_config')
        self.get_config = self.mock_get_config.start()
    def tearDown(self):
        """Stop every patch started in setUp."""
        super(TestCnosStaticRouteModule, self).tearDown()
        self.mock_exec_command.stop()
        self.mock_load_config.stop()
        self.mock_get_config.stop()
    def load_fixtures(self, commands=None):
        # Feed a canned device configuration to the mocked helpers.
        self.exec_command.return_value = (0, load_fixture('cnos_static_route.cfg').strip(), None)
        self.load_config.return_value = dict(diff=None, session='session')
    def test_cnos_static_route_present(self):
        """A new route with defaults emits a single 'ip route ... 1' command."""
        set_module_args(dict(prefix='10.241.107.20', mask='255.255.255.0', next_hop='10.241.106.1'))
        self.execute_module(changed=True, commands=['ip route 10.241.107.20 255.255.255.0 10.241.106.1 1'])
    def test_cnos_static_route_present_no_defaults(self):
        """Explicit description and admin_distance are passed through."""
        set_module_args(dict(prefix='10.241.106.4', mask='255.255.255.0', next_hop='1.2.3.5',
                             description='testing', admin_distance=100))
        self.execute_module(changed=True,
                            commands=['ip route 10.241.106.4 255.255.255.0 1.2.3.5 100 description testing'])
    def test_cnos_static_route_change(self):
        """Changing the next hop of an existing prefix re-emits the route."""
        set_module_args(dict(prefix='10.10.30.64', mask='255.255.255.0', next_hop='1.2.4.8'))
        self.execute_module(changed=True,
                            commands=['ip route 10.10.30.64 255.255.255.0 1.2.4.8 1'])
    def test_cnos_static_route_absent(self):
        """state=absent emits a 'no ip route' removal command."""
        set_module_args(dict(prefix='10.10.30.12',
                             mask='255.255.255.0', next_hop='1.2.4.8', state='absent'))
        self.execute_module(changed=True,
                            commands=['no ip route 10.10.30.12 255.255.255.0 1.2.4.8 1'])
| gpl-3.0 |
sertac/django | tests/utils_tests/test_jslex.py | 169 | 9708 | # -*- coding: utf-8 -*-
"""Tests for jslex."""
# originally from https://bitbucket.org/ned/jslex
from __future__ import unicode_literals
from django.test import SimpleTestCase
from django.utils.jslex import JsLexer, prepare_js_for_gettext
class JsTokensTest(SimpleTestCase):
    """Table-driven tests for JsLexer.

    Each (input, expected_tokens) pair in LEX_CASES is turned into a
    test_case_N method by the module-level loop below this class.
    """
    LEX_CASES = [
        # ids
        ("a ABC $ _ a123", ["id a", "id ABC", "id $", "id _", "id a123"]),
        ("\\u1234 abc\\u0020 \\u0065_\\u0067", ["id \\u1234", "id abc\\u0020", "id \\u0065_\\u0067"]),
        # numbers
        ("123 1.234 0.123e-3 0 1E+40 1e1 .123", ["dnum 123", "dnum 1.234", "dnum 0.123e-3", "dnum 0", "dnum 1E+40", "dnum 1e1", "dnum .123"]),
        ("0x1 0xabCD 0XABcd", ["hnum 0x1", "hnum 0xabCD", "hnum 0XABcd"]),
        ("010 0377 090", ["onum 010", "onum 0377", "dnum 0", "dnum 90"]),
        ("0xa123ghi", ["hnum 0xa123", "id ghi"]),
        # keywords
        ("function Function FUNCTION", ["keyword function", "id Function", "id FUNCTION"]),
        ("const constructor in inherits", ["keyword const", "id constructor", "keyword in", "id inherits"]),
        ("true true_enough", ["reserved true", "id true_enough"]),
        # strings
        (''' 'hello' "hello" ''', ["string 'hello'", 'string "hello"']),
        (r""" 'don\'t' "don\"t" '"' "'" '\'' "\"" """,
         [r"""string 'don\'t'""", r'''string "don\"t"''', r"""string '"'""", r'''string "'"''', r"""string '\''""", r'''string "\""''']),
        (r'"ƃuıxǝ⅂ ʇdıɹɔsɐʌɐſ\""', [r'string "ƃuıxǝ⅂ ʇdıɹɔsɐʌɐſ\""']),
        # comments
        ("a//b", ["id a", "linecomment //b"]),
        ("/****/a/=2//hello", ["comment /****/", "id a", "punct /=", "dnum 2", "linecomment //hello"]),
        ("/*\n * Header\n */\na=1;", ["comment /*\n * Header\n */", "id a", "punct =", "dnum 1", "punct ;"]),
        # punctuation
        ("a+++b", ["id a", "punct ++", "punct +", "id b"]),
        # regex
        (r"a=/a*/,1", ["id a", "punct =", "regex /a*/", "punct ,", "dnum 1"]),
        (r"a=/a*[^/]+/,1", ["id a", "punct =", "regex /a*[^/]+/", "punct ,", "dnum 1"]),
        (r"a=/a*\[^/,1", ["id a", "punct =", r"regex /a*\[^/", "punct ,", "dnum 1"]),
        (r"a=/\//,1", ["id a", "punct =", r"regex /\//", "punct ,", "dnum 1"]),
        # next two are from http://www.mozilla.org/js/language/js20-2002-04/rationale/syntax.html#regular-expressions
        ("""for (var x = a in foo && "</x>" || mot ? z:/x:3;x<5;y</g/i) {xyz(x++);}""",
         ["keyword for", "punct (", "keyword var", "id x", "punct =", "id a", "keyword in",
          "id foo", "punct &&", 'string "</x>"', "punct ||", "id mot", "punct ?", "id z",
          "punct :", "regex /x:3;x<5;y</g", "punct /", "id i", "punct )", "punct {",
          "id xyz", "punct (", "id x", "punct ++", "punct )", "punct ;", "punct }"]),
        ("""for (var x = a in foo && "</x>" || mot ? z/x:3;x<5;y</g/i) {xyz(x++);}""",
         ["keyword for", "punct (", "keyword var", "id x", "punct =", "id a", "keyword in",
          "id foo", "punct &&", 'string "</x>"', "punct ||", "id mot", "punct ?", "id z",
          "punct /", "id x", "punct :", "dnum 3", "punct ;", "id x", "punct <", "dnum 5",
          "punct ;", "id y", "punct <", "regex /g/i", "punct )", "punct {",
          "id xyz", "punct (", "id x", "punct ++", "punct )", "punct ;", "punct }"]),
        # Various "illegal" regexes that are valid according to the std.
        (r"""/????/, /++++/, /[----]/ """, ["regex /????/", "punct ,", "regex /++++/", "punct ,", "regex /[----]/"]),
        # Stress cases from http://stackoverflow.com/questions/5533925/what-javascript-constructs-does-jslex-incorrectly-lex/5573409#5573409
        (r"""/\[/""", [r"""regex /\[/"""]),
        (r"""/[i]/""", [r"""regex /[i]/"""]),
        (r"""/[\]]/""", [r"""regex /[\]]/"""]),
        (r"""/a[\]]/""", [r"""regex /a[\]]/"""]),
        (r"""/a[\]]b/""", [r"""regex /a[\]]b/"""]),
        (r"""/[\]/]/gi""", [r"""regex /[\]/]/gi"""]),
        (r"""/\[[^\]]+\]/gi""", [r"""regex /\[[^\]]+\]/gi"""]),
        ("""
            rexl.re = {
            NAME: /^(?![0-9])(?:\w)+|^"(?:[^"]|"")+"/,
            UNQUOTED_LITERAL: /^@(?:(?![0-9])(?:\w|\:)+|^"(?:[^"]|"")+")\[[^\]]+\]/,
            QUOTED_LITERAL: /^'(?:[^']|'')*'/,
            NUMERIC_LITERAL: /^[0-9]+(?:\.[0-9]*(?:[eE][-+][0-9]+)?)?/,
            SYMBOL: /^(?:==|=|<>|<=|<|>=|>|!~~|!~|~~|~|!==|!=|!~=|!~|!|&|\||\.|\:|,|\(|\)|\[|\]|\{|\}|\?|\:|;|@|\^|\/\+|\/|\*|\+|-)/
            };
        """,
         ["id rexl", "punct .", "id re", "punct =", "punct {",
          "id NAME", "punct :", r"""regex /^(?![0-9])(?:\w)+|^"(?:[^"]|"")+"/""", "punct ,",
          "id UNQUOTED_LITERAL", "punct :", r"""regex /^@(?:(?![0-9])(?:\w|\:)+|^"(?:[^"]|"")+")\[[^\]]+\]/""", "punct ,",
          "id QUOTED_LITERAL", "punct :", r"""regex /^'(?:[^']|'')*'/""", "punct ,",
          "id NUMERIC_LITERAL", "punct :", r"""regex /^[0-9]+(?:\.[0-9]*(?:[eE][-+][0-9]+)?)?/""", "punct ,",
          "id SYMBOL", "punct :", r"""regex /^(?:==|=|<>|<=|<|>=|>|!~~|!~|~~|~|!==|!=|!~=|!~|!|&|\||\.|\:|,|\(|\)|\[|\]|\{|\}|\?|\:|;|@|\^|\/\+|\/|\*|\+|-)/""",
          "punct }", "punct ;"
          ]),
        ("""
            rexl.re = {
            NAME: /^(?![0-9])(?:\w)+|^"(?:[^"]|"")+"/,
            UNQUOTED_LITERAL: /^@(?:(?![0-9])(?:\w|\:)+|^"(?:[^"]|"")+")\[[^\]]+\]/,
            QUOTED_LITERAL: /^'(?:[^']|'')*'/,
            NUMERIC_LITERAL: /^[0-9]+(?:\.[0-9]*(?:[eE][-+][0-9]+)?)?/,
            SYMBOL: /^(?:==|=|<>|<=|<|>=|>|!~~|!~|~~|~|!==|!=|!~=|!~|!|&|\||\.|\:|,|\(|\)|\[|\]|\{|\}|\?|\:|;|@|\^|\/\+|\/|\*|\+|-)/
            };
            str = '"';
        """,
         ["id rexl", "punct .", "id re", "punct =", "punct {",
          "id NAME", "punct :", r"""regex /^(?![0-9])(?:\w)+|^"(?:[^"]|"")+"/""", "punct ,",
          "id UNQUOTED_LITERAL", "punct :", r"""regex /^@(?:(?![0-9])(?:\w|\:)+|^"(?:[^"]|"")+")\[[^\]]+\]/""", "punct ,",
          "id QUOTED_LITERAL", "punct :", r"""regex /^'(?:[^']|'')*'/""", "punct ,",
          "id NUMERIC_LITERAL", "punct :", r"""regex /^[0-9]+(?:\.[0-9]*(?:[eE][-+][0-9]+)?)?/""", "punct ,",
          "id SYMBOL", "punct :", r"""regex /^(?:==|=|<>|<=|<|>=|>|!~~|!~|~~|~|!==|!=|!~=|!~|!|&|\||\.|\:|,|\(|\)|\[|\]|\{|\}|\?|\:|;|@|\^|\/\+|\/|\*|\+|-)/""",
          "punct }", "punct ;",
          "id str", "punct =", """string '"'""", "punct ;",
          ]),
        (r""" this._js = "e.str(\"" + this.value.replace(/\\/g, "\\\\").replace(/"/g, "\\\"") + "\")"; """,
         ["keyword this", "punct .", "id _js", "punct =", r'''string "e.str(\""''', "punct +", "keyword this", "punct .",
          "id value", "punct .", "id replace", "punct (", r"regex /\\/g", "punct ,", r'string "\\\\"', "punct )",
          "punct .", "id replace", "punct (", r'regex /"/g', "punct ,", r'string "\\\""', "punct )", "punct +",
          r'string "\")"', "punct ;"]),
    ]
def make_function(input, toks):
    # Build a bound test method asserting the lexer output for *input*.
    def test_func(self):
        tokens = []
        for name, tok in JsLexer().lex(input):
            if name == 'ws':
                continue
            tokens.append("%s %s" % (name, tok))
        self.assertListEqual(tokens, toks)
    return test_func
# Attach one generated test method per LEX_CASES entry to JsTokensTest.
for i, (input, toks) in enumerate(JsTokensTest.LEX_CASES):
    setattr(JsTokensTest, "test_case_%d" % i, make_function(input, toks))
GETTEXT_CASES = (
(
r"""
a = 1; /* /[0-9]+/ */
b = 0x2a0b / 1; // /[0-9]+/
c = 3;
""",
r"""
a = 1; /* /[0-9]+/ */
b = 0x2a0b / 1; // /[0-9]+/
c = 3;
"""
), (
r"""
a = 1.234e-5;
/*
* /[0-9+/
*/
b = .0123;
""",
r"""
a = 1.234e-5;
/*
* /[0-9+/
*/
b = .0123;
"""
), (
r"""
x = y / z;
alert(gettext("hello"));
x /= 3;
""",
r"""
x = y / z;
alert(gettext("hello"));
x /= 3;
"""
), (
r"""
s = "Hello \"th/foo/ere\"";
s = 'He\x23llo \'th/foo/ere\'';
s = 'slash quote \", just quote "';
""",
r"""
s = "Hello \"th/foo/ere\"";
s = "He\x23llo \'th/foo/ere\'";
s = "slash quote \", just quote \"";
"""
), (
r"""
s = "Line continuation\
continued /hello/ still the string";/hello/;
""",
r"""
s = "Line continuation\
continued /hello/ still the string";"REGEX";
"""
), (
r"""
var regex = /pattern/;
var regex2 = /matter/gm;
var regex3 = /[*/]+/gm.foo("hey");
""",
r"""
var regex = "REGEX";
var regex2 = "REGEX";
var regex3 = "REGEX".foo("hey");
"""
), (
r"""
for (var x = a in foo && "</x>" || mot ? z:/x:3;x<5;y</g/i) {xyz(x++);}
for (var x = a in foo && "</x>" || mot ? z/x:3;x<5;y</g/i) {xyz(x++);}
""",
r"""
for (var x = a in foo && "</x>" || mot ? z:"REGEX"/i) {xyz(x++);}
for (var x = a in foo && "</x>" || mot ? z/x:3;x<5;y<"REGEX") {xyz(x++);}
"""
), (
"""
\\u1234xyz = gettext('Hello there');
""", r"""
Uu1234xyz = gettext("Hello there");
"""
)
)
class JsToCForGettextTest(SimpleTestCase):
    """Container for dynamically attached test_case_N methods.

    The module-level loop below generates one method per GETTEXT_CASES
    pair; the class body itself is intentionally empty.
    """
def make_function(js, c):
    # Build a test method comparing prepare_js_for_gettext(js) against c.
    def test_func(self):
        converted = prepare_js_for_gettext(js)
        self.assertMultiLineEqual(converted, c)
    return test_func
# Attach one generated test method per GETTEXT_CASES pair to JsToCForGettextTest.
for i, pair in enumerate(GETTEXT_CASES):
    setattr(JsToCForGettextTest, "test_case_%d" % i, make_function(*pair))
| bsd-3-clause |
cromlyngames/Cardiff_rain_storage | rain_storage/rain_variance_calibrator.py | 1 | 5413 | #! /usr/bin/env python3
## example_function_name
## ExampleVariableName
import random
## targets based on met office monthly data
## Row Labels Average of rain StdDev of rain StdDevp of rain
## 1 127.425 60.61179013 59.84934732
## 2 89.0475 49.09225218 48.47471499
## 3 88.52564103 44.66502646 44.08867939
## 4 65.86666667 38.2410384 37.74758497
## 5 75.72307692 40.7776736 40.25148802
## 6 69.77435897 41.79887758 41.25951463
## 7 76.62564103 44.51739883 43.94295672
## 8 95.01282051 58.55597942 57.80038674
## 9 87.6225 49.43868768 48.81679264
## 10 124.3675 57.02466128 56.30734138
## 11 125.6875 50.63449816 49.99756088
## 12 131.4 57.07990351 56.36188872
## Decision: use stdev, not stdevp. Data covers the last twenty years, but results outside the sample are very possible.
##
## Hypothesis: the rainfall on two days in the same month is largely independent;
## therefore the variance of rainfall between two Januaries will be smaller than
## the variance in rainfall between two rainy days in January.
## The variances of the two would be related.
## Method: generate probability tables for simple, round rainfall amounts.
## Run for 50 years.
## Test the resulting average and stdev against the Met Office values.
## If much better than the previous option, keep it. If much worse, revert. If mixed, mix the probability tables (genetic algorithm, whoo!)
## Met Office monthly mean rainfall (mm), indexed by month number 1-12;
## index 0 is a placeholder so TargetAverage[month] works directly.
TargetAverage=[
    0,
    127.4250, 89.0475, 88.5256, 65.8666,
    75.7230, 69.7744, 76.6256, 95.0128,
    # Fixed typo: September was "87,6225" (comma), which split it into two
    # list items (87 and 6225) and shifted the Oct-Dec entries off by one.
    87.6225, 124.3675, 125.6875, 131.4000
    ]
## initialise table of probabilities of rain amounts
## (every month starts from a copy of this same shape; values sum to 1.0)
StartMonthDict={
    ## rain in mm : probability of it happening
    0.1 : 0.09,
    0.5 : 0.11,
    1.0 : 0.08,
    2.0 : 0.12,
    4.0 : 0.07,
    8.0 : 0.13,
    16.0 : 0.06,
    32.0 : 0.14,
    64.0 : 0.05,
    128.0 : 0.15
    }
# One mutable probability table per month; calibration mutates each one
# independently. They are filled from StartMonthDict by the initialisation
# loop at the bottom of this file.
JanDict={}
FebDict={}
MarDict={}
AprDict={}
MayDict={}
JunDict={}
JulDict={}
AugDict={}
SepDict={}
OctDict={}
NovDict={}
DecDict={}
# Month number (1-12) -> that month's probability table.
MonthDictLookupDict = {
    1: JanDict,
    2: FebDict,
    3: MarDict,
    4: AprDict,
    5: MayDict,
    6: JunDict,
    7: JulDict,
    8: AugDict,
    9: SepDict,
    10:OctDict,
    11:NovDict,
    12:DecDict
    }
def day_of_rainfall(MonthX, YearsRain):
    """Add one rainy day's rainfall to the running total for month MonthX.

    Draws a rainfall amount (mm) from the month's probability table and
    adds it to YearsRain[MonthX], rounding to 4 decimal places to keep
    the float totals tidy.

    MonthX    -- month number, 1-12
    YearsRain -- 13-slot list of per-month totals (index 0 unused);
                 mutated in place and also returned for convenience.
    """
    MonthDict = MonthDictLookupDict[MonthX]
    # Draw an amount with probability proportional to its table weight.
    # (The original "find the key whose probability is nearest a uniform
    # draw" lookup did not actually sample the distribution, and it also
    # printed debug output on every call.)
    amounts = list(MonthDict.keys())
    weights = list(MonthDict.values())
    RainInMM = random.choices(amounts, weights=weights)[0]
    YearsRain[MonthX] = round(YearsRain[MonthX] + RainInMM, 4)
    return(YearsRain)
def get_block_of_days(MonthX):
## generates a block of days until it rains again
## current iteration ignores the month and returns average iteration for watering months
## this underestimates frequency of rain in winter
BlockFreqLookupTable={
0 : 0.684449093444909,
1 : 0.118200836820084,
2 : 0.0753138075313808,
3 : 0.0352161785216179,
4 : 0.0209205020920502,
5 : 0.0135983263598326,
6 : 0.0125523012552301,
7 : 0.00906555090655509,
8 : 0.00488145048814505,
9 : 0.00488145048814505,
10 : 0.00627615062761506,
11 : 0.00453277545327755,
12 : 0.00278940027894003,
13 : 0.00209205020920502,
14 : 0.00139470013947001,
15 : 0.000697350069735007,
16 : 0.000697350069735007,
17 : 0.00104602510460251,
18 : 0.000348675034867503,
19 : 0.000348675034867503,
20 : 0.000697350069735007,
21 : 0,
}
Target = random.random()
diff = float('inf')
for key,value in BlockFreqLookupTable.items():
if diff > abs(Target-value):
diff = abs(Target-value)
BlockOfDays = key
return(BlockOfDays)
## 0 means it rains the next day as well as today.
## yes, in my 20 year data set, cardiff has NEVER had 3 weeks or more without rain.
def update_month(DayCount):
    """Convert a day-of-year (1-365) into its month number (1-12).

    Day counts past 365 wrap around into the following (non-leap) January,
    preserving the wrap-around behaviour of the original loop.
    """
    # Days in each calendar month, 1-indexed (index 0 unused).
    # Fixed: the original table alternated 31/30 after July (Aug=30, Dec=30,
    # plus a stray 13th entry of 25), so it summed to 364 and mis-assigned
    # every day from August onwards (day 365 even mapped to January).
    DaysPerMonth = [
        0,
        31, 28, 31,
        30, 31, 30,
        31, 31, 30,
        31, 30, 31
        ]
    MonthX = 0
    while DayCount > 0:
        MonthX = MonthX + 1
        if MonthX == 13:
            # Wrap into the next year.
            MonthX = 1
        DayCount = DayCount - DaysPerMonth[MonthX]
    return(MonthX)
## Program Start
## Properly initialise the monthly dicts:
## this is used to allow the start shape of the different rain slots to be easily modified.
for MonthDicts in MonthDictLookupDict:
    for RainKeys in StartMonthDict:
        #print(RainKeys, StartMonthDict[RainKeys])
        ThisMonthDict = MonthDictLookupDict[MonthDicts]
        ThisMonthDict[RainKeys] = StartMonthDict[RainKeys]
# ErasRain accumulates one 13-slot list (index 0 unused, 1-12 = months)
# of rainfall totals per simulated year.
ErasRain = [[0, 0,0,0,0,0,0, 0,0,0,0,0,0]]
# NOTE(review): the plan above says "run for 50 years" but range(1, 5)
# simulates only 4 -- presumably a short debugging run; confirm.
for year in range (1,5):
    DayCounter= 1
    MonthX =1
    YearsRain = [0, 0,0,0,0,0,0, 0,0,0,0,0,0]
    while DayCounter < 365:
        # Jump ahead by the randomly drawn dry spell, then rain once.
        DayCounterCheck = DayCounter + get_block_of_days(MonthX)
        if DayCounterCheck > 365:
            ## unlike the watering program, only need to see if rain at end of this block is outside the year of record.
            break
        ## no watering functions needed
        DayCounter = DayCounterCheck+1
        MonthX= update_month(DayCounter)
        YearsRain = day_of_rainfall(MonthX, YearsRain)
        #print(YearsRain)
    ErasRain.append(YearsRain)
print(ErasRain)
| gpl-3.0 |
Alwnikrotikz/paimei | console/modules/_PAIMEIpeek/EditReconDlg.py | 1 | 8473 | #
# PaiMei
# Copyright (C) 2006 Pedram Amini <pedram.amini@gmail.com>
#
# $Id$
#
# This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with this program; if not, write to the Free
# Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
'''
@author: Pedram Amini
@license: GNU General Public License 2.0 or later
@contact: pedram.amini@gmail.com
@organization: www.openrce.org
'''
import wx
import wx.lib.dialogs
import MySQLdb
class EditReconDlg(wx.Dialog):
def __init__(self, *args, **kwds):
self.parent = kwds["parent"]
self.top = self.parent.top
self.choices = ["new", "uncontrollable", "clear", "unsure", "vulnerable"]
# begin wxGlade: EditReconDlg.__init__
kwds["style"] = wx.DEFAULT_DIALOG_STYLE
wx.Dialog.__init__(self, *args, **kwds)
self.stack_depth_static_staticbox = wx.StaticBox(self, -1, "Stack Depth:")
self.reason_static_staticbox = wx.StaticBox(self, -1, "Reason:")
self.status_static_staticbox = wx.StaticBox(self, -1, "Status")
self.username_static_staticbox = wx.StaticBox(self, -1, "Username")
self.notes_sizer_staticbox = wx.StaticBox(self, -1, "Notes:")
self.address_static_staticbox = wx.StaticBox(self, -1, "Address:")
self.address = wx.TextCtrl(self, -1, "")
self.stack_depth = wx.SpinCtrl(self, -1, "3", min=0, max=99)
self.reason = wx.TextCtrl(self, -1, "")
self.status = wx.Choice(self, -1, choices=self.choices)
self.username = wx.TextCtrl(self, -1, "")
self.notes = wx.TextCtrl(self, -1, "", style=wx.TE_MULTILINE|wx.HSCROLL)
self.save = wx.Button(self, -1, "Save")
self.cancel = wx.Button(self, wx.ID_CANCEL)
self.__set_properties()
self.__do_layout()
# end wxGlade
# event bindings.
self.Bind(wx.EVT_BUTTON, self.on_button_save, self.save)
def __set_properties(self):
# begin wxGlade: EditReconDlg.__set_properties
self.SetTitle("Edit Recon Point")
self.SetSize((500, 500))
self.status.SetSelection(-1)
self.notes.SetFont(wx.Font(8, wx.MODERN, wx.NORMAL, wx.NORMAL, 0, "Lucida Console"))
# end wxGlade
def __do_layout(self):
# begin wxGlade: EditReconDlg.__do_layout
overall = wx.BoxSizer(wx.VERTICAL)
buttons_sizer = wx.BoxSizer(wx.HORIZONTAL)
notes_sizer = wx.StaticBoxSizer(self.notes_sizer_staticbox, wx.HORIZONTAL)
username_static = wx.StaticBoxSizer(self.username_static_staticbox, wx.HORIZONTAL)
status_static = wx.StaticBoxSizer(self.status_static_staticbox, wx.HORIZONTAL)
reason_static = wx.StaticBoxSizer(self.reason_static_staticbox, wx.HORIZONTAL)
stack_depth_static = wx.StaticBoxSizer(self.stack_depth_static_staticbox, wx.HORIZONTAL)
address_static = wx.StaticBoxSizer(self.address_static_staticbox, wx.HORIZONTAL)
address_static.Add(self.address, 1, wx.EXPAND|wx.ADJUST_MINSIZE, 0)
overall.Add(address_static, 1, wx.EXPAND, 0)
stack_depth_static.Add(self.stack_depth, 1, wx.EXPAND|wx.ADJUST_MINSIZE, 0)
overall.Add(stack_depth_static, 1, wx.EXPAND, 0)
reason_static.Add(self.reason, 1, wx.EXPAND|wx.ADJUST_MINSIZE, 0)
overall.Add(reason_static, 1, wx.EXPAND, 0)
status_static.Add(self.status, 1, wx.EXPAND|wx.ADJUST_MINSIZE, 0)
overall.Add(status_static, 1, wx.EXPAND, 0)
username_static.Add(self.username, 1, wx.EXPAND|wx.ADJUST_MINSIZE, 0)
overall.Add(username_static, 1, wx.EXPAND, 0)
notes_sizer.Add(self.notes, 3, wx.EXPAND|wx.ADJUST_MINSIZE, 0)
overall.Add(notes_sizer, 4, wx.EXPAND, 0)
buttons_sizer.Add(self.save, 1, wx.EXPAND|wx.ADJUST_MINSIZE, 0)
buttons_sizer.Add(self.cancel, 1, wx.EXPAND|wx.ADJUST_MINSIZE, 0)
overall.Add(buttons_sizer, 1, wx.EXPAND, 0)
self.SetAutoLayout(True)
self.SetSizer(overall)
self.Layout()
# end wxGlade
####################################################################################################################
def on_button_save (self, event):
'''
Grab the form values and add a new entry to the database.
'''
try:
address = long(self.address.GetLineText(0), 16)
except:
dlg = wx.MessageDialog(self, "Invalid 'address' value, expecting a DWORD. Ex: 0xdeadbeef", "Error", wx.OK | wx.ICON_WARNING)
dlg.ShowModal()
return
try:
stack_depth = int(self.stack_depth.GetValue())
except:
dlg = wx.MessageDialog(self, "Must specify an integer for 'stack depth'.", "Error", wx.OK | wx.ICON_WARNING)
dlg.ShowModal()
return
status = self.choices[self.status.GetSelection()]
username = self.username.GetLineText(0)
reason = self.reason.GetLineText(0)
notes = self.notes.GetValue()
# must at least have a reason. notes are optional.
if not reason:
dlg = wx.MessageDialog(self, "Must specify a 'reason'.", "Error", wx.OK | wx.ICON_WARNING)
dlg.ShowModal()
return
sql = " UPDATE pp_recon"
sql += " SET module_id = '%d'," % self.top.module["id"]
sql += " offset = '%d'," % (address - self.top.module["base"])
sql += " stack_depth = '%d'," % stack_depth
sql += " reason = '%s'," % reason.replace("\\", "\\\\").replace("'", "\\'")
sql += " status = '%s'," % status
sql += " username = '%s'," % username
sql += " notes = '%s'" % notes.replace("\\", "\\\\").replace("'", "\\'")
sql += " WHERE id = '%d'" % self.recon_id
cursor = self.top.main_frame.mysql.cursor()
try:
cursor.execute(sql)
except MySQLdb.Error, e:
msg = "MySQL error %d: %s\n" % (e.args[0], e.args[1])
msg += sql
dlg = wx.lib.dialogs.ScrolledMessageDialog(self, msg, "Failed Adding RECON Point")
dlg.ShowModal()
# reload the recon list control. we reload instead of updating the control to partially solve
# contention issues when multiple users are hitting the database at the same time.
self.top.recon.load(self.top.module["id"])
self.Destroy()
####################################################################################################################
def propagate (self, recon_id):
    '''
    Propagate the control values from the database. We grab from the database as opposed to the
    reconlistctrl to ensure that we get the latest goods.
    '''
    # save this for later.
    self.recon_id = recon_id

    # create a mysql cursor and grab the db entry for this recon id.
    cursor = self.top.main_frame.mysql.cursor(MySQLdb.cursors.DictCursor)
    sql    = "SELECT * FROM pp_recon WHERE id = %s"

    try:
        # parameterized so the driver handles quoting, consistent with the save path.
        cursor.execute(sql, (recon_id,))
    except MySQLdb.Error as e:
        # bug fix: the old handler referenced an undefined name 'sql' (it was
        # never assigned in this method) and would raise a NameError while
        # formatting the error message; it also fell through to fetchone()
        # on a failed cursor instead of returning.
        msg  = "MySQL error %d: %s\n" % (e.args[0], e.args[1])
        msg += sql
        dlg = wx.lib.dialogs.ScrolledMessageDialog(self, msg, "Failed Editing RECON Point")
        dlg.ShowModal()
        self.Destroy()
        return

    recon = cursor.fetchone()

    self.address.SetValue("0x%08x" % (recon["offset"] + self.top.module["base"]))
    self.stack_depth.SetValue(recon["stack_depth"])
    self.reason.SetValue(recon["reason"])
    self.status.SetSelection(self.choices.index(recon["status"]))
    self.username.SetValue(recon["username"])
    self.notes.SetValue(recon["notes"])
| gpl-2.0 |
jicruz/heroku-bot | lib/youtube_dl/extractor/mwave.py | 64 | 3279 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
int_or_none,
parse_duration,
)
class MwaveIE(InfoExtractor):
    """Extractor for mwave.interest.me mnettv clip pages."""
    _VALID_URL = r'https?://mwave\.interest\.me/(?:[^/]+/)?mnettv/videodetail\.m\?searchVideoDetailVO\.clip_id=(?P<id>[0-9]+)'
    _URL_TEMPLATE = 'http://mwave.interest.me/mnettv/videodetail.m?searchVideoDetailVO.clip_id=%s'
    _TESTS = [{
        'url': 'http://mwave.interest.me/mnettv/videodetail.m?searchVideoDetailVO.clip_id=168859',
        # md5 is unstable
        'info_dict': {
            'id': '168859',
            'ext': 'flv',
            'title': '[M COUNTDOWN] SISTAR - SHAKE IT',
            'thumbnail': r're:^https?://.*\.jpg$',
            'uploader': 'M COUNTDOWN',
            'duration': 206,
            'view_count': int,
        }
    }, {
        'url': 'http://mwave.interest.me/en/mnettv/videodetail.m?searchVideoDetailVO.clip_id=176199',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # bug fix: "&sect" in the query string had been mangled into the "§"
        # character by an HTML-entity round-trip; restored to "&sectorid=".
        vod_info = self._download_json(
            'http://mwave.interest.me/onair/vod_info.m?vodtype=CL&sectorid=&endinfo=Y&id=%s' % video_id,
            video_id, 'Download vod JSON')

        formats = []
        # Each CDN entry points at a JSON stub that in turn carries the f4m URL.
        for num, cdn_info in enumerate(vod_info['cdn']):
            stream_url = cdn_info.get('url')
            if not stream_url:
                continue
            stream_name = cdn_info.get('name') or compat_str(num)
            f4m_stream = self._download_json(
                stream_url, video_id,
                'Download %s stream JSON' % stream_name)
            f4m_url = f4m_stream.get('fileurl')
            if not f4m_url:
                continue
            formats.extend(
                self._extract_f4m_formats(f4m_url + '&hdcore=3.0.3', video_id, f4m_id=stream_name))
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': vod_info['title'],
            'thumbnail': vod_info.get('cover'),
            'uploader': vod_info.get('program_title'),
            'duration': parse_duration(vod_info.get('time')),
            'view_count': int_or_none(vod_info.get('hit')),
            'formats': formats,
        }
class MwaveMeetGreetIE(InfoExtractor):
    """Extractor for mwave meet&greet pages; delegates to MwaveIE for the clip."""
    _VALID_URL = r'https?://mwave\.interest\.me/(?:[^/]+/)?meetgreet/view/(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://mwave.interest.me/meetgreet/view/256',
        'info_dict': {
            'id': '173294',
            'ext': 'flv',
            'title': '[MEET&GREET] Park BoRam',
            'thumbnail': r're:^https?://.*\.jpg$',
            'uploader': 'Mwave',
            'duration': 3634,
            'view_count': int,
        }
    }, {
        'url': 'http://mwave.interest.me/en/meetgreet/view/256',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        page_id = self._match_id(url)
        page = self._download_webpage(url, page_id)
        # The page embeds the actual clip in an iframe; lift its clip_id out
        # and hand the canonical clip URL to the MwaveIE extractor.
        embedded_clip_id = self._html_search_regex(
            r'<iframe[^>]+src="/mnettv/ifr_clip\.m\?searchVideoDetailVO\.clip_id=(\d+)',
            page, 'clip ID')
        return self.url_result(
            MwaveIE._URL_TEMPLATE % embedded_clip_id, 'Mwave', embedded_clip_id)
| gpl-3.0 |
CyanideL/android_kernel_samsung_jf | tools/perf/util/setup.py | 4998 | 1330 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
    # Redirect distutils' build output into the locations given by the
    # PYTHON_EXTBUILD_LIB/TMP environment variables (captured in the
    # module-level build_lib/build_tmp globals below).
    def finalize_options(self):
        _build_ext.finalize_options(self)
        self.build_lib  = build_lib
        self.build_temp = build_tmp
class install_lib(_install_lib):
    # Install straight from the redirected build directory (see build_ext above).
    def finalize_options(self):
        _install_lib.finalize_options(self)
        self.build_dir = build_lib
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()

build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')

# Read the list of C sources (one path per line), skipping blank lines and
# '#' comments.  open() replaces the Python-2-only file() builtin, and the
# 'with' block closes the handle deterministically instead of leaking it.
with open('util/python-ext-sources') as source_list:
    ext_sources = [f.strip() for f in source_list
                   if len(f.strip()) > 0 and f[0] != '#']

perf = Extension('perf',
                 sources = ext_sources,
                 include_dirs = ['util/include'],
                 extra_compile_args = cflags,
                )

setup(name='perf',
      version='0.1',
      description='Interface with the Linux profiling infrastructure',
      author='Arnaldo Carvalho de Melo',
      author_email='acme@redhat.com',
      license='GPLv2',
      url='http://perf.wiki.kernel.org',
      ext_modules=[perf],
      cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
| gpl-2.0 |
dvliman/jaikuengine | .google_appengine/lib/django-1.2/django/contrib/gis/gdal/prototypes/geom.py | 315 | 4821 | import re
from datetime import date
from ctypes import c_char, c_char_p, c_double, c_int, c_ubyte, c_void_p, POINTER
from django.contrib.gis.gdal.envelope import OGREnvelope
from django.contrib.gis.gdal.libgdal import lgdal, GEOJSON
from django.contrib.gis.gdal.prototypes.errcheck import check_bool, check_envelope
from django.contrib.gis.gdal.prototypes.generation import \
const_string_output, double_output, geom_output, int_output, \
srs_output, string_output, void_output
### Generation routines specific to this module ###
def env_func(f, argtypes):
    "For getting OGREnvelopes."
    # Configure the ctypes function pointer in place: envelope routines write
    # into an out-parameter, so there is no meaningful C return value.
    f.argtypes = argtypes
    f.restype = None
    f.errcheck = check_envelope
    return f
def pnt_func(f):
    "For accessing point information."
    # OGR point accessors take (geometry handle, coordinate index) -> double.
    return double_output(f, [c_void_p, c_int])
def topology_func(f):
    "For binary OGR topology routines (Contains, Crosses, ...) returning a boolean int."
    f.argtypes = [c_void_p, c_void_p]
    f.restype = c_int
    # bug fix: was `f.errchck` (typo), which just set an unused attribute and
    # silently skipped installing check_bool as the ctypes errcheck hook.
    f.errcheck = check_bool
    return f
### OGR_G ctypes function prototypes ###
# Each assignment wraps a raw lgdal entry point with argtype/restype/errcheck
# configuration via the generation helpers imported above.

# GeoJSON routines, if supported.
if GEOJSON:
    from_json = geom_output(lgdal.OGR_G_CreateGeometryFromJson, [c_char_p])
    to_json = string_output(lgdal.OGR_G_ExportToJson, [c_void_p], str_result=True)
    to_kml = string_output(lgdal.OGR_G_ExportToKML, [c_void_p, c_char_p], str_result=True)
else:
    # Older GDAL builds without GeoJSON support: callers must test for falsiness.
    from_json = False
    to_json = False
    to_kml = False

# GetX, GetY, GetZ all return doubles.
getx = pnt_func(lgdal.OGR_G_GetX)
gety = pnt_func(lgdal.OGR_G_GetY)
getz = pnt_func(lgdal.OGR_G_GetZ)

# Geometry creation routines.
from_wkb = geom_output(lgdal.OGR_G_CreateFromWkb, [c_char_p, c_void_p, POINTER(c_void_p), c_int], offset=-2)
from_wkt = geom_output(lgdal.OGR_G_CreateFromWkt, [POINTER(c_char_p), c_void_p, POINTER(c_void_p)], offset=-1)
create_geom = geom_output(lgdal.OGR_G_CreateGeometry, [c_int])
clone_geom = geom_output(lgdal.OGR_G_Clone, [c_void_p])
get_geom_ref = geom_output(lgdal.OGR_G_GetGeometryRef, [c_void_p, c_int])
get_boundary = geom_output(lgdal.OGR_G_GetBoundary, [c_void_p])
geom_convex_hull = geom_output(lgdal.OGR_G_ConvexHull, [c_void_p])
geom_diff = geom_output(lgdal.OGR_G_Difference, [c_void_p, c_void_p])
geom_intersection = geom_output(lgdal.OGR_G_Intersection, [c_void_p, c_void_p])
geom_sym_diff = geom_output(lgdal.OGR_G_SymmetricDifference, [c_void_p, c_void_p])
geom_union = geom_output(lgdal.OGR_G_Union, [c_void_p, c_void_p])

# Geometry modification routines.
add_geom = void_output(lgdal.OGR_G_AddGeometry, [c_void_p, c_void_p])
import_wkt = void_output(lgdal.OGR_G_ImportFromWkt, [c_void_p, POINTER(c_char_p)])

# Destroys a geometry
destroy_geom = void_output(lgdal.OGR_G_DestroyGeometry, [c_void_p], errcheck=False)

# Geometry export routines.
to_wkb = void_output(lgdal.OGR_G_ExportToWkb, None, errcheck=True) # special handling for WKB.
to_wkt = string_output(lgdal.OGR_G_ExportToWkt, [c_void_p, POINTER(c_char_p)])
to_gml = string_output(lgdal.OGR_G_ExportToGML, [c_void_p], str_result=True)
get_wkbsize = int_output(lgdal.OGR_G_WkbSize, [c_void_p])

# Geometry spatial-reference related routines.
assign_srs = void_output(lgdal.OGR_G_AssignSpatialReference, [c_void_p, c_void_p], errcheck=False)
get_geom_srs = srs_output(lgdal.OGR_G_GetSpatialReference, [c_void_p])

# Geometry properties
get_area = double_output(lgdal.OGR_G_GetArea, [c_void_p])
get_centroid = void_output(lgdal.OGR_G_Centroid, [c_void_p, c_void_p])
get_dims = int_output(lgdal.OGR_G_GetDimension, [c_void_p])
get_coord_dim = int_output(lgdal.OGR_G_GetCoordinateDimension, [c_void_p])
set_coord_dim = void_output(lgdal.OGR_G_SetCoordinateDimension, [c_void_p, c_int], errcheck=False)
get_geom_count = int_output(lgdal.OGR_G_GetGeometryCount, [c_void_p])
get_geom_name = const_string_output(lgdal.OGR_G_GetGeometryName, [c_void_p])
get_geom_type = int_output(lgdal.OGR_G_GetGeometryType, [c_void_p])
get_point_count = int_output(lgdal.OGR_G_GetPointCount, [c_void_p])
get_point = void_output(lgdal.OGR_G_GetPoint, [c_void_p, c_int, POINTER(c_double), POINTER(c_double), POINTER(c_double)], errcheck=False)
geom_close_rings = void_output(lgdal.OGR_G_CloseRings, [c_void_p], errcheck=False)

# Topology routines.
ogr_contains = topology_func(lgdal.OGR_G_Contains)
ogr_crosses = topology_func(lgdal.OGR_G_Crosses)
ogr_disjoint = topology_func(lgdal.OGR_G_Disjoint)
ogr_equals = topology_func(lgdal.OGR_G_Equals)
ogr_intersects = topology_func(lgdal.OGR_G_Intersects)
ogr_overlaps = topology_func(lgdal.OGR_G_Overlaps)
ogr_touches = topology_func(lgdal.OGR_G_Touches)
ogr_within = topology_func(lgdal.OGR_G_Within)

# Transformation routines.
geom_transform = void_output(lgdal.OGR_G_Transform, [c_void_p, c_void_p])
geom_transform_to = void_output(lgdal.OGR_G_TransformTo, [c_void_p, c_void_p])

# For retrieving the envelope of the geometry.
get_envelope = env_func(lgdal.OGR_G_GetEnvelope, [c_void_p, POINTER(OGREnvelope)])
| apache-2.0 |
ksrajkumar/openerp-6.1 | openerp/addons/point_of_sale/report/pos_receipt.py | 9 | 3023 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from report import report_sxw
import pooler
def titlize(journal_name):
    """Strip the trailing 'journal' token (and anything after it) from a journal name."""
    words = journal_name.split()
    # Drop words off the end until the literal token 'journal' itself is removed.
    while True:
        if words.pop() == 'journal':
            break
    return ' '.join(words)
class order(report_sxw.rml_parse):
    """RML parser for the point-of-sale receipt report.

    Exposes discount/net-amount/journal helpers plus the company address to
    the report template via localcontext.
    """

    def __init__(self, cr, uid, name, context):
        super(order, self).__init__(cr, uid, name, context=context)
        user = pooler.get_pool(cr.dbname).get('res.users').browse(cr, uid, uid)
        partner = user.company_id.partner_id
        self.localcontext.update({
            'time': time,
            'disc': self.discount,
            'net': self.netamount,
            'get_journal_amt': self._get_journal_amt,
            'address': partner.address and partner.address[0] or False,
            'titlize': titlize
        })

    def netamount(self, order_line_id):
        """Return qty * unit price for the given order line id."""
        sql = 'select (qty*price_unit) as net_price from pos_order_line where id = %s'
        self.cr.execute(sql, (order_line_id,))
        res = self.cr.fetchone()
        return res[0]

    def discount(self, order_id):
        """Return the total discount amount over all lines of the order."""
        sql = 'select discount, price_unit, qty from pos_order_line where order_id = %s '
        self.cr.execute(sql, (order_id,))
        res = self.cr.fetchall()
        dsum = 0
        for line in res:
            if line[0] != 0:
                # discount is a percentage of the unit price, applied per qty.
                dsum = dsum + (line[2] * (line[0]*line[1]/100))
        return dsum

    def _get_journal_amt(self, order_id):
        """Return journal name and amount for each statement line of the order."""
        # Parameterized like the other queries in this class; the previous
        # '%d' % order_id string interpolation was inconsistent and unsafe.
        sql = """ select aj.name,absl.amount as amt from account_bank_statement as abs
                LEFT JOIN account_bank_statement_line as absl ON abs.id = absl.statement_id
                LEFT JOIN account_journal as aj ON aj.id = abs.journal_id
                WHERE absl.pos_statement_id = %s"""
        self.cr.execute(sql, (order_id,))
        return self.cr.dictfetchall()
# Register the POS receipt report, rendered from the RML template by the order parser above.
report_sxw.report_sxw('report.pos.receipt', 'pos.order', 'addons/point_of_sale/report/pos_receipt.rml', parser=order, header=False)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | agpl-3.0 |
Passtechsoft/TPEAlpGen | blender/scons/scons-local/SCons/Tool/cvf.py | 6 | 2463 | """engine.SCons.Tool.cvf
Tool-specific initialization for the Compaq Visual Fortran compiler.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/cvf.py 2014/07/05 09:42:21 garyo"

import fortran

# Compiler executables this tool can detect (see exists() below).
compilers = ['f90']
def generate(env):
    """Add Builders and construction variables for compaq visual fortran to an Environment."""
    # Base Fortran support first; the CVF-specific settings below override it.
    fortran.generate(env)

    cvf_settings = {
        'FORTRAN':        'f90',
        'FORTRANCOM':     '$FORTRAN $FORTRANFLAGS $_FORTRANMODFLAG $_FORTRANINCFLAGS /compile_only ${SOURCES.windows} /object:${TARGET.windows}',
        'FORTRANPPCOM':   '$FORTRAN $FORTRANFLAGS $CPPFLAGS $_CPPDEFFLAGS $_FORTRANMODFLAG $_FORTRANINCFLAGS /compile_only ${SOURCES.windows} /object:${TARGET.windows}',
        'SHFORTRANCOM':   '$SHFORTRAN $SHFORTRANFLAGS $_FORTRANMODFLAG $_FORTRANINCFLAGS /compile_only ${SOURCES.windows} /object:${TARGET.windows}',
        'SHFORTRANPPCOM': '$SHFORTRAN $SHFORTRANFLAGS $CPPFLAGS $_CPPDEFFLAGS $_FORTRANMODFLAG $_FORTRANINCFLAGS /compile_only ${SOURCES.windows} /object:${TARGET.windows}',
        'OBJSUFFIX':      '.obj',
        'FORTRANMODDIR':       '${TARGET.dir}',
        'FORTRANMODDIRPREFIX': '/module:',
        'FORTRANMODDIRSUFFIX': '',
    }
    for key in cvf_settings:
        env[key] = cvf_settings[key]
def exists(env):
    # Tool is usable iff one of the known compiler executables is on the path.
    return env.Detect(compilers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-3.0 |
luisibanez/SahanaEden | modules/geopy/parsers/gpx.py | 25 | 12748 | from geopy import Point
from geopy.util import NULL_HANDLER
from geopy.parsers.iso8601 import parse_iso8601
import sys, re, logging
from xml.etree import ElementTree
# Module-level logger; NULL_HANDLER suppresses "no handlers found" warnings
# for library consumers that have not configured logging.
log = logging.getLogger(__name__)
log.addHandler(NULL_HANDLER)
class VersionError(Exception):
    """Raised when a GPX document's version attribute is unsupported."""
    pass
class Waypoint(Point):
    '''
    A `Waypoint` is a geopy `Point` with additional waypoint metadata as
    defined by the GPX format specification.
    '''
    # (GPX child element name, Waypoint attribute name) pairs for the
    # optional metadata copied over in from_xml_names, in fixed order.
    _OPTIONAL_FIELDS = (
        ('time', 'timestamp'),
        ('name', 'name'),
        ('desc', 'description'),
        ('cmt', 'comment'),
        ('src', 'source'),
        ('sym', 'symbol'),
        ('type', 'classification'),
        ('fix', 'fix'),
        ('sat', 'num_satellites'),
        ('ageofdgpsdata', 'age'),
        ('dgpsid', 'dgpsid'),
    )

    @classmethod
    def from_xml_names(cls, attrs, children):
        '''
        Construct a new Waypoint from dictionaries of attribute and child
        element names corresponding to GPX waypoint information, as parsed
        by the `GPX` class.
        '''
        # lat/lon are required attributes; elevation is an optional child.
        waypoint = cls(attrs['lat'], attrs['lon'], children.get('ele'))
        for tag, attribute in cls._OPTIONAL_FIELDS:
            if tag in children:
                setattr(waypoint, attribute, children[tag])
        return waypoint
class _Attr(object):
    '''
    Value wrapper for allowing interfaces to access attribute values with
    `obj.text`
    '''
    def __init__(self, value):
        # Mirrors ElementTree's Element.text so XML attribute values can be
        # fed through the same type handlers that consume child elements.
        self.text = value
class GPX(object):
    """Parser for GPX 1.1 documents built on xml.etree.ElementTree.

    NOTE(review): this class looks unfinished — several parser methods are
    stubs (`pass`), the `waypoints` property is defined twice, and a few
    attribute references appear stale.  Review notes are inlined below.
    """
    GPX_NS = "http://www.topografix.com/GPX/1/1"
    FILE_EXT = '.gpx'
    MIME_TYPE = 'application/gpx+xml'
    VERSION = '1.1'
    FIX_TYPES = set(('none', '2d', '3d', 'dgps', 'pps'))
    DECIMAL_RE = re.compile(r'([+-]?\d*\.?\d+)$')

    # Each "type tuple" is a tuple of two items:
    # 1. Dictionary of attributes in the type
    # 2. Dictionary of child elements that can appear in the type
    # A list value (e.g. ['waypoint']) marks a repeatable child element.
    GPX_TYPE = ({'version': 'string', 'creator': 'string'}, {
        'metadata': 'metadata', 'wpt': ['waypoint'], 'rte': ['route'],
        'trk': ['track'], 'extensions': 'extensions'
    })
    METADATA_TYPE = ({}, {
        'name': 'string', 'desc': 'string', 'author': 'person',
        'copyright': 'copyright', 'link': ['link'], 'time': 'datetime',
        'keywords': 'string', 'bounds': 'bounds', 'extensions': 'extensions'
    })
    WAYPOINT_TYPE = ({'lat': 'decimal', 'lon': 'decimal'}, {
        'ele': 'decimal', 'time': 'datetime', 'magvar': 'degrees',
        'geoidheight': 'decimal', 'name': 'string', 'cmt': 'string',
        'desc': 'string', 'src': 'string', 'link': ['link'], 'sym': 'string',
        'type': 'string', 'fix': 'fix', 'sat': 'unsigned', 'hdop': 'decimal',
        'vdop': 'decimal', 'pdop': 'decimal', 'ageofdgpsdata': 'decimal',
        'dgpsid': 'dgpsid', 'extensions': 'extensions'
    })
    ROUTE_TYPE = ({}, {
        'name': 'string', 'cmt': 'string', 'desc': 'string', 'src': 'string',
        'link': ['link'], 'number': 'unsigned', 'type': 'string',
        'extensions': 'extensions', 'rtept': ['waypoint']
    })
    TRACK_TYPE = ({}, {
        'name': 'string', 'cmt': 'string', 'desc': 'string', 'src': 'string',
        'link': ['link'], 'number': 'unsigned', 'type': 'string',
        'extensions': 'extensions', 'trkseg': ['segment']
    })
    TRACK_SEGMENT_TYPE = ({},
        {'trkpt': ['waypoint'], 'extensions': 'extensions'}
    )
    COPYRIGHT_TYPE = (
        {'author': 'string'}, {'year': 'year', 'license': 'uri'}
    )
    LINK_TYPE = ({'href': 'uri'}, {'text': 'string', 'type': 'string'})
    EMAIL_TYPE = ({'id': 'string', 'domain': 'string'}, {})
    PERSON_TYPE = ({}, {'name': 'string', 'email': 'email', 'link': 'link'})
    POINT_TYPE = ({'lat': 'longitude', 'lon': 'longitude'},
        {'ele': 'decimal', 'time': 'datetime'}
    )
    POINT_SEGMENT_TYPE = ({}, {'pt': ['point']})
    BOUNDS_TYPE = ({
        'minlat': 'latitude', 'minlon': 'longitude',
        'maxlat': 'latitude', 'maxlon': 'longitude'
    }, {})

    def __init__(self, document=None, cache=True):
        # When `cache` is true, parsed waypoints/routes/tracks are memoized
        # in the dictionaries below, keyed by sequential integer index.
        self.cache = cache
        self._waypoints = {}
        self._routes = {}
        self._tracks = {}
        # Maps type-tuple handler names to bound parsing callables.
        self.type_handlers = {
            'string': lambda e: e.text,
            'uri': lambda e: e.text,
            'datetime': self._parse_datetime_element,
            'decimal': self._parse_decimal,
            'dgpsid': self._parse_dgps_station,
            'email': self._parse_email,
            'link': self._parse_link,
            'year': self._parse_int,
            'waypoint': self._parse_waypoint,
            'segment': self._parse_segment,
            'unsigned': self._parse_unsigned,
            'degrees': self._parse_degrees,
            'fix': self._parse_fix,
            'extensions': self._parse_noop,
        }
        if document is not None:
            self.open(document)

    def open(self, string_or_file):
        # Accepts a raw XML string, an Element, or a file-like object.
        # NOTE(review): fromstring() returns an Element, which has no
        # getroot(); the first branch likely breaks at the getroot() call
        # below — confirm and normalize to an ElementTree.
        if isinstance(string_or_file, basestring):
            string_or_file = ElementTree.fromstring(string_or_file)
        elif not ElementTree.iselement(string_or_file):
            string_or_file = ElementTree.parse(string_or_file)
        if string_or_file.getroot().tag == self._get_qname('gpx'):
            self._root = string_or_file.getroot()

    @property
    def version(self):
        # Lazily validated; only the exact VERSION string is accepted.
        if not hasattr(self, '_version'):
            version = self._root.get('version')
            if version == self.VERSION:
                self._version = version
            else:
                raise VersionError("%r" % (version,))
        return self._version

    @property
    def creator(self):
        if not hasattr(self, '_creator'):
            self._creator = self._root.get('creator')
        return self._creator

    @property
    def metadata(self):
        if not hasattr(self, '_metadata'):
            metadata_qname = self._get_qname('metadata')
            metadata = {}
            element = self._root.find(metadata_qname)
            if element is not None:
                # NOTE(review): self.METADATA is not defined anywhere in this
                # class — presumably METADATA_TYPE was intended; as written
                # this raises AttributeError when metadata is present.
                single, multi = self.METADATA
                metadata.update(self._child_dict(element, single, multi))
                # NOTE(review): this loop is a no-op (`metadata[tag] =
                # metadata[tag]`); it probably meant to extract `.text`.
                for tag in ('name', 'desc', 'time', 'keywords'):
                    if tag in metadata:
                        metadata[tag] = metadata[tag]
                if 'time' in metadata:
                    metadata['time'] = self._parse_datetime(metadata['time'])
            self._metadata = metadata
        return self._metadata

    # NOTE(review): this `waypoints` property is shadowed by the second
    # definition of the same name further down — only the later one is live.
    @property
    def waypoints(self):
        tag = self._get_qname('wpt')
        return self._cache_parsed(tag, self._parse_waypoint, self._waypoints)

    def _parse_waypoint(self, element):
        # NOTE(review): incomplete stub — builds a Point but returns None.
        waypoint = {}
        point = Point(element.get('lat'), element.get('lon'))

    def _parse_segment(self, element):
        # NOTE(review): unimplemented stub.
        pass

    @property
    def routes(self):
        tag = self._get_qname('rte')
        return self._cache_parsed(tag, self._parse_route, self._routes)

    def _parse_route(self, element):
        # NOTE(review): unimplemented stub.
        pass

    @property
    def route_names(self):
        # Yields the <name> text of every <rte> element in document order.
        for route in self._root.findall(self._get_qname('rte')):
            yield route.findtext(self._get_qname('name'))

    @property
    def waypoints(self):
        return self.get_waypoints()

    def get_waypoints(self, route=None):
        # With no route: top-level <wpt> elements; with a route: its <rtept>s.
        if route is None:
            root = self._root
            waypoint_name = self._get_qname('wpt')
        else:
            root = self.get_route_by_name(route)
            waypoint_name = self._get_qname('rtept')
        for rtept in root.findall(waypoint_name):
            attrs, children = self._parse_type(rtept, self.WAYPOINT_TYPE)
            yield Waypoint.from_xml_names(attrs, children)

    def get_route_by_name(self, route):
        # `route` is either a name, or a (name, index) pair disambiguating
        # between several routes sharing the same name.
        if isinstance(route, basestring):
            name = route
            index = 0
        else:
            name, index = route
        seen_index = 0
        for rte in self._root.findall(self._get_qname('rte')):
            rname = rte.findtext(self._get_qname('name'))
            if rname == name:
                if not seen_index == index:
                    seen_index = seen_index + 1
                else:
                    return rte
        return None

    @property
    def tracks(self):
        # NOTE(review): queries the 'rte' (route) tag — almost certainly
        # should be 'trk' for tracks; confirm against the GPX schema.
        tag = self._get_qname('rte')
        return self._cache_parsed(tag, self._parse_track, self._tracks)

    def _parse_track(self, element):
        # NOTE(review): unimplemented stub.
        pass

    def _parse_type(self, element, type_def):
        # Generic driver: applies the handler table to an element's
        # attributes and children per the (attrs, children) type tuple.
        attr_types, child_types = type_def
        attrs = {}
        children = {}
        for attr, handler in attr_types.iteritems():
            value = element.get(attr)
            type_func = self.type_handlers[handler]
            # Attribute values are wrapped in _Attr so handlers can use .text.
            attrs[attr] = type_func(_Attr(value))
        for tag, handler in child_types.iteritems():
            values = []
            all = False
            if isinstance(handler, list):
                # List-typed handler => repeatable element; keep every value.
                all = True
                type_func = self.type_handlers[handler[0]]
            else:
                type_func = self.type_handlers[handler]
            for e in element.findall(self._get_qname(tag)):
                values.append(type_func(e))
            if len(values) > 0:
                if all:
                    children[tag] = values
                else:
                    # Singular element: last occurrence wins.
                    children[tag] = values[-1]
        return attrs, children

    @property
    def extensions(self):
        # NOTE(review): incomplete stub — computes the qname, returns None.
        extensions_qname = self._get_qname('extensions')

    def _cache_parsed(self, tag, parse_func, cache):
        # First replay whatever is already cached, then keep parsing where
        # the cache left off.  NOTE(review): assumes cache keys are the
        # contiguous integers 0..len(cache)-1.
        i = -1
        for i in xrange(len(cache)):
            item = cache[i]
            if item is not None:
                yield item
        for element in self._root:
            if element.tag == tag:
                i += 1
                item = parse_func(element)
                if self.cache:
                    cache[i] = item
                if item is not None:
                    yield item

    def _parse_decimal(self, element):
        value = element.text
        match = re.match(self.DECIMAL_RE, value)
        if match:
            return float(match.group(1))
        else:
            raise ValueError("Invalid decimal value: %r" % (value,))

    def _parse_degrees(self, element):
        # Degrees are decimals constrained to the [0, 360] range.
        value = self._parse_decimal(element)
        if 0 <= value <= 360:
            return value
        else:
            raise ValueError("Value out of range [0, 360]: %r" % (value,))

    def _parse_dgps_station(self, element):
        # DGPS station ids are 10-bit integers per the GPX schema.
        value = int(element.text)
        if 0 <= value <= 1023:
            return value
        else:
            raise ValueError("Value out of range [0, 1023]: %r" % (value,))

    def _parse_datetime(self, value):
        return parse_iso8601(value)

    def _parse_datetime_element(self, element):
        return self._parse_datetime(element.text)

    def _parse_email(self, element):
        # GPX 1.1 splits emails into id/domain attributes; fall back to text.
        value = element.text
        if not value:
            name = element.get('id')
            domain = element.get('domain')
            if name and domain:
                return '@'.join((name, domain))
        return value or None

    def _parse_link(self, element):
        # NOTE(review): unimplemented stub.
        pass

    def _parse_int(self, element):
        return int(element.text)

    def _parse_unsigned(self, element):
        # NOTE(review): identical to _parse_int — no non-negativity check.
        return int(element.text)

    def _parse_fix(self, element):
        value = element.text
        if value in self.FIX_TYPES:
            return value
        else:
            raise ValueError("Value is not a valid fix type: %r" % (value,))

    def _parse_string(self, element):
        return element.text

    def _parse_noop(self, element):
        # Pass the raw element through unchanged (used for <extensions>).
        return element

    def _child_dict(self, element, single, multi):
        # Collect children into {local name: element} for singular tags and
        # {local name: [elements]} for repeatable ones; stops early once all
        # singular tags have been seen and no repeatable tags are expected.
        single = dict([(self._get_qname(tag), tag) for tag in single])
        multi = dict([(self._get_qname(tag), tag) for tag in multi])
        limit = len(single)
        d = {}
        if limit or multi:
            for child in element:
                if child.tag in single:
                    name = single.pop(child.tag)
                    d[name] = child
                    limit -= 1
                elif child.tag in multi:
                    name = multi[child.tag]
                    d.setdefault(name, []).append(child)
                if not limit and not multi:
                    break
        return d

    def _get_qname(self, name):
        # Fully-qualified "{namespace}tag" name for ElementTree lookups.
        return "{%s}%s" % (self.GPX_NS, name)
| mit |
django-nonrel/django | tests/template_tests/test_nodelist.py | 122 | 2560 | from django.template import VariableNode, Context
from django.template.loader import get_template_from_string
from django.utils.unittest import TestCase
from django.test.utils import override_settings
class NodelistTest(TestCase):
    """Checks that get_nodes_by_type() finds VariableNodes nested inside block tags."""

    def assertSingleVariableNode(self, source):
        """Compile `source` and assert its nodelist contains exactly one VariableNode."""
        template = get_template_from_string(source)
        vars = template.nodelist.get_nodes_by_type(VariableNode)
        self.assertEqual(len(vars), 1)

    # The four cases below were previously copy-pasted bodies; each now
    # delegates to the shared assertion helper.
    def test_for(self):
        self.assertSingleVariableNode('{% for i in 1 %}{{ a }}{% endfor %}')

    def test_if(self):
        self.assertSingleVariableNode('{% if x %}{{ a }}{% endif %}')

    def test_ifequal(self):
        self.assertSingleVariableNode('{% ifequal x y %}{{ a }}{% endifequal %}')

    def test_ifchanged(self):
        self.assertSingleVariableNode('{% ifchanged x %}{{ a }}{% endifchanged %}')
class ErrorIndexTest(TestCase):
    """
    Checks whether index of error is calculated correctly in
    template debugger in for loops. Refs ticket #5831
    """
    @override_settings(DEBUG=True, TEMPLATE_DEBUG = True)
    def test_correct_exception_index(self):
        # Each case pairs a template source (whose {% badsimpletag %} raises)
        # with the expected (start, end) character offsets of the failing tag.
        tests = [
            ('{% load bad_tag %}{% for i in range %}{% badsimpletag %}{% endfor %}', (38, 56)),
            ('{% load bad_tag %}{% for i in range %}{% for j in range %}{% badsimpletag %}{% endfor %}{% endfor %}', (58, 76)),
            ('{% load bad_tag %}{% for i in range %}{% badsimpletag %}{% for j in range %}Hello{% endfor %}{% endfor %}', (38, 56)),
            ('{% load bad_tag %}{% for i in range %}{% for j in five %}{% badsimpletag %}{% endfor %}{% endfor %}', (38, 57)),
            ('{% load bad_tag %}{% for j in five %}{% badsimpletag %}{% endfor %}', (18, 37)),
        ]
        context = Context({
            'range': range(5),
            'five': 5,
        })
        for source, expected_error_source_index in tests:
            template = get_template_from_string(source)
            try:
                template.render(context)
            except (RuntimeError, TypeError) as e:
                # With TEMPLATE_DEBUG on, the exception is annotated with
                # (origin, (start, end)); index 1 is the offending span.
                error_source_index = e.django_template_source[1]
                self.assertEqual(error_source_index,
                                 expected_error_source_index)
| bsd-3-clause |
Nicolas570/chris_db | components/faker/faker/tests/__init__.py | 2 | 14415 | # coding=utf-8
from __future__ import unicode_literals
__loader__ = None
import datetime
import json
import os
import time
import unittest
import sys
try:
from mock import patch
except ImportError:
from unittest.mock import patch
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from faker import Generator, Factory
from faker.generator import random
from faker.utils import text, decorators
try:
string_types = (basestring,)
except NameError:
string_types = (str,)
TEST_DIR = os.path.dirname(__file__)
class BarProvider(object):
    """Test provider whose formatter shadows FooProvider's, exercising add-order priority."""

    def foo_formatter(self):
        # Deliberately the reversed marker string, so tests can tell which
        # provider's formatter won.
        return 'barfoo'
class FooProvider(object):
    """Minimal provider used to exercise Generator formatter lookup and argument passing."""

    def foo_formatter(self):
        return 'foobar'

    def foo_formatter_with_arguments(self, param='', append=''):
        # Concatenate in a fixed order so tests can assert the exact output.
        return ''.join(('baz', param, append))
class ShimsTestCase(unittest.TestCase):
    """Exercises the Counter shim shipped for Pythons lacking collections.Counter."""

    def test_counter(self):
        from faker.shims import Counter
        # Arithmetic combinators: +, - (negative counts dropped), |, &.
        result = Counter('abbb') + Counter('bcc')
        self.assertEqual(result, Counter({'b': 4, 'c': 2, 'a': 1}))
        result = Counter('abbbc') - Counter('bccd')
        self.assertEqual(result, Counter({'b': 2, 'a': 1}))
        result = Counter('abbb') | Counter('bcc')
        self.assertEqual(result, Counter({'b': 3, 'c': 2, 'a': 1}))
        result = Counter('abbb') & Counter('bcc')
        self.assertEqual(result, Counter({'b': 1}))

        # most_common / elements accessors.
        result = sorted(Counter('abracadabra').most_common(3))
        self.assertEqual(result, [('a', 5), ('b', 2), ('r', 2)])
        result = sorted(Counter('ABCABC').elements())
        self.assertEqual(result, ['A', 'A', 'B', 'B', 'C', 'C'])

        # update() accepts both iterables and other Counters.
        counter = Counter('which')
        counter.update('witch')
        d = Counter('watch')
        counter.update(d)
        self.assertEqual(counter['h'], 4)
class UtilsTestCase(unittest.TestCase):
    """Covers faker.utils helpers: weighted choice, dict merging, and provider/locale discovery."""

    def test_choice_distribution(self):
        from faker.utils.distribution import choice_distribution
        a = ('a', 'b', 'c', 'd')
        p = (0.5, 0.2, 0.2, 0.1)

        sample = choice_distribution(a, p)
        self.assertTrue(sample in a)

        # Seed the RNG from a fixture so the 100-draw histogram below is
        # reproducible; state[1] must be a tuple for random.setstate().
        with open(os.path.join(TEST_DIR, 'random_state.json'), 'r') as fh:
            random_state = json.load(fh)
        random_state[1] = tuple(random_state[1])
        random.setstate(random_state)
        samples = [choice_distribution(a, p) for i in range(100)]
        a_pop = len([i for i in samples if i == 'a'])
        b_pop = len([i for i in samples if i == 'b'])
        c_pop = len([i for i in samples if i == 'c'])
        d_pop = len([i for i in samples if i == 'd'])

        # Each outcome's frequency must fall within +/- tolerance of its
        # expected percentage.
        boundaries = []
        tolerance = 5
        for probability in p:
            boundaries.append([100 * probability + tolerance, 100 * probability - tolerance])

        self.assertTrue(boundaries[0][0] > a_pop > boundaries[0][1])
        self.assertTrue(boundaries[1][0] > b_pop > boundaries[1][1])
        self.assertTrue(boundaries[2][0] > c_pop > boundaries[2][1])
        self.assertTrue(boundaries[3][0] > d_pop > boundaries[3][1])

    def test_add_dicts(self):
        from faker.utils.datasets import add_dicts
        # Values for shared keys are summed, not overwritten.
        t1 = {'a':1, 'b':2}
        t2 = {'b':1, 'c':3}
        t3 = {'d':4}

        result = add_dicts(t1, t2, t3)
        self.assertEqual(result, {'a': 1, 'c': 3, 'b': 3, 'd': 4})

    def test_find_available_locales(self):
        from faker.utils.loading import find_available_locales
        from faker.config import PROVIDERS

        result = find_available_locales(PROVIDERS)
        self.assertNotEqual(len(result), 0)

    def test_find_available_providers(self):
        from faker.utils.loading import find_available_providers
        from faker.config import META_PROVIDERS_MODULES
        from importlib import import_module

        modules = [import_module(path) for path in META_PROVIDERS_MODULES]
        providers = find_available_providers(modules)

        # Discovery must return exactly the known provider modules, in order.
        expected_providers = list(map(str, [
            'faker.providers.address',
            'faker.providers.barcode',
            'faker.providers.color',
            'faker.providers.company',
            'faker.providers.credit_card',
            'faker.providers.currency',
            'faker.providers.date_time',
            'faker.providers.file',
            'faker.providers.internet',
            'faker.providers.job',
            'faker.providers.lorem',
            'faker.providers.misc',
            'faker.providers.person',
            'faker.providers.phone_number',
            'faker.providers.profile',
            'faker.providers.python',
            'faker.providers.ssn',
            'faker.providers.user_agent',
        ]))
        self.assertEqual(providers, expected_providers)
class FactoryTestCase(unittest.TestCase):
    """End-to-end checks of Generator formatting plus assorted helpers."""

    def setUp(self):
        # One generator with a single, predictable provider per test.
        self.generator = Generator()
        self.provider = FooProvider()
        self.generator.add_provider(self.provider)

    def test_add_provider_gives_priority_to_newly_added_provider(self):
        self.generator.add_provider(BarProvider())
        self.assertEqual('barfoo', self.generator.format('foo_formatter'))

    def test_get_formatter_returns_callable(self):
        formatter = self.generator.get_formatter('foo_formatter')
        is_callable = hasattr(formatter, '__call__')
        is_descriptor = isinstance(formatter, (classmethod, staticmethod))
        self.assertTrue(is_callable or is_descriptor)

    def test_get_formatter_returns_correct_formatter(self):
        self.assertEqual(self.provider.foo_formatter,
                         self.generator.get_formatter('foo_formatter'))

    def test_get_formatter_throws_exception_on_incorrect_formatter(self):
        self.assertRaises(AttributeError,
                          self.generator.get_formatter, 'barFormatter')

    def test_format_calls_formatter_on_provider(self):
        self.assertEqual('foobar', self.generator.format('foo_formatter'))

    def test_format_transfers_arguments_to_formatter(self):
        formatted = self.generator.format('foo_formatter_with_arguments',
                                          'foo', append='!')
        self.assertEqual('bazfoo!', formatted)

    def test_parse_returns_same_string_when_it_contains_no_curly_braces(self):
        self.assertEqual('fooBar#?', self.generator.parse('fooBar#?'))

    def test_parse_returns_string_with_tokens_replaced_by_formatters(self):
        parsed = self.generator.parse(
            'This is {{foo_formatter}} a text with "{{ foo_formatter }}"')
        self.assertEqual('This is foobar a text with " foobar "', parsed)

    # def testParseReturnsStringWithTokensReplacedByFormatterWithArguments(self):
    #     result = self.generator.parse(
    #         'This is {{foo_formatter_with_arguments:bar}}')
    #     self.assertEqual('This is foobar', result)

    def test_magic_call_calls_format(self):
        self.assertEqual('foobar', self.generator.foo_formatter())

    def test_magic_call_calls_format_with_arguments(self):
        self.assertEqual('bazfoo',
                         self.generator.foo_formatter_with_arguments('foo'))

    def test_documentor(self):
        from faker.cli import print_doc
        output = StringIO()
        print_doc(output=output)
        print_doc('address', output=output)
        print_doc('faker.providers.person.it_IT', output=output)
        assert output.getvalue()
        self.assertRaises(AttributeError,
                          self.generator.get_formatter,
                          'barFormatter')

    def test_command(self):
        from faker.cli import Command
        orig_stdout = sys.stdout
        try:
            sys.stdout = StringIO()
            command = Command(['faker', 'address'])
            command.execute()
            assert sys.stdout.getvalue()
        finally:
            sys.stdout = orig_stdout

    def test_command_custom_provider(self):
        from faker.cli import Command
        orig_stdout = sys.stdout
        try:
            sys.stdout = StringIO()
            command = Command(['faker', 'foo', '-i', 'faker.tests.mymodule.en_US'])
            command.execute()
            assert sys.stdout.getvalue()
        finally:
            sys.stdout = orig_stdout

    def test_slugify(self):
        self.assertEqual(text.slugify("a'b/c"), 'abc')
        self.assertEqual(text.slugify("àeìöú"), 'aeiou')
        self.assertEqual(text.slugify("àeì.öú"), 'aeiou')
        self.assertEqual(text.slugify("àeì.öú", allow_dots=True), 'aei.ou')

        @decorators.slugify
        def fn(s):
            return s

        self.assertEqual(fn("a'b/c"), 'abc')

        @decorators.slugify_domain
        def fn(s):
            return s

        self.assertEqual(fn("a'b/.c"), 'ab.c')

    def test_random_element(self):
        from faker.providers import BaseProvider
        provider = BaseProvider(None)

        # plain sequence
        choices = ('a', 'b', 'c', 'd')
        self.assertTrue(provider.random_element(choices) in choices)

        # weighted by integer counts
        choices = {'a': 5, 'b': 2, 'c': 2, 'd': 1}
        self.assertTrue(provider.random_element(choices) in choices)

        # weighted by float probabilities
        choices = {'a': 0.5, 'b': 0.2, 'c': 0.2, 'd': 0.1}
        self.assertTrue(provider.random_element(choices) in choices)

    def test_datetime_safe(self):
        from faker.utils import datetime_safe
        # test using example provided in module
        result = datetime_safe.date(1850, 8, 2).strftime('%Y/%m/%d was a %A')
        self.assertEqual(result, '1850/08/02 was a Friday')
        # test against certain formatting strings used on pre-1900 dates
        # (the lambda keeps assertRaises compatible with Python 2.6;
        # on 2.7/3.x a `with self.assertRaises(TypeError):` would also work)
        self.assertRaises(
            TypeError,
            lambda: datetime_safe.date(1850, 8, 2).strftime('%s'))
        self.assertRaises(
            TypeError,
            lambda: datetime_safe.date(1850, 8, 2).strftime('%y'))
        # test using 29-Feb-2012 and escaped percentage sign
        result = datetime_safe.date(2012, 2, 29).strftime('%Y-%m-%d was a 100%% %A')
        self.assertEqual(result, r'2012-02-29 was a 100% Wednesday')
        # test that certain formatting strings are allowed on post-1900 dates
        result = datetime_safe.date(2008, 2, 29).strftime('%y')
        self.assertEqual(result, r'08')

    def test_date_time_between_dates(self):
        from faker.providers.date_time import Provider
        provider = Provider
        timestamp_start = random.randint(0, 10000000000)
        timestamp_end = timestamp_start + 1
        datetime_start = datetime.datetime.fromtimestamp(timestamp_start)
        datetime_end = datetime.datetime.fromtimestamp(timestamp_end)
        picked = provider.date_time_between_dates(datetime_start, datetime_end)
        self.assertTrue(datetime_start <= picked)
        self.assertTrue(datetime_end >= picked)

    def _datetime_to_time(self, value):
        # helper: datetime -> integer unix timestamp (second resolution)
        return int(time.mktime(value.timetuple()))

    def test_date_time_this_period(self):
        from faker.providers.date_time import Provider
        provider = Provider
        to_ts = self._datetime_to_time
        # re-evaluated on each call so "now" stays current per assertion
        now = lambda: to_ts(datetime.datetime.now())
        # test century
        self.assertTrue(to_ts(provider.date_time_this_century(after_now=False)) <= now())
        self.assertTrue(to_ts(provider.date_time_this_century(before_now=False, after_now=True)) >= now())
        # test decade
        self.assertTrue(to_ts(provider.date_time_this_decade(after_now=False)) <= now())
        self.assertTrue(to_ts(provider.date_time_this_decade(before_now=False, after_now=True)) >= now())
        self.assertEqual(to_ts(provider.date_time_this_decade(before_now=False, after_now=False)),
                         now())
        # test year
        self.assertTrue(to_ts(provider.date_time_this_year(after_now=False)) <= now())
        self.assertTrue(to_ts(provider.date_time_this_year(before_now=False, after_now=True)) >= now())
        self.assertEqual(to_ts(provider.date_time_this_year(before_now=False, after_now=False)),
                         now())
        # test month
        self.assertTrue(to_ts(provider.date_time_this_month(after_now=False)) <= now())
        self.assertTrue(to_ts(provider.date_time_this_month(before_now=False, after_now=True)) >= now())
        self.assertEqual(to_ts(provider.date_time_this_month(before_now=False, after_now=False)),
                         now())

    def test_prefix_suffix_always_string(self):
        # Locales known to contain `*_male` and `*_female`.
        for locale in ("bg_BG", "dk_DK", "en", "ru_RU", "tr_TR"):
            fake = Factory.create(locale=locale)
            for _ in range(20):  # Probabilistic testing.
                assert isinstance(fake.prefix(), string_types)
                assert isinstance(fake.suffix(), string_types)

    def test_no_words_sentence(self):
        # NOTE(review): despite the name this checks paragraph(0), mirroring
        # the original test exactly.
        from faker.providers.lorem import Provider
        provider = Provider(None)
        self.assertEqual(provider.paragraph(0), '')

    def test_no_words_paragraph(self):
        # NOTE(review): despite the name this checks sentence(0), mirroring
        # the original test exactly.
        from faker.providers.lorem import Provider
        provider = Provider(None)
        self.assertEqual(provider.sentence(0), '')
class GeneratorTestCase(unittest.TestCase):
    """Behaviour of Generator that is independent of any provider."""

    def setUp(self):
        self.generator = Generator()

    @patch('random.seed')
    def test_random_seed_doesnt_seed_system_random(self, mock_system_random):
        # Seeding the generator must not touch the shared module-level
        # random state.
        self.generator.seed(0)
        self.assertFalse(mock_system_random.called)
if __name__ == '__main__':
    # Run the whole test suite when this file is executed directly.
    unittest.main()
| mit |
thinkgen/thirdparty | script.module.urlresolver/lib/urlresolver/plugins/movshare.py | 3 | 4053 | """
urlresolver XBMC Addon
Copyright (C) 2011 t0mm0
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
"""
RogerThis - 14/8/2011
Site: http://www.movshare.net
movshare hosts both avi and flv videos
"""
import re, urllib2, os
from t0mm0.common.net import Net
from urlresolver.plugnplay.interfaces import UrlResolver
from urlresolver.plugnplay.interfaces import PluginSettings
from urlresolver.plugnplay import Plugin
from urlresolver import common
from lib import unwise
from lib import jsunpack
#SET ERROR_LOGO# THANKS TO VOINAGE, BSTRDMKR, ELDORADO
# Path to the fallback "red X" image shown in the error popups below.
error_logo = os.path.join(common.addon_path, 'resources', 'images', 'redx.png')
class MovshareResolver(Plugin, UrlResolver, PluginSettings):
implements = [UrlResolver, PluginSettings]
name = "movshare"
def __init__(self):
p = self.get_setting('priority') or 100
self.priority = int(p)
self.net = Net()
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
""" Human Verification """
try:
self.net.http_HEAD(web_url)
html = self.net.http_GET(web_url).content
"""movshare can do both flv and avi. There is no way I know before hand
if the url going to be a flv or avi. So the first regex tries to find
the avi file, if nothing is present, it will check for the flv file.
"param name="src" is for avi
"flashvars.file=" is for flv
"""
r = re.search('<param name="src" value="(.+?)"', html)
if not r:
html = unwise.unwise_process(html)
html = re.compile(r'eval\(function\(p,a,c,k,e,(?:d|r)\).+?\.split\(\'\|\'\).*?\)\)').search(html).group()
html = jsunpack.unpack(html)
filekey = unwise.resolve_var(html, "flashvars.filekey")
#get stream url from api
api = 'http://www.movshare.net/api/player.api.php?key=%s&file=%s' % (filekey, media_id)
html = self.net.http_GET(api).content
r = re.search('url=(.+?)&title', html)
if r:
stream_url = r.group(1)
else:
raise Exception ('File Not Found or removed')
return stream_url
except urllib2.URLError, e:
common.addon.log_error(self.name + ': got http error %d fetching %s' %
(e.code, web_url))
common.addon.show_small_popup('Error','Http error: '+str(e), 5000, error_logo)
return self.unresolvable(code=3, msg=e)
except Exception, e:
common.addon.log_error('**** Movshare Error occured: %s' % e)
common.addon.show_small_popup(title='[B][COLOR white]MOVSHARE[/COLOR][/B]', msg='[COLOR red]%s[/COLOR]' % e, delay=5000, image=error_logo)
return self.unresolvable(code=0, msg=e)
def get_url(self, host, media_id):
return 'http://www.movshare.net/video/%s' % media_id
def get_host_and_id(self, url):
r = re.search('//(.+?)/(?:video|embed)/([0-9a-z]+)', url)
if r:
return r.groups()
else:
return False
def valid_url(self, url, host):
if self.get_setting('enabled') == 'false': return False
return re.match('http://(?:www.)?movshare.net/(?:video|embed)/',
url) or 'movshare' in host
| gpl-2.0 |
arpith20/linux | tools/perf/scripts/python/netdev-times.py | 1544 | 15191 | # Display a process of packets and processed time.
# It helps us to investigate networking or network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
# ---------------------------------------------------------------------------
# Global collection state: filled by the tracepoint callbacks below and
# consumed/replayed by trace_end().
# ---------------------------------------------------------------------------
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
              # which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
                 # and a list which stacks receive
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
                  # skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
                       # tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed
# options (set by trace_begin from sys.argv)
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple (shared layout of every queued event)
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
    """Return the elapsed time from src to dst, converted ns -> ms."""
    delta_nsec = dst - src
    return delta_nsec / 1000000.0
# Display a process of transmitting a packet
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing.
# Offsets in the templates are milliseconds relative to the first irq_entry
# of the hunk; PF_JOINT / PF_WJOINT draw the tree connector columns.
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrputs associated with
# a NET_RX softirq
def print_receive(hunk):
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
# check if this hunk should be showed
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
    """Parse the script options from sys.argv into the global flags."""
    global show_tx
    global show_rx
    global dev
    global debug
    for arg in sys.argv[1:]:
        if arg == 'tx':
            show_tx = 1
        elif arg == 'rx':
            show_rx = 1
        elif arg.find('dev=', 0, 4) >= 0:
            dev = arg[4:]
        elif arg == 'debug':
            debug = 1
    # with no explicit direction chosen, show both tx and rx
    if show_tx == 0 and show_rx == 0:
        show_tx = 1
        show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# Called from perf when it finds a corresponding event.
# Each callback normalises its tracepoint arguments into an event_info
# tuple and queues it on all_event_list for trace_end() to replay in
# time order.
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
    # only NET_RX softirqs are of interest to this script
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           vec))

def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           vec))

def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           vec))

def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
                           callchain, irq, irq_name):
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           irq, irq_name))

def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, callchain, irq, ret):
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           irq, ret))

def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, callchain, napi, dev_name):
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           napi, dev_name))

def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr,
                           skblen, dev_name):
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           skbaddr, skblen, dev_name))

def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr,
                  skblen, dev_name):
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           skbaddr, skblen, dev_name))

def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm, callchain,
                       skbaddr, skblen, dev_name):
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           skbaddr, skblen, dev_name))

def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm, callchain,
                      skbaddr, skblen, rc, dev_name):
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           skbaddr, skblen, rc, dev_name))

def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, callchain,
                   skbaddr, protocol, location):
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           skbaddr, protocol, location))

def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr):
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           skbaddr))

def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm, callchain,
                                 skbaddr, skblen):
    all_event_list.append((name, context, cpu, nsecs(sec, nsec), pid, comm,
                           skbaddr, skblen))
def handle_irq_handler_entry(event_info):
    """Open a per-cpu irq record; completed later by the exit handler."""
    (name, context, cpu, time, pid, comm, irq, irq_name) = event_info
    if cpu not in irq_dic:
        irq_dic[cpu] = []
    irq_dic[cpu].append({'irq': irq, 'name': irq_name,
                         'cpu': cpu, 'irq_ent_t': time})

def handle_irq_handler_exit(event_info):
    """Close the current irq record; keep it only if it raised NET_RX."""
    (name, context, cpu, time, pid, comm, irq, ret) = event_info
    if cpu not in irq_dic:
        return
    irq_record = irq_dic[cpu].pop()
    if irq != irq_record['irq']:
        return
    irq_record.update({'irq_ext_t': time})
    # if an irq doesn't include NET_RX softirq, drop.
    if 'event_list' in irq_record:
        irq_dic[cpu].append(irq_record)

def handle_irq_softirq_raise(event_info):
    """Record a NET_RX raise inside the currently open irq record."""
    (name, context, cpu, time, pid, comm, vec) = event_info
    if cpu not in irq_dic or len(irq_dic[cpu]) == 0:
        return
    irq_record = irq_dic[cpu].pop()
    irq_event_list = irq_record.get('event_list', [])
    irq_event_list.append({'time': time, 'event': 'sirq_raise'})
    irq_record.update({'event_list': irq_event_list})
    irq_dic[cpu].append(irq_record)

def handle_irq_softirq_entry(event_info):
    """Start collecting events for the NET_RX softirq now running on cpu."""
    (name, context, cpu, time, pid, comm, vec) = event_info
    net_rx_dic[cpu] = {'sirq_ent_t': time, 'event_list': []}

def handle_irq_softirq_exit(event_info):
    """Merge the cpu's irq records and softirq events into one rx hunk."""
    (name, context, cpu, time, pid, comm, vec) = event_info
    irq_list = []
    event_list = 0
    if cpu in irq_dic:
        irq_list = irq_dic[cpu]
        del irq_dic[cpu]
    if cpu in net_rx_dic:
        sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
        event_list = net_rx_dic[cpu]['event_list']
        del net_rx_dic[cpu]
    if irq_list == [] or event_list == 0:
        return
    # merge information realted to a NET_RX softirq
    receive_hunk_list.append({'sirq_ent_t': sirq_ent_t, 'sirq_ext_t': time,
                              'irq_list': irq_list,
                              'event_list': event_list})
def handle_napi_poll(event_info):
    """Attach a napi_poll event to the softirq currently open on cpu."""
    (name, context, cpu, time, pid, comm, napi, dev_name) = event_info
    if cpu in net_rx_dic:
        net_rx_dic[cpu]['event_list'].append(
            {'event_name': 'napi_poll', 'dev': dev_name, 'event_t': time})

def handle_netif_rx(event_info):
    """Attach a netif_rx event to the irq record currently open on cpu."""
    (name, context, cpu, time, pid, comm,
     skbaddr, skblen, dev_name) = event_info
    if cpu not in irq_dic or len(irq_dic[cpu]) == 0:
        return
    irq_record = irq_dic[cpu].pop()
    irq_event_list = irq_record.get('event_list', [])
    irq_event_list.append({'time': time, 'event': 'netif_rx',
                           'skbaddr': skbaddr, 'skblen': skblen,
                           'dev_name': dev_name})
    irq_record.update({'event_list': irq_event_list})
    irq_dic[cpu].append(irq_record)

def handle_netif_receive_skb(event_info):
    """Record a received skb and queue it for datagram-copy matching."""
    global of_count_rx_skb_list
    (name, context, cpu, time, pid, comm,
     skbaddr, skblen, dev_name) = event_info
    if cpu in net_rx_dic:
        rec_data = {'event_name': 'netif_receive_skb',
                    'event_t': time, 'skbaddr': skbaddr, 'len': skblen}
        net_rx_dic[cpu]['event_list'].append(rec_data)
        rx_skb_list.insert(0, rec_data)
        # bound the matching buffer; count what falls off the end
        if len(rx_skb_list) > buffer_budget:
            rx_skb_list.pop()
            of_count_rx_skb_list += 1

def handle_net_dev_queue(event_info):
    """Record a packet entering the Qdisc (dev_queue_xmit)."""
    global of_count_tx_queue_list
    (name, context, cpu, time, pid, comm,
     skbaddr, skblen, dev_name) = event_info
    skb = {'dev': dev_name, 'skbaddr': skbaddr, 'len': skblen,
           'queue_t': time}
    tx_queue_list.insert(0, skb)
    if len(tx_queue_list) > buffer_budget:
        tx_queue_list.pop()
        of_count_tx_queue_list += 1

def handle_net_dev_xmit(event_info):
    """Move a successfully transmitted packet from the queue to xmit list."""
    global of_count_tx_xmit_list
    (name, context, cpu, time, pid, comm,
     skbaddr, skblen, rc, dev_name) = event_info
    if rc == 0:  # NETDEV_TX_OK
        for i, skb in enumerate(tx_queue_list):
            if skb['skbaddr'] == skbaddr:
                skb['xmit_t'] = time
                tx_xmit_list.insert(0, skb)
                del tx_queue_list[i]
                if len(tx_xmit_list) > buffer_budget:
                    tx_xmit_list.pop()
                    of_count_tx_xmit_list += 1
                return
def handle_kfree_skb(event_info):
    """Match a freed skb against the tx queue, tx xmit and rx buffers."""
    (name, context, cpu, time, pid, comm,
     skbaddr, protocol, location) = event_info
    # still queued: the packet never reached the device, just forget it
    for i, skb in enumerate(tx_queue_list):
        if skb['skbaddr'] == skbaddr:
            del tx_queue_list[i]
            return
    # transmitted: the free time completes the tx record
    for i, skb in enumerate(tx_xmit_list):
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[i]
            return
    # received: annotate how/where the skb was dropped
    for i, rec_data in enumerate(rx_skb_list):
        if rec_data['skbaddr'] == skbaddr:
            rec_data.update({'handle': "kfree_skb",
                             'comm': comm, 'pid': pid, 'comm_t': time})
            del rx_skb_list[i]
            return

def handle_consume_skb(event_info):
    """A consumed (normally freed) skb completes its tx record."""
    (name, context, cpu, time, pid, comm, skbaddr) = event_info
    for i, skb in enumerate(tx_xmit_list):
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[i]
            return

def handle_skb_copy_datagram_iovec(event_info):
    """An skb copied to userspace: record which process consumed it."""
    (name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
    for i, rec_data in enumerate(rx_skb_list):
        if skbaddr == rec_data['skbaddr']:
            rec_data.update({'handle': "skb_copy_datagram_iovec",
                             'comm': comm, 'pid': pid, 'comm_t': time})
            del rx_skb_list[i]
            return
| gpl-2.0 |
saraivaufc/askMathPlus | askmath/management.py | 1 | 6520 | import models
from askmath.models.users import Administrator, Teacher, Assistant, Student
from django.contrib.auth.models import Permission
from django.contrib.auth.models import User, Group
from django.contrib.contenttypes.models import ContentType
from django.db.models import signals
from django.db.models.signals import post_save
# Maps permission codenames to their human-readable names.  Referenced by
# codename from group_permissions below and materialised as django
# Permission rows in create_user_groups().
permissions = {
    # manager / content access
    "access_manager": "Access Manager",
    "access_content": "Access Content",
    # people
    "access_manager_person": "Access Manager Person",
    "read_person": "Read Person",
    "write_person": "Write Person",
    "write_administrator": "Write Administrator",
    "write_teacher": "Write Teacher",
    "write_assistant": "Write Assistant",
    "write_student": "Write Student",
    # student historic
    "access_manager_student_historic": "Access Manager Student Historic",
    "read_student_historic": "Read StudentHistoric",
    "write_student_historic": "Write StudentHistoric",
    # student progress
    "access_manager_student_progress": "Access Manager Student Progress",
    "read_student_progress": "Read StudentProgress",
    "write_student_progress": "Write StudentProgress",
    # student state
    "access_manager_student_state": "Access Manager Student State",
    "read_student_state": "Read StudentState",
    "write_student_state": "Write StudentState",
    # statistics
    "access_manager_statistics": "Access Manager Statistics",
    "read_statistics": "Read Statistics",
    "write_statistics": "Write Statistics",
    # classes
    "access_manager_classe": "Access Manager Classe",
    "read_classe": "Read Classe",
    "write_classe": "Write Classe",
    # disciplines
    "access_manager_discipline": "Access Manager Discipline",
    "read_discipline": "Read Discipline",
    "write_discipline": "Write Discipline",
    # lessons
    "access_manager_lesson": "Access Manager Lesson",
    "read_lesson": "Read Lesson",
    "write_lesson": "Write Lesson",
    # questions
    "access_manager_question": "Access Manager Question",
    "read_question": "Read Question",
    "write_question": "Write Question",
    "answer_question": "Answer Question",
    # videos
    "access_manager_video": "Access Manager Video",
    "read_video": "Read Video",
    "write_video": "Write Video",
    # messages
    "access_manager_message": "Access Manager Message",
    "read_message": "Read Message",
    "write_message": "Write Message",
    # forum
    "access_forum_admin": "Access Forum Admin",
    "read_category": "Read Category",
    "write_category": "Write Category",
    # ranking
    "access_manager_ranking": "Access Manager Ranking",
    "read_ranking": "Read Ranking",
    "write_ranking": "Write Ranking",
    # topics / comments
    "read_topic": "Read Topic",
    "write_topic": "Write Topic",
    "read_comment": "Read Comment",
    "write_comment": "Write Comment",
}
# Permission codenames granted to each role group.  Codenames must exist as
# keys of `permissions` above.  FIX: the "assistant" list previously
# contained duplicated entries ("access_manager_lesson", "read_lesson"
# appeared twice); duplicates are removed — assignment via
# Permission.objects.get_or_create + role.permissions.add is set-like, so
# this is behavior-equivalent.
group_permissions = {
    "administrator": [
        "access_manager",
        "access_manager_person",
        "read_person",
        "write_person",
        "write_administrator",
        "write_teacher",
        "write_assistant",
        "access_manager_student_historic",
        "read_student_historic",
        "write_student_historic",
        "access_manager_student_progress",
        "read_student_progress",
        "write_student_progress",
        "access_manager_student_state",
        "read_student_state",
        "write_student_state",
        "access_manager_statistics",
        "read_statistics",
        "write_statistics",
        "access_manager_classe",
        "read_classe",
        "write_classe",
        "access_manager_discipline",
        "read_discipline",
        "write_discipline",
        "access_manager_lesson",
        "read_lesson",
        "write_lesson",
        "access_manager_question",
        "read_question",
        "write_question",
        "access_manager_video",
        "read_video",
        "write_video",
        "access_manager_message",
        "read_message",
        "write_message",
        "access_manager_ranking",
        "read_ranking",
        "access_forum_admin",
        "read_category",
        "write_category",
        "read_topic",
        "write_topic",
        "read_comment",
        "write_comment",
    ],
    "teacher": [
        "access_manager",
        "access_manager_person",
        "read_person",
        "write_person",
        "write_teacher",
        "write_assistant",
        "access_manager_student_historic",
        "read_student_historic",
        "access_manager_student_progress",
        "read_student_progress",
        "access_manager_student_state",
        "read_student_state",
        "access_manager_statistics",
        "read_statistics",
        "access_manager_classe",
        "read_classe",
        "write_classe",
        "access_manager_discipline",
        "read_discipline",
        "write_discipline",
        "access_manager_lesson",
        "read_lesson",
        "write_lesson",
        "access_manager_question",
        "read_question",
        "write_question",
        "access_manager_video",
        "read_video",
        "write_video",
        "access_manager_message",
        "read_message",
        "write_message",
        "access_manager_ranking",
        "read_ranking",
        "access_forum_admin",
        "read_category",
        "write_category",
        "read_topic",
        "write_topic",
        "read_comment",
        "write_comment",
    ],
    "assistant": [
        "access_manager",
        "access_manager_discipline",
        "read_discipline",
        "access_manager_lesson",
        "read_lesson",
        "write_lesson",
        "access_manager_question",
        "read_question",
        "write_question",
        "access_manager_video",
        "read_video",
        "write_video",
        "access_forum_admin",
        "read_category",
        "read_topic",
        "write_topic",
        "read_comment",
        "write_comment",
    ],
    "student": [
        "access_content",
        "read_person",
        "write_student_historic",
        "write_student_progress",
        "write_student_state",
        "read_classe",
        "read_discipline",
        "read_lesson",
        "read_question",
        "answer_question",
        "read_video",
        "read_ranking",
        "read_category",
        "read_topic",
        "write_topic",
        "read_comment",
        "write_comment",
    ],
}
def create_user_groups(app, created_models, verbosity, **kwargs):
if verbosity > 0:
print "Initialising data post_syncdb"
for group in group_permissions:
if group == 'administrator':
model = Administrator
elif group == 'teacher':
model = Teacher
elif group == 'assistant':
model = Assistant
elif group == 'student':
model = Student
else:
model = Student
content_type = ContentType.objects.get_for_model(model)
role, created = Group.objects.get_or_create(name=group)
if verbosity > 1 and created:
print 'Creating group', group
for perm in group_permissions[group]:
perm, created = Permission.objects.get_or_create(codename=perm, name=permissions[perm],
content_type=content_type)
role.permissions.add(perm)
if verbosity > 1:
print 'Permitting', group, 'to', perm
role.save()
def default_group(sender, instance, created, **kwargs):
    """post_save hook on User: place every newly created user in the
    'student' group."""
    if not created:
        return
    instance.groups.add(Group.objects.get(name='student'))
# Every newly created User is automatically placed in the 'student' group.
post_save.connect(default_group, sender=User)
# After syncdb creates the tables, build the role groups and permissions.
# dispatch_uid prevents the handler from being registered more than once.
signals.post_syncdb.connect(
    create_user_groups,
    sender=models,
    dispatch_uid='askmath.models.create_user_groups'
)
| gpl-2.0 |
dmsurti/mayavi | mayavi/tools/helper_functions.py | 1 | 40264 | """
Helper functions for mlab. These combine creation of the data sources,
and applying the modules to them to make standard visualization
operation. They should always return the module object created, for
consistency, and because retrieving the vtk data source from a module object
is possible via tools.get_vtk_src
Each helper function should have a test function associated with it,
both for testing and to ilustrate its use.
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Copyright (c) 2007, Enthought, Inc.
# License: BSD Style.
from .modules import VectorsFactory, StreamlineFactory, GlyphFactory, \
IsoSurfaceFactory, SurfaceFactory, ContourSurfaceFactory, \
ImageActorFactory, glyph_mode_dict
from .sources import vector_scatter, vector_field, scalar_scatter, \
scalar_field, line_source, array2d_source, grid_source, \
triangular_mesh_source, vertical_vectors_source
from .filters import ExtractVectorNormFactory, WarpScalarFactory, \
TubeFactory, ExtractEdgesFactory, PolyDataNormalsFactory, \
StripperFactory
from .animator import animate
from mayavi.core.scene import Scene
from .auto_doc import traits_doc, dedent
from . import tools
from traits.api import Array, Callable, CFloat, HasTraits, \
List, Trait, Any, Instance, TraitError, true
import numpy
def document_pipeline(pipeline):
    """Wrap a pipeline factory in a plain function whose docstring combines
    the pipeline's own description with the generated documentation of every
    keyword argument accepted along the pipeline.

    `pipeline` is a callable (typically a Pipeline instance); the returned
    function simply forwards all arguments to it.
    """
    def the_function(*args, **kwargs):
        return pipeline(*args, **kwargs)

    # Prefer an explicit `doc` attribute over the class docstring.
    if hasattr(pipeline, 'doc'):
        doc = pipeline.doc
    elif pipeline.__doc__ is not None:
        doc = pipeline.__doc__
    else:
        doc = ''

    the_function.__doc__ = dedent("""%s

    **Keyword arguments:**
    %s""") % (dedent(doc),
              traits_doc(pipeline.get_all_traits()),)

    return the_function
#############################################################################
class Pipeline(HasTraits):
    """ Function used to build pipelines for helper functions """
    #doc = ''

    # Factory building the data source from the positional arguments.
    _source_function = Callable()

    # Ordered list of factories applied on top of the source.
    _pipeline = List()

    # Traits here only for documentation purposes
    figure = Instance('mayavi.core.scene.Scene',
                      help='Figure to populate.')

    def __call__(self, *args, **kwargs):
        """ Calls the logics of the factory, but only after disabling
        rendering, if needed.
        """
        # First retrieve the scene, if any.
        if 'figure' in kwargs:
            figure = kwargs['figure']
            # BUGFIX: the classinfo tuple given to isinstance() must
            # contain only types -- isinstance(figure, (Scene, None))
            # raised a TypeError for every explicit figure argument.
            assert isinstance(figure, (Scene, type(None)))
            if figure is None:
                scene = None
            else:
                scene = figure.scene
        else:
            scene = tools.gcf().scene
        if scene is not None:
            # Freeze rendering while building, remembering whether the
            # scene was rendering so that state can be restored below.
            self._do_redraw = not scene.disable_render
            scene.disable_render = True
        # Then call the real logic
        output = self.__call_internal__(*args, **kwargs)
        # And re-enable the rendering, if needed.
        if scene is not None:
            scene.disable_render = not self._do_redraw
        return output

    def __call_internal__(self, *args, **kwargs):
        """ Builds the source and runs through the pipeline, returning
        the last object created by the pipeline."""
        self.store_kwargs(kwargs)
        self.source = self._source_function(*args, **kwargs)
        # Copy the pipeline so as not to modify it for the next call
        self.pipeline = self._pipeline[:]
        return self.build_pipeline()

    def store_kwargs(self, kwargs):
        """ Merges the given keyword arguments with the traits defaults
        and stores the resulting dictionary in self.kwargs.

        Raises ValueError for any keyword that no factory along the
        pipeline accepts.
        """
        kwargs = kwargs.copy()
        all_traits = self.get_all_traits()
        if not set(kwargs.keys()).issubset(list(all_traits.keys())):
            raise ValueError("Invalid keyword arguments : %s" % \
                    ', '.join(
                        str(k) for k in
                        set(kwargs.keys()).difference(list(all_traits.keys()))))
        traits = self.get(self.class_trait_names())
        # Drop private traits (leading underscore): implementation detail,
        # not user-facing keyword arguments.
        [traits.pop(key) for key in list(traits.keys()) if key[0] == '_']
        traits.update(kwargs)
        self.kwargs = traits

    def build_pipeline(self):
        """ Runs through the pipeline, applying pipe after pipe. """
        object = self.source
        for pipe in self.pipeline:
            keywords = set(pipe.class_trait_names())
            keywords.remove('trait_added')
            keywords.remove('trait_modified')
            # Forward only the keywords this particular factory understands.
            this_kwargs = {}
            for key, value in self.kwargs.items():
                if key in keywords:
                    this_kwargs[key] = value
            object = pipe(object, **this_kwargs)._target
        return object

    def get_all_traits(self):
        """ Returns all the traits of class, and the classes in the pipeline.
        """
        traits = {}
        for pipe in self._pipeline:
            traits.update(pipe.class_traits())
        traits.update(self.class_traits())
        # trait_added/trait_modified are HasTraits machinery, not
        # user-facing keyword arguments.
        traits.pop('trait_added')
        traits.pop('trait_modified')
        return traits
#############################################################################
class Points3d(Pipeline):
    """
    Plots glyphs (like points) at the position of the supplied data.

    **Function signatures**::

        points3d(x, y, z...)
        points3d(x, y, z, s, ...)
        points3d(x, y, z, f, ...)

    x, y and z are numpy arrays, or lists, all of the same shape, giving
    the positions of the points.

    If only 3 arrays x, y, z are given, all the points are drawn with the
    same size and color.

    In addition, you can pass a fourth array s of the same
    shape as x, y, and z giving an associated scalar value for each
    point, or a function f(x, y, z) returning the scalar value. This
    scalar value can be used to modulate the color and the size of the
    points."""

    _source_function = Callable(scalar_scatter)

    _pipeline = [GlyphFactory, ]

    scale_factor = Any('auto', help='The scaling applied to the glyphs. '
                       'the size of the glyph is by default calculated '
                       'from the inter-glyph spacing. Specify a float to '
                       'give the maximum glyph size in drawing units'
                       )

    def __call_internal__(self, *args, **kwargs):
        """ Run the standard pipeline, then resolve an 'auto' scale
        factor from the typical inter-glyph distance of the data.
        """
        auto_scale = kwargs.get('scale_factor', 'auto') == 'auto'
        if auto_scale:
            # Placeholder value; the real factor is computed below.
            kwargs['scale_factor'] = 1
        glyphs = Pipeline.__call_internal__(self, *args, **kwargs)
        glyph = glyphs.glyph.glyph
        if auto_scale:
            glyph.scale_factor = tools._typical_distance(
                glyphs.mlab_source.dataset)
        # Clamping only makes sense together with the automatic scaling.
        glyph.clamping = auto_scale
        return glyphs
# Public, documented entry point built from the Points3d pipeline factory.
points3d = document_pipeline(Points3d())
def test_points3d():
    """Draw a helix of glyphs whose size and color follow a sine wave."""
    theta = numpy.linspace(0, 4 * numpy.pi, 20)
    x = numpy.sin(2 * theta)
    y = numpy.cos(theta)
    z = numpy.cos(2 * theta)
    sizes = 2 + numpy.sin(theta)
    return points3d(x, y, z, sizes, colormap="copper", scale_factor=.25)
@animate
def test_points3d_anim(obj=None):
    """Animates the test_points3d example."""
    glyphs = test_points3d() if obj is None else obj
    theta = numpy.linspace(0, 4 * numpy.pi, 20)
    source = glyphs.mlab_source
    for step in range(10):
        # Only the z coordinates change from frame to frame.
        source.z = numpy.cos(2 * theta * 0.1 * (step + 1))
        yield
def test_molecule():
    """Generates and shows a Caffeine molecule."""
    # One entry per element: (name, color, glyph size, atom positions).
    elements = [
        ('Oxygen', (1, 0, 0), 16,
         [[30, 62, 19], [8, 21, 10]]),
        ('Nitrogen', (0, 0, 1), 20,
         [[31, 21, 11], [18, 42, 14], [55, 46, 17], [56, 25, 13]]),
        ('Carbon', (0, 1, 0), 20,
         [[5, 49, 15], [30, 50, 16], [42, 42, 15], [43, 29, 13],
          [18, 28, 12], [32, 6, 8], [63, 36, 15], [59, 60, 20]]),
        ('Hydrogen', (1, 1, 1), 10,
         [[23, 5, 7], [32, 0, 16], [37, 5, 0], [73, 36, 16], [69, 60, 20],
          [54, 62, 28], [57, 66, 12], [6, 59, 16], [1, 44, 22], [0, 49, 6]]),
    ]
    glyphs = []
    for name, color, size, positions in elements:
        x, y, z = (numpy.array(axis) for axis in zip(*positions))
        glyphs.append(points3d(x, y, z, scale_factor=size, scale_mode='none',
                               resolution=20, color=color, name=name))
    # Same order as before: (oxygen, nitrogen, carbon, hydrogen).
    return tuple(glyphs)
#############################################################################
class Quiver3D(Points3d):
    """
    Plots glyphs (like arrows) indicating the direction of the vectors
    at the positions supplied.

    **Function signatures**::

        quiver3d(u, v, w, ...)
        quiver3d(x, y, z, u, v, w, ...)
        quiver3d(x, y, z, f, ...)

    u, v, w are numpy arrays giving the components of the vectors.

    If only 3 arrays, u, v, and w are passed, they must be 3D arrays, and
    the positions of the arrows are assumed to be the indices of the
    corresponding points in the (u, v, w) arrays.

    If 6 arrays, (x, y, z, u, v, w) are passed, the 3 first arrays give
    the position of the arrows, and the 3 last the components. They
    can be of any shape.

    If 4 positional arguments, (x, y, z, f) are passed, the last one must be
    a callable, f, that returns vectors components (u, v, w) given the
    positions (x, y, z)."""

    # Optional per-point scalar data, used to color/scale the glyphs.
    scalars = Array(help="""optional scalar data.""")

    # Scattered vector data; the glyph auto-scaling is inherited from
    # Points3d.
    _source_function = Callable(vector_scatter)

    _pipeline = [VectorsFactory, ]
# Public, documented entry point built from the Quiver3D pipeline factory.
quiver3d = document_pipeline(Quiver3D())
def test_quiver3d():
    """Arrows tangent to a damped swirl around the z axis."""
    x, y, z = numpy.mgrid[-2:3, -2:3, -2:3]
    r = numpy.sqrt(x ** 2 + y ** 2 + z ** 4)
    damped = numpy.sin(r) / (r + 0.001)
    u = y * damped
    v = -x * damped
    w = numpy.zeros_like(z)
    return quiver3d(x, y, z, u, v, w, line_width=3, scale_factor=1)
def test_quiver3d_cone():
    """Cone glyphs on a regular grid, squeezed into a unit-cube extent."""
    x, y, z = numpy.mgrid[-5:5:8j, -5:5:8j, -5:5:8j]
    x, y, z = (axis.astype('f') for axis in (x, y, z))
    u = numpy.cos(x)
    v = numpy.sin(y)
    w = numpy.sin(x * z)
    return quiver3d(x, y, z, u, v, w, mode='cone',
                    extent=(0, 1, 0, 1, 0, 1), scale_factor=0.9)
def test_quiver3d_2d_data():
    """Flat 2D arrow glyphs positioned on the z = 0 plane."""
    nx, ny = 32, 32
    x, y = numpy.mgrid[-5:5:nx * 1j, -5:5:ny * 1j]
    x = x.astype('f')
    y = y.astype('f')
    u = numpy.cos(x)
    v = numpy.sin(y)
    # The zero array doubles as the z coordinate and the w component.
    w = numpy.zeros_like(x)
    return quiver3d(x, y, w, u, v, w, colormap="Purples",
                    scale_factor=0.5, mode="2dthick_arrow")
#############################################################################
class Flow(Pipeline):
    """
    Creates a trajectory of particles following the flow of a vector field.

    **Function signatures**::

        flow(u, v, w, ...)
        flow(x, y, z, u, v, w, ...)
        flow(x, y, z, f, ...)

    u, v, w are numpy arrays giving the components of the vectors.

    If only 3 arrays, u, v, and w are passed, they must be 3D arrays, and
    the positions of the arrows are assumed to be the indices of the
    corresponding points in the (u, v, w) arrays.

    If 6 arrays, (x, y, z, u, v, w) are passed, the 3 first arrays give
    the position of the arrows, and the 3 last the components. The x, y
    and z arrays are then supposed to have been generated by
    `numpy.mgrid`, in other words, they are 3D arrays, with positions
    lying on a 3D orthogonal and regularly spaced grid with nearest
    neighbor in space matching nearest neighbor in the array. The
    function builds a vector field assuming the points are regularly
    spaced.

    If 4 positional arguments, (x, y, z, f) are passed, the last one must be
    a callable, f, that returns vectors components (u, v, w) given the
    positions (x, y, z)."""

    scalars = Array(help="""optional scalar data.""")

    _source_function = Callable(vector_field)

    _pipeline = [ExtractVectorNormFactory, StreamlineFactory, ]

    def __call_internal__(self, *args, **kwargs):
        """ Build the vector field, then run the pipeline; the
        ExtractVectorNorm filter is dropped when the source already
        carries scalar data, so that coloring follows the user scalars
        rather than the vector norm.
        """
        self.source = self._source_function(*args, **kwargs)
        kwargs.pop('name', None)
        self.store_kwargs(kwargs)
        # Work on a copy: _pipeline is shared between calls.
        self.pipeline = list(self._pipeline)
        if tools._has_scalar_data(self.source):
            del self.pipeline[0]
        return self.build_pipeline()
# Public, documented entry point built from the Flow pipeline factory.
flow = document_pipeline(Flow())
def test_flow():
    """Streamlines of a swirling field with a small vertical drift."""
    x, y, z = numpy.mgrid[-4:4:40j, -4:4:40j, 0:4:20j]
    r = numpy.sqrt(x ** 2 + y ** 2 + z ** 2 + 0.1)
    swirl = numpy.sin(r) / r
    u = y * swirl
    v = -x * swirl
    w = numpy.ones_like(z) * 0.05
    return flow(u, v, w)
def test_flow_tubes():
    """Flow rendered with tubes instead of plain lines."""
    x, y, z = numpy.mgrid[-5:5:32j, -5:5:32j, -5:5:32j]
    x, y, z = (axis.astype('f') for axis in (x, y, z))
    u = numpy.cos(x / 2.)
    v = numpy.sin(y / 2.)
    w = numpy.sin(x * z / 4.)
    return flow(x, y, z, u, v, w, linetype='tube')
@animate
def test_flow_anim(obj=None):
    """Animate the tube-flow example by phase-shifting u and w."""
    obj = test_flow_tubes() if obj is None else obj
    ms = obj.mlab_source
    x, z = ms.x, ms.z
    for frame in range(10):
        phase = numpy.pi * (frame + 1) / 10.
        ms.set(u=numpy.cos(x / 2. + phase),
               w=numpy.sin(x * z / 4. + phase))
        yield
def test_flow_scalars():
    """Flow colored by an independent scalar field (t = x * z)."""
    x, y, z = numpy.mgrid[-5:5:32j, -5:5:32j, -5:5:32j]
    x, y, z = (axis.astype('f') for axis in (x, y, z))
    u = numpy.cos(x / 2.)
    v = numpy.sin(y / 2.)
    w = numpy.sin(x * z / 8.)
    return flow(u, v, w, scalars=x * z, seedtype='plane',
                linetype='tube', colormap='Spectral')
#############################################################################
class Contour3d(Pipeline):
    """
    Plots iso-surfaces for a 3D volume of data supplied as arguments.

    **Function signatures**::

        contour3d(scalars, ...)
        contour3d(x, y, z, scalars, ...)

    scalars is a 3D numpy arrays giving the data on a grid.

    If 4 arrays, (x, y, z, scalars) are passed, the 3 first arrays give
    the position, and the last the scalar value. The x, y
    and z arrays are then supposed to have been generated by
    `numpy.mgrid`, in other words, they are 3D arrays, with positions
    lying on a 3D orthogonal and regularly spaced grid with nearest
    neighbor in space matching nearest neighbor in the array. The
    function builds a scalar field assuming the points are regularly
    spaced.

    If 4 positional arguments, (x, y, z, f) are passed, the last one
    can also be a callable, f, that returns the scalar values
    given the positions (x, y, z)."""

    # Regularly gridded scalar data, contoured by an iso-surface module.
    _source_function = Callable(scalar_field)

    _pipeline = [IsoSurfaceFactory, ]
# Public, documented entry point built from the Contour3d pipeline factory.
contour3d = document_pipeline(Contour3d())
def test_contour3d():
    """Iso-surfaces of an anisotropic quadratic form."""
    x, y, z = numpy.ogrid[-5:5:64j, -5:5:64j, -5:5:64j]
    field = x * x * 0.5 + y * y + z * z * 2.0
    return contour3d(field, contours=4, transparent=True)
@animate
def test_contour3d_anim(obj=None):
    """Animate the iso-surfaces by morphing the scalar field."""
    obj = test_contour3d() if obj is None else obj
    x, y, z = numpy.ogrid[-5:5:64j, -5:5:64j, -5:5:64j]
    ms = obj.mlab_source
    for frame in range(1, 10):
        ms.scalars = x * x * 0.5 + y * x * 0.1 * (frame + 1) + z * z * 0.25
        yield
#############################################################################
class Plot3d(Pipeline):
    """
    Draws lines between points.

    **Function signatures**::

        plot3d(x, y, z, ...)
        plot3d(x, y, z, s, ...)

    x, y, z and s are numpy arrays or lists of the same shape. x, y and z
    give the positions of the successive points of the line. s is an
    optional scalar value associated with each point."""

    tube_radius = Trait(0.025, CFloat, None,
                        adapts='filter.radius',
                        help="""radius of the tubes used to represent the
                        lines, If None, simple lines are used.
                        """)

    _source_function = Callable(line_source)

    _pipeline = [StripperFactory, TubeFactory, SurfaceFactory, ]

    def __call_internal__(self, *args, **kwargs):
        """ Build the line source, then run the pipeline; the stripper
        and tube filters are skipped when tube_radius is None, giving
        plain lines.
        """
        self.source = self._source_function(*args, **kwargs)
        kwargs.pop('name', None)
        self.store_kwargs(kwargs)
        # Work on a copy: _pipeline is shared between calls.
        self.pipeline = list(self._pipeline)
        if self.kwargs['tube_radius'] is None:
            for factory in (TubeFactory, StripperFactory):
                self.pipeline.remove(factory)
        return self.build_pipeline()
# Public, documented entry point built from the Plot3d pipeline factory.
plot3d = document_pipeline(Plot3d())
def test_plot3d():
    """Generates a pretty set of lines."""
    n_mer, n_long = 6, 11
    dphi = numpy.pi / 1000.0
    phi = numpy.arange(0.0, 2 * numpy.pi + 0.5 * dphi, dphi)
    mu = phi * n_mer
    radius = 1 + numpy.cos(n_long * mu / n_mer) * 0.5
    x = numpy.cos(mu) * radius
    y = numpy.sin(mu) * radius
    z = numpy.sin(n_long * mu / n_mer) * 0.5
    return plot3d(x, y, z, numpy.sin(mu), tube_radius=0.025,
                  colormap='Spectral')
@animate
def test_plot3d_anim(obj=None):
    """Generates a pretty set of lines and animates it."""
    # Run the standard example if no module was handed in.
    obj = test_plot3d() if obj is None else obj

    # Same parameters as the static example.
    n_mer, n_long = 6, 11
    dphi = numpy.pi / 1000.0
    phi = numpy.arange(0.0, 2 * numpy.pi + 0.5 * dphi, dphi, 'd')
    mu = phi * n_mer

    ms = obj.mlab_source
    for frame in range(10):
        shift = numpy.pi * (frame + 1) / 5.
        x = numpy.cos(mu) * (1 + numpy.cos(n_long * mu / n_mer
                                           + shift) * 0.5)
        ms.set(x=x, scalars=numpy.sin(mu + shift))
        yield
#############################################################################
class ImShow(Pipeline):
    """
    View a 2D array as an image.

    **Function signatures**::

        imshow(s, ...)

    s is a 2 dimension array. The values of s are mapped to a color using
    the colormap."""

    # 2D array source rendered directly as an image actor.
    _source_function = Callable(array2d_source)

    _pipeline = [ImageActorFactory, ]
# Public, documented entry point built from the ImShow pipeline factory.
imshow = document_pipeline(ImShow())
def test_imshow():
    """ Use imshow to visualize a 2D 10x10 random array.
    """
    image = numpy.random.random((10, 10))
    return imshow(image, colormap='gist_earth')
#############################################################################
class Surf(Pipeline):
    """
    Plots a surface using regularly-spaced elevation data supplied as a 2D
    array.

    **Function signatures**::

        surf(s, ...)
        surf(x, y, s, ...)
        surf(x, y, f, ...)

    s is the elevation matrix, a 2D array, where indices along the first
    array axis represent x locations, and indices along the second array
    axis represent y locations.

    x and y can be 1D or 2D arrays such as returned by numpy.ogrid or
    numpy.mgrid. Arrays returned by numpy.meshgrid require a transpose
    first to obtain correct indexing order.
    The points should be located on an orthogonal grid (possibly
    non-uniform). In other words, all the points sharing a same
    index in the s array need to have the same x or y value. For
    arbitrary-shaped position arrays (non-orthogonal grids), see the mesh
    function.

    If only 1 array s is passed, the x and y arrays are assumed to be
    made from the indices of arrays, and an uniformly-spaced data set is
    created.

    If 3 positional arguments are passed the last one must be an array s,
    or a callable, f, that returns an array. x and y give the
    coordinates of positions corresponding to the s values."""

    _source_function = Callable(array2d_source)

    _pipeline = [WarpScalarFactory, PolyDataNormalsFactory, SurfaceFactory]

    warp_scale = Any(1, help="""scale of the z axis (warped from
                        the value of the scalar). By default this scale
                        is a float value.

                        If you specify 'auto', the scale is calculated to
                        give a pleasant aspect ratio to the plot,
                        whatever the bounds of the data.

                        If you specify a value for warp_scale in
                        addition to an extent, the warp scale will be
                        determined by the warp_scale, and the plot be
                        positioned along the z axis with the zero of the
                        data centered on the center of the extent. If you
                        are using explicit extents, this is the best way
                        to control the vertical scale of your plots.

                        If you want to control the extent (or range)
                        of the surface object, rather than its scale,
                        see the `extent` keyword argument.
                        """)

    mask = Array(help="""boolean mask array to suppress some data points.
                 Note: this works based on colormapping of scalars and will
                 not work if you specify a solid color using the
                 `color` keyword.""")

    def __call_internal__(self, *args, **kwargs):
        """ Override the call to be able to scale automatically the axis.
        """
        self.source = self._source_function(*args, **kwargs)
        kwargs.pop('name', None)
        # Deal with both explicit warp scale and extent, this is
        # slightly hairy. The wigner example is a good test case for
        # this.
        if not 'warp_scale' in kwargs and not 'extent' in kwargs:
            # Neither given: warn when the default scale of 1 would give
            # an extremely flat or spiky surface.
            try:
                xi, xf, yi, yf, _, _ = self.source.data.bounds
                zi, zf = self.source.data.scalar_range
            except AttributeError:
                # Image-data based sources keep their bounds elsewhere.
                xi, xf, yi, yf, _, _ = self.source.image_data.bounds
                zi, zf = self.source.image_data.scalar_range
            aspect_ratios = [(zf - zi) / (xf - xi), (zf - zi) / (yf - yi)]
            if min(aspect_ratios) < 0.01 or max(aspect_ratios) > 100:
                print('Warning: the range of your scalar values differs by ' \
                'more than a factor 100 than the range of the grid values ' \
                'and you did not '\
                'specify a warp_scale. You could try warp_scale="auto".')
        if 'warp_scale' in kwargs and not kwargs['warp_scale'] == 'auto' \
                and 'extent' in kwargs:
            # Explicit numeric warp_scale wins over the z part of the
            # extent: recompute the z bounds so that the warped data is
            # centered in the requested extent.
            # XXX: I should use the logging module.
            print('Warning: both warp_scale and extent keyword argument ' \
            'specified, the z bounds of the extents will be overridden')
            xi, xf, yi, yf, zi, zf = kwargs['extent']
            zo = 0.5 * (zi + zf)
            try:
                si, sf = self.source.data.scalar_range
            except AttributeError:
                si, sf = self.source.image_data.scalar_range
            z_span = kwargs['warp_scale'] * abs(sf - si)
            zi = zo + si * kwargs['warp_scale']
            zf = zi + z_span
            kwargs['extent'] = (xi, xf, yi, yf, zi, zf)
            kwargs['warp_scale'] = 1
        elif kwargs.get('warp_scale', 1) == 'auto':
            # 'auto': choose a z extent proportional to the x/y footprint
            # (unless an explicit extent already fixes it).
            if 'extent' in kwargs:
                if 'warp_scale' in kwargs:
                    print("Warning: extent specified, warp_scale='auto' " \
                    "ignored.")
            else:
                try:
                    xi, xf, yi, yf, _, _ = self.source.data.bounds
                    zi, zf = self.source.data.scalar_range
                except AttributeError:
                    xi, xf, yi, yf, _, _ = self.source.image_data.bounds
                    zi, zf = self.source.image_data.scalar_range
                z0 = zf - zi
                dz = 0.3 * ((xf - xi) + (yf - yi))
                zi = z0 - 0.5 * dz
                zf = z0 + 0.5 * dz
                kwargs['extent'] = (xi, xf, yi, yf, zi, zf)
            kwargs['warp_scale'] = 1.
        self.store_kwargs(kwargs)

        # Copy the pipeline so as not to modify it for the next call
        self.pipeline = self._pipeline[:]
        return self.build_pipeline()
# Public, documented entry point built from the Surf pipeline factory.
surf = document_pipeline(Surf())
def test_simple_surf():
    """Test Surf with a simple collection of points."""
    x, y = numpy.mgrid[0:3:1, 0:3:1]
    heights = numpy.asarray(x, 'd')
    return surf(x, y, heights)
@animate
def test_simple_surf_anim(obj=None):
    """Test Surf with a simple collection of points and animate it."""
    obj = test_simple_surf() if obj is None else obj
    ms = obj.mlab_source
    x = ms.x
    for frame in range(10):
        ms.scalars = numpy.asarray(x * 0.1 * (frame + 1), 'd')
        yield
def test_surf():
    """Test surf on regularly spaced co-ordinates like MayaVi."""
    def height(x, y):
        return (numpy.sin(x + y) + numpy.sin(2 * x - y)
                + numpy.cos(3 * x + 4 * y))

    x, y = numpy.mgrid[-7.:7.05:0.1, -5.:5.05:0.05]
    return surf(x, y, height)
def test_surf_wigner():
    """Wigner function of a multiphoton Schrodinger cat state."""
    def cat(x, y, alpha=2, eta=1, purity=1):
        """ Multiphoton shrodinger cat. eta is the fidelity, alpha the number
        of photons"""
        exp = numpy.exp
        cos = numpy.cos
        norm = 2 * (1 + exp(-alpha ** 2))
        return (1 + eta * (exp(-x ** 2 - (y - alpha) ** 2)
                           + exp(-x ** 2 - (y + alpha) ** 2)
                           + 2 * purity * exp(-x ** 2 - y ** 2)
                           * cos(2 * alpha * x)) / norm) / 2

    x, y = numpy.mgrid[-5:5:0.1, -5:5:0.1]
    return surf(x, y, cat)
#############################################################################
class Mesh(Pipeline):
    """
    Plots a surface using grid-spaced data supplied as 2D arrays.

    **Function signatures**::

        mesh(x, y, z, ...)

    x, y, z are 2D arrays, all of the same shape, giving the positions of
    the vertices of the surface. The connectivity between these points is
    implied by the connectivity on the arrays.

    For simple structures (such as orthogonal grids) prefer the `surf`
    function, as it will create more efficient data structures. For mesh
    defined by triangles rather than regular implicit connectivity, see the
    `triangular_mesh` function.
    """

    scale_mode = Trait('none', {'none': 'data_scaling_off',
                                'scalar': 'scale_by_scalar',
                                'vector': 'scale_by_vector'},
                       help="""the scaling mode for the glyphs
                       ('vector', 'scalar', or 'none').""")

    scale_factor = CFloat(0.05,
                          desc="""scale factor of the glyphs used to represent
                          the vertices, in fancy_mesh mode. """)

    tube_radius = Trait(0.025, CFloat, None,
                        help="""radius of the tubes used to represent the
                        lines, in mesh mode. If None, simple lines are used.
                        """)

    scalars = Array(help="""optional scalar data.""")

    mask = Array(help="""boolean mask array to suppress some data points.
                 Note: this works based on colormapping of scalars and will
                 not work if you specify a solid color using the
                 `color` keyword.""")

    representation = Trait('surface', 'wireframe', 'points', 'mesh',
                           'fancymesh',
                           desc="""the representation type used for the
                           surface.""")

    _source_function = Callable(grid_source)

    # Full pipeline for the 'fancymesh' case; pieces are removed from a
    # copy of it depending on the requested representation.
    _pipeline = [ExtractEdgesFactory, GlyphFactory, TubeFactory,
                 SurfaceFactory]

    def __call_internal__(self, *args, **kwargs):
        """ Override the call to be able to choose whether to apply
        filters.
        """
        self.source = self._source_function(*args, **kwargs)
        kwargs.pop('name', None)
        self.store_kwargs(kwargs)
        # Copy the pipeline so as not to modify it for the next call
        self.pipeline = self._pipeline[:]
        if not self.kwargs['representation'] in ('mesh', 'fancymesh'):
            # Plain surface/wireframe/points: no edge extraction, tubes
            # or glyphs -- only normals plus the surface module.
            self.pipeline.remove(ExtractEdgesFactory)
            self.pipeline.remove(TubeFactory)
            self.pipeline.remove(GlyphFactory)
            self.pipeline = [PolyDataNormalsFactory, ] + self.pipeline
        else:
            if self.kwargs['tube_radius'] is None:
                # Simple lines instead of tubes along the edges.
                self.pipeline.remove(TubeFactory)
            if not self.kwargs['representation'] == 'fancymesh':
                # 'mesh' mode: edges only, no vertex glyphs.
                self.pipeline.remove(GlyphFactory)
            # The downstream surface module itself renders as a surface.
            self.kwargs['representation'] = 'surface'
        return self.build_pipeline()
# Public, documented entry point built from the Mesh pipeline factory.
mesh = document_pipeline(Mesh())
def test_mesh():
    """A very pretty picture of spherical harmonics translated from
    the octaviz example."""
    pi = numpy.pi
    sin = numpy.sin
    cos = numpy.cos
    dphi = dtheta = pi / 250.0
    phi, theta = numpy.mgrid[0:pi + dphi * 1.5:dphi,
                             0:2 * pi + dtheta * 1.5:dtheta]
    # Harmonic exponents, as in the original octaviz example.
    m0, m1, m2, m3, m4, m5, m6, m7 = 4, 3, 2, 3, 6, 2, 6, 4
    r = (sin(m0 * phi) ** m1 + cos(m2 * phi) ** m3
         + sin(m4 * theta) ** m5 + cos(m6 * theta) ** m7)
    x = r * sin(phi) * cos(theta)
    y = r * cos(phi)
    z = r * sin(phi) * sin(theta)
    return mesh(x, y, z, colormap="bone")
def test_mesh_sphere(r=1.0, npts=(100, 100), colormap='jet'):
    """Create a simple sphere."""
    pi = numpy.pi
    phi, theta = numpy.mgrid[0:pi:npts[0] * 1j, 0:2 * pi:npts[1] * 1j]
    x = r * numpy.sin(phi) * numpy.cos(theta)
    y = r * numpy.sin(phi) * numpy.sin(theta)
    z = r * numpy.cos(phi)
    return mesh(x, y, z, colormap=colormap)
@animate
def test_mesh_sphere_anim(obj=None, r=1.0, npts=(100, 100), colormap='jet'):
    """Create a simple sphere and animate it."""
    obj = test_mesh_sphere(r, npts, colormap) if obj is None else obj
    pi = numpy.pi
    phi, theta = numpy.mgrid[0:pi:npts[0] * 1j, 0:2 * pi:npts[1] * 1j]
    ms = obj.mlab_source
    for frame in range(1, 10):
        # Stretch the sphere along z, coloring by the new z values.
        z = (r + frame * 0.25) * numpy.cos(phi)
        ms.set(z=z, scalars=z)
        yield
def test_mesh_mask_custom_colors(r=1.0, npts=(100, 100)):
    """Create a sphere with masking and using a custom colormap.

    Note that masking works only when scalars are set. The custom colormap
    illustrates how one can completely customize the colors with numpy arrays.
    In this case we use a simple 2 color colormap.
    """
    # Sphere data, as in test_mesh_sphere.
    pi = numpy.pi
    phi, theta = numpy.mgrid[0:pi:npts[0] * 1j, 0:2 * pi:npts[1] * 1j]
    x = r * numpy.sin(phi) * numpy.cos(theta)
    y = r * numpy.sin(phi) * numpy.sin(theta)
    z = r * numpy.cos(phi)

    # Mask out every fifth row and every fifth column.
    mask = numpy.zeros_like(x).astype(bool)
    mask[::5] = True
    mask[:, ::5] = True

    # Create the mesh with the default colormapping.
    m = mesh(x, y, z, scalars=z, mask=mask)

    # Two-entry (R, G, B, A) lookup table (values 0-255); a LUT needs at
    # least two rows -- make them identical for a constant color.
    lut = numpy.zeros((2, 4), dtype='uint8')
    lut[0, 2] = 255   # first color: blue
    lut[1, 1] = 255   # second color: green
    lut[:, 3] = 255   # fully opaque
    m.module_manager.scalar_lut_manager.lut.table = lut
    return m
def test_fancy_mesh():
    """Create a fancy looking mesh using mesh (example taken from octaviz)."""
    pi = numpy.pi
    cos = numpy.cos
    du, dv = pi / 20.0, pi / 20.0
    u, v = numpy.mgrid[0.01:pi + du * 1.5:du, 0:2 * pi + dv * 1.5:dv]
    # x and y share everything but the sign of the v phase shift.
    base = (1 - cos(u)) * cos(u + 2 * pi / 3)
    x = base * cos(v + 2 * pi / 3.0) * 0.5
    y = base * cos(v - 2 * pi / 3.0) * 0.5
    z = -cos(u - 2 * pi / 3.)
    return mesh(x, y, z, representation='fancymesh',
                tube_radius=0.0075, colormap="RdYlGn")
#############################################################################
class ContourSurf(Pipeline):
    """
    Plots the contours of a surface using grid-spaced data for
    elevation supplied as a 2D array.

    **Function signatures**::

        contour_surf(s, ...)
        contour_surf(x, y, s, ...)
        contour_surf(x, y, f, ...)

    s is the elevation matrix, a 2D array. The contour lines plotted
    are lines of equal s value.

    x and y can be 1D or 2D arrays (such as returned by numpy.ogrid or
    numpy.mgrid), but the points should be located on an orthogonal grid
    (possibly non-uniform). In other words, all the points sharing a same
    index in the s array need to have the same x or y value. For
    arbitrary-shaped position arrays (non-orthogonal grids), see the mesh
    function.

    If only 1 array s is passed, the x and y arrays are assumed to be
    made from the indices of arrays, and an uniformly-spaced data set is
    created.

    If 3 positional arguments are passed the last one must be an array s,
    or a callable, f, that returns an array. x and y give the
    coordinates of positions corresponding to the s values."""

    # Same source as Surf, but rendered as contour lines after warping.
    _source_function = Callable(array2d_source)

    _pipeline = [WarpScalarFactory, ContourSurfaceFactory]
# Public, documented entry point built from the ContourSurf pipeline factory.
contour_surf = document_pipeline(ContourSurf())
def test_contour_surf():
    """Test contour_surf on regularly spaced co-ordinates like MayaVi."""
    def height(x, y):
        return (numpy.sin(x + y) + numpy.sin(2 * x - y)
                + numpy.cos(3 * x + 4 * y))

    x, y = numpy.mgrid[-7.:7.05:0.1, -5.:5.05:0.05]
    return contour_surf(x, y, height)
#############################################################################
# Expose only the glyphs that make (more or less) sense for a barchart.
bar_mode_dict = dict(
    (name, glyph_mode_dict[name])
    for name in ('cube', '2dtriangle', '2dsquare', '2dvertex',
                 '2dthick_cross', '2ddiamond', '2dcross', '2dcircle'))
class BarChart(Pipeline):
    """
    Plots vertical glyphs (like bars) scaled vertical, to do
    histogram-like plots.

    This functions accepts a wide variety of inputs, with positions given
    in 2-D or in 3-D.

    **Function signatures**::

        barchart(s, ...)
        barchart(x, y, s, ...)
        barchart(x, y, f, ...)
        barchart(x, y, z, s, ...)
        barchart(x, y, z, f, ...)

    If only one positional argument is passed, it can be a 1-D, 2-D, or 3-D
    array giving the length of the vectors. The positions of the data
    points are deducted from the indices of array, and an
    uniformly-spaced data set is created.

    If 3 positional arguments (x, y, s) are passed the last one must be
    an array s, or a callable, f, that returns an array. x and y give the
    2D coordinates of positions corresponding to the s values.

    If 4 positional arguments (x, y, z, s) are passed, the 3 first are
    arrays giving the 3D coordinates of the data points, and the last one
    is an array s, or a callable, f, that returns an array giving the
    data value.
    """

    _source_function = Callable(vertical_vectors_source)

    _pipeline = [VectorsFactory, ]

    mode = Trait('cube', bar_mode_dict,
                 desc='The glyph used to represent the bars.')

    lateral_scale = CFloat(0.9, desc='The lateral scale of the glyph, '
                           'in units of the distance between nearest points')

    auto_scale = true(desc='whether to compute automatically the '
                      'lateral scaling of the glyphs. This might be '
                      'computationally expensive.')

    def __call_internal__(self, *args, **kwargs):
        """ Override the call to be able to scale automatically the axis.
        """
        g = Pipeline.__call_internal__(self, *args, **kwargs)
        gs = g.glyph.glyph_source
        # Use a cube source for glyphs.
        if not 'mode' in kwargs:
            gs.glyph_source = gs.glyph_dict['cube_source']
        # Position the glyph tail on the point.
        gs.glyph_position = 'tail'
        gs.glyph_source.center = (0.0, 0.0, 0.5)
        # Bars always point up: do not orient the glyphs along the vector.
        g.glyph.glyph.orient = False
        if not 'color' in kwargs:
            g.glyph.color_mode = 'color_by_scalar'
        if not 'scale_mode' in kwargs:
            g.glyph.scale_mode = 'scale_by_vector_components'
        g.glyph.glyph.clamping = False

        # The auto-scaling code. It involves finding the minimum
        # distance between points, which can be very expensive. We
        # shortcut this calculation for structured data
        if len(args) == 1 or self.auto_scale:
            min_axis_distance = 1
        else:
            x, y, z = g.mlab_source.x, g.mlab_source.y, g.mlab_source.z
            min_axis_distance = \
                tools._min_axis_distance(x, y, z)
        scale_factor = g.glyph.glyph.scale_factor * min_axis_distance
        lateral_scale = kwargs.pop('lateral_scale', self.lateral_scale)
        try:
            g.glyph.glyph_source.glyph_source.y_length = \
                lateral_scale / (scale_factor)
            g.glyph.glyph_source.glyph_source.x_length = \
                lateral_scale / (scale_factor)
        except TraitError:
            # Deliberate no-op: some glyph sources have no x/y_length.
            " Not all types of glyphs have controlable y_length and x_length"

        return g
# Public, documented entry point built from the BarChart pipeline factory.
barchart = document_pipeline(BarChart())
def test_barchart():
    """ Demo the bar chart plot with a 2D array.
    """
    values = numpy.abs(numpy.random.random((3, 3)))
    return barchart(values)
#############################################################################
class TriangularMesh(Mesh):
    """
    Plots a surface using a mesh defined by the position of its vertices
    and the triangles connecting them.

    **Function signatures**::

        triangular_mesh(x, y, z, triangles ...)

    x, y, z are arrays giving the positions of the vertices of the surface.
    triangles is a list of triplets (or an array) listing the vertices in
    each triangle. Vertices are indexed by their appearance number in the
    position arrays.

    For simple structures (such as rectangular grids) prefer the surf or
    mesh functions, as they will create more efficient data structures.
    """

    # Only the source differs from Mesh: connectivity is explicit here.
    _source_function = Callable(triangular_mesh_source)
# Public, documented entry point built from the TriangularMesh factory.
triangular_mesh = document_pipeline(TriangularMesh())
def test_triangular_mesh():
    """Plot a cone: a non-regular mesh described explicitly by its
    triangles.
    """
    n = 8
    theta = numpy.linspace(-numpy.pi, numpy.pi, n)
    rim = numpy.exp(1j * theta)
    # Vertex 0 is the apex; vertices 1..n lie on the rim circle at z=0.
    x = numpy.r_[0, rim.real.copy()]
    y = numpy.r_[0, rim.imag.copy()]
    z = numpy.r_[1, numpy.zeros(n)]
    scalars = numpy.r_[0, theta]
    triangles = []
    for i in range(1, n):
        triangles.append((0, i, i + 1))
    return triangular_mesh(x, y, z, triangles, scalars=scalars)
| bsd-3-clause |
Motaku/ansible | lib/ansible/plugins/strategies/__init__.py | 20 | 23501 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from six.moves import queue as Queue
import time
from ansible.errors import *
from ansible.executor.task_result import TaskResult
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible.playbook.handler import Handler
from ansible.playbook.helpers import load_list_of_blocks
from ansible.playbook.included_file import IncludedFile
from ansible.playbook.role import hash_params
from ansible.plugins import _basedirs, filter_loader, lookup_loader, module_loader
from ansible.template import Templar
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__all__ = ['StrategyBase']
# FIXME: this should probably be in the plugins/__init__.py, with
# a smarter mechanism to set all of the attributes based on
# the loaders created there
class SharedPluginLoaderObj:
    '''
    Lightweight container used to ship the various plugin loaders to the
    forked worker processes over the queue.
    '''
    def __init__(self):
        # shallow copy of the current plugin base directories
        self.basedirs = list(_basedirs)
        self.module_loader = module_loader
        self.lookup_loader = lookup_loader
        self.filter_loader = filter_loader
class StrategyBase:
    '''
    This is the base class for strategy plugins, which contains some common
    code useful to all strategies like running handlers, cleanup actions, etc.
    '''
    def __init__(self, tqm):
        '''Capture references to the task queue manager's shared state.'''
        self._tqm = tqm
        self._inventory = tqm.get_inventory()
        self._workers = tqm.get_workers()
        self._notified_handlers = tqm.get_notified_handlers()
        self._variable_manager = tqm.get_variable_manager()
        self._loader = tqm.get_loader()
        self._final_q = tqm._final_q
        self._step = getattr(tqm._options, 'step', False)
        self._diff = getattr(tqm._options, 'diff', False)
        self._display = display
        # internal counters
        self._pending_results = 0
        self._cur_worker = 0
        # this dictionary is used to keep track of hosts that have
        # outstanding tasks still in queue
        self._blocked_hosts = dict()
    def run(self, iterator, play_context, result=True):
        '''
        Run any outstanding handlers and translate the accumulated play
        state into a numeric return code: 3 if any host was unreachable,
        2 if any host failed, 1 if the run result is falsy, 0 on success.
        '''
        # save the failed/unreachable hosts, as the run_handlers()
        # method will clear that information during its execution
        failed_hosts = self._tqm._failed_hosts.keys()
        unreachable_hosts = self._tqm._unreachable_hosts.keys()
        self._display.debug("running handlers")
        result &= self.run_handlers(iterator, play_context)
        # now update with the hosts (if any) that failed or were
        # unreachable during the handler execution phase
        failed_hosts = set(failed_hosts).union(self._tqm._failed_hosts.keys())
        unreachable_hosts = set(unreachable_hosts).union(self._tqm._unreachable_hosts.keys())
        # send the stats callback
        self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)
        if len(unreachable_hosts) > 0:
            return 3
        elif len(failed_hosts) > 0:
            return 2
        elif not result:
            return 1
        else:
            return 0
    def get_hosts_remaining(self, play):
        '''Return the play's hosts that have neither failed nor been unreachable.'''
        return [host for host in self._inventory.get_hosts(play.hosts) if host.name not in self._tqm._failed_hosts and host.name not in self._tqm._unreachable_hosts]
    def get_failed_hosts(self, play):
        '''Return the play's hosts that have been marked as failed.'''
        return [host for host in self._inventory.get_hosts(play.hosts) if host.name in self._tqm._failed_hosts]
    def add_tqm_variables(self, vars, play):
        '''
        Base class method to add extra variables/information to the list of task
        vars sent through the executor engine regarding the task queue manager state.
        '''
        new_vars = vars.copy()
        new_vars['ansible_current_hosts'] = self.get_hosts_remaining(play)
        new_vars['ansible_failed_hosts'] = self.get_failed_hosts(play)
        return new_vars
    def _queue_task(self, host, task, task_vars, play_context):
        ''' handles queueing the task up to be sent to a worker '''
        self._display.debug("entering _queue_task() for %s/%s" % (host, task))
        # and then queue the new task
        self._display.debug("%s - putting task (%s) in queue" % (host, task))
        try:
            self._display.debug("worker is %d (out of %d available)" % (self._cur_worker+1, len(self._workers)))
            (worker_prc, main_q, rslt_q) = self._workers[self._cur_worker]
            # round-robin over the available worker slots
            self._cur_worker += 1
            if self._cur_worker >= len(self._workers):
                self._cur_worker = 0
            # create a dummy object with plugin loaders set as an easier
            # way to share them with the forked processes
            shared_loader_obj = SharedPluginLoaderObj()
            main_q.put((host, task, self._loader.get_basedir(), task_vars, play_context, shared_loader_obj), block=False)
            self._pending_results += 1
        except (EOFError, IOError, AssertionError) as e:
            # most likely an abort
            self._display.debug("got an error while queuing: %s" % e)
            return
        self._display.debug("exiting _queue_task() for %s/%s" % (host, task))
    def _process_pending_results(self, iterator):
        '''
        Reads results off the final queue and takes appropriate action
        based on the result (executing callbacks, updating state, etc.).
        '''
        ret_results = []
        while not self._final_q.empty() and not self._tqm._terminated:
            try:
                result = self._final_q.get(block=False)
                self._display.debug("got result from result worker: %s" % ([unicode(x) for x in result],))
                # all host status messages contain 2 entries: (msg, task_result)
                if result[0] in ('host_task_ok', 'host_task_failed', 'host_task_skipped', 'host_unreachable'):
                    task_result = result[1]
                    host = task_result._host
                    task = task_result._task
                    if result[0] == 'host_task_failed' or task_result.is_failed():
                        if not task.ignore_errors:
                            self._display.debug("marking %s as failed" % host.name)
                            iterator.mark_host_failed(host)
                            self._tqm._failed_hosts[host.name] = True
                            self._tqm._stats.increment('failures', host.name)
                        else:
                            # ignored errors still count as an 'ok' in stats
                            self._tqm._stats.increment('ok', host.name)
                        self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=task.ignore_errors)
                    elif result[0] == 'host_unreachable':
                        self._tqm._unreachable_hosts[host.name] = True
                        self._tqm._stats.increment('dark', host.name)
                        self._tqm.send_callback('v2_runner_on_unreachable', task_result)
                    elif result[0] == 'host_task_skipped':
                        self._tqm._stats.increment('skipped', host.name)
                        self._tqm.send_callback('v2_runner_on_skipped', task_result)
                    elif result[0] == 'host_task_ok':
                        self._tqm._stats.increment('ok', host.name)
                        if 'changed' in task_result._result and task_result._result['changed']:
                            self._tqm._stats.increment('changed', host.name)
                        self._tqm.send_callback('v2_runner_on_ok', task_result)
                        if self._diff and 'diff' in task_result._result:
                            self._tqm.send_callback('v2_on_file_diff', task_result)
                    self._pending_results -= 1
                    if host.name in self._blocked_hosts:
                        del self._blocked_hosts[host.name]
                    # If this is a role task, mark the parent role as being run (if
                    # the task was ok or failed, but not skipped or unreachable)
                    if task_result._task._role is not None and result[0] in ('host_task_ok', 'host_task_failed'):
                        # lookup the role in the ROLE_CACHE to make sure we're dealing
                        # with the correct object and mark it as executed
                        for (entry, role_obj) in iterator._play.ROLE_CACHE[task_result._task._role._role_name].iteritems():
                            params = task_result._task._role._role_params
                            if task_result._task._role.tags is not None:
                                params['tags'] = task_result._task._role.tags
                            if task_result._task._role.when is not None:
                                params['when'] = task_result._task._role.when
                            hashed_entry = hash_params(params)
                            if entry == hashed_entry:
                                role_obj._had_task_run = True
                    ret_results.append(task_result)
                elif result[0] == 'add_host':
                    task_result = result[1]
                    new_host_info = task_result.get('add_host', dict())
                    self._add_host(new_host_info)
                elif result[0] == 'add_group':
                    task = result[1]
                    self._add_group(task, iterator)
                elif result[0] == 'notify_handler':
                    task_result = result[1]
                    handler_name = result[2]
                    original_task = iterator.get_original_task(task_result._host, task_result._task)
                    if handler_name not in self._notified_handlers:
                        self._notified_handlers[handler_name] = []
                    if task_result._host not in self._notified_handlers[handler_name]:
                        self._notified_handlers[handler_name].append(task_result._host)
                elif result[0] == 'register_host_var':
                    # essentially the same as 'set_host_var' below, however we
                    # never follow the delegate_to value for registered vars
                    host = result[1]
                    var_name = result[2]
                    var_value = result[3]
                    self._variable_manager.set_host_variable(host, var_name, var_value)
                elif result[0] in ('set_host_var', 'set_host_facts'):
                    host = result[1]
                    task = result[2]
                    item = result[3]
                    if task.delegate_to is not None:
                        # resolve the delegated target, templating the
                        # delegate_to value against this host's task vars
                        task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
                        task_vars = self.add_tqm_variables(task_vars, play=iterator._play)
                        if item is not None:
                            task_vars['item'] = item
                        templar = Templar(loader=self._loader, variables=task_vars)
                        host_name = templar.template(task.delegate_to)
                        target_host = self._inventory.get_host(host_name)
                        if target_host is None:
                            target_host = Host(name=host_name)
                    else:
                        target_host = host
                    if result[0] == 'set_host_var':
                        var_name = result[4]
                        var_value = result[5]
                        self._variable_manager.set_host_variable(target_host, var_name, var_value)
                    elif result[0] == 'set_host_facts':
                        facts = result[4]
                        self._variable_manager.set_host_facts(target_host, facts)
                else:
                    raise AnsibleError("unknown result message received: %s" % result[0])
            except Queue.Empty:
                pass
        return ret_results
    def _wait_on_pending_results(self, iterator):
        '''
        Wait for the shared counter to drop to zero, using a short sleep
        between checks to ensure we don't spin lock
        '''
        ret_results = []
        self._display.debug("waiting for pending results...")
        while self._pending_results > 0 and not self._tqm._terminated:
            results = self._process_pending_results(iterator)
            ret_results.extend(results)
            time.sleep(0.01)
        self._display.debug("no more pending results, returning what we have")
        return ret_results
    def _add_host(self, host_info):
        '''
        Helper function to add a new host to inventory based on a task result.
        '''
        host_name = host_info.get('host_name')
        # Check if host in cache, add if not
        if host_name in self._inventory._hosts_cache:
            new_host = self._inventory._hosts_cache[host_name]
        else:
            new_host = Host(name=host_name)
            self._inventory._hosts_cache[host_name] = new_host
            allgroup = self._inventory.get_group('all')
            allgroup.add_host(new_host)
        # Set/update the vars for this host
        # FIXME: probably should have a set vars method for the host?
        new_vars = host_info.get('host_vars', dict())
        new_host.vars.update(new_vars)
        new_groups = host_info.get('groups', [])
        for group_name in new_groups:
            if not self._inventory.get_group(group_name):
                new_group = Group(group_name)
                self._inventory.add_group(new_group)
                new_group.vars = self._inventory.get_group_variables(group_name)
            else:
                new_group = self._inventory.get_group(group_name)
            new_group.add_host(new_host)
            # add this host to the group cache
            if self._inventory._groups_list is not None:
                if group_name in self._inventory._groups_list:
                    if new_host.name not in self._inventory._groups_list[group_name]:
                        self._inventory._groups_list[group_name].append(new_host.name)
        # clear pattern caching completely since it's unpredictable what
        # patterns may have referenced the group
        # FIXME: is this still required?
        self._inventory.clear_pattern_cache()
    def _add_group(self, task, iterator):
        '''
        Helper function to add a group (if it does not exist), and to assign the
        specified host to that group.
        '''
        # the host here is from the executor side, which means it was a
        # serialized/cloned copy and we'll need to look up the proper
        # host object from the master inventory
        groups = {}
        changed = False
        for host in self._inventory.get_hosts():
            original_task = iterator.get_original_task(host, task)
            all_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=original_task)
            templar = Templar(loader=self._loader, variables=all_vars)
            group_name = templar.template(original_task.args.get('key'))
            if task.evaluate_conditional(templar=templar, all_vars=all_vars):
                if group_name not in groups:
                    groups[group_name] = []
                groups[group_name].append(host)
        for group_name, hosts in groups.iteritems():
            new_group = self._inventory.get_group(group_name)
            if not new_group:
                # create the new group and add it to inventory
                new_group = Group(name=group_name)
                self._inventory.add_group(new_group)
                # and add the group to the proper hierarchy
                allgroup = self._inventory.get_group('all')
                allgroup.add_child_group(new_group)
                changed = True
            for host in hosts:
                if group_name not in host.get_groups():
                    new_group.add_host(host)
                    changed = True
        return changed
    def _load_included_file(self, included_file, iterator, is_handler=False):
        '''
        Loads an included YAML file of tasks, applying the optional set of variables.
        '''
        try:
            data = self._loader.load_from_file(included_file._filename)
            if data is None:
                return []
        except AnsibleError, e:
            # mark every host that requested this include as failed
            for host in included_file._hosts:
                tr = TaskResult(host=host, task=included_file._task, return_data=dict(failed=True, reason=str(e)))
                iterator.mark_host_failed(host)
                self._tqm._failed_hosts[host.name] = True
                self._tqm._stats.increment('failures', host.name)
                self._tqm.send_callback('v2_runner_on_failed', tr)
            return []
        if not isinstance(data, list):
            raise AnsibleParserError("included task files must contain a list of tasks", obj=included_file._task._ds)
        block_list = load_list_of_blocks(
            data,
            play=included_file._task._block._play,
            parent_block=included_file._task._block,
            task_include=included_file._task,
            role=included_file._task._role,
            use_handlers=is_handler,
            loader=self._loader
        )
        # set the vars for this task from those specified as params to the include
        for b in block_list:
            b.vars = included_file._args.copy()
        return block_list
    def run_handlers(self, iterator, play_context):
        '''
        Runs handlers on those hosts which have been notified.
        '''
        result = True
        for handler_block in iterator._play.handlers:
            # FIXME: handlers need to support the rescue/always portions of blocks too,
            # but this may take some work in the iterator and gets tricky when
            # we consider the ability of meta tasks to flush handlers
            for handler in handler_block.block:
                handler_name = handler.get_name()
                if handler_name in self._notified_handlers and len(self._notified_handlers[handler_name]):
                    # FIXME: need to use iterator.get_failed_hosts() instead?
                    #if not len(self.get_hosts_remaining(iterator._play)):
                    #    self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
                    #    result = False
                    #    break
                    self._tqm.send_callback('v2_playbook_on_handler_task_start', handler)
                    host_results = []
                    for host in self._notified_handlers[handler_name]:
                        if not handler.has_triggered(host) and (host.name not in self._tqm._failed_hosts or play_context.force_handlers):
                            task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=handler)
                            task_vars = self.add_tqm_variables(task_vars, play=iterator._play)
                            self._queue_task(host, handler, task_vars, play_context)
                            #handler.flag_for_host(host)
                        # drain any results that are already available so the
                        # queue does not back up while handlers are dispatched
                        results = self._process_pending_results(iterator)
                        host_results.extend(results)
                    results = self._wait_on_pending_results(iterator)
                    host_results.extend(results)
                    # wipe the notification list
                    self._notified_handlers[handler_name] = []
                    try:
                        included_files = IncludedFile.process_include_results(
                            host_results,
                            self._tqm,
                            iterator=iterator,
                            loader=self._loader,
                            variable_manager=self._variable_manager
                        )
                    except AnsibleError, e:
                        return False
                    if len(included_files) > 0:
                        for included_file in included_files:
                            try:
                                new_blocks = self._load_included_file(included_file, iterator=iterator, is_handler=True)
                                # for every task in each block brought in by the include, add the list
                                # of hosts which included the file to the notified_handlers dict
                                for block in new_blocks:
                                    for task in block.block:
                                        if task.name in self._notified_handlers:
                                            for host in included_file._hosts:
                                                if host.name not in self._notified_handlers[task.name]:
                                                    self._notified_handlers[task.name].append(host)
                                        else:
                                            self._notified_handlers[task.name] = included_file._hosts[:]
                                # and add the new blocks to the list of handler blocks
                                # NOTE(review): 'block' is the last element of new_blocks
                                # here -- confirm all new blocks should be appended
                                handler_block.block.extend(block.block)
                                #iterator._play.handlers.extend(new_blocks)
                            except AnsibleError, e:
                                for host in included_file._hosts:
                                    iterator.mark_host_failed(host)
                                    self._tqm._failed_hosts[host.name] = True
                                self._display.warning(str(e))
                                continue
        self._display.debug("done running handlers, result is: %s" % result)
        return result
    def _take_step(self, task, host=None):
        '''
        Interactive "step" mode prompt: ask the user whether the given task
        should run and return True when it should. Answering 'c'/'continue'
        disables step mode for the remainder of the run.
        '''
        ret=False
        if host:
            msg = u'Perform task: %s on %s (y/n/c): ' % (task, host)
        else:
            msg = u'Perform task: %s (y/n/c): ' % task
        resp = self._display.prompt(msg)
        if resp.lower() in ['y','yes']:
            self._display.debug("User ran task")
            ret = True
        elif resp.lower() in ['c', 'continue']:
            self._display.debug("User ran task and cancled step mode")
            self._step = False
            ret = True
        else:
            self._display.debug("User skipped task")
            # NOTE(review): the banner is only shown when the task is
            # skipped -- confirm this is intentional
            self._display.banner(msg)
        return ret
| gpl-3.0 |
NaturalGIS/QGIS | python/plugins/processing/tools/vector.py | 41 | 3669 | # -*- coding: utf-8 -*-
"""
***************************************************************************
vector.py
---------------------
Date : February 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'February 2013'
__copyright__ = '(C) 2013, Victor Olaya'
from qgis.core import (NULL,
QgsFeatureRequest)
def resolveFieldIndex(source, attr):
    """Resolve *attr* to a field index on *source*.

    An integer is returned unchanged. Any other value is looked up among
    the source's field names via ``fields().lookupField()`` and the
    matching index is returned.

    Raises ValueError when no field with that name exists.
    """
    if isinstance(attr, int):
        return attr
    index = source.fields().lookupField(attr)
    if index == -1:
        raise ValueError('Wrong field name')
    return index
def values(source, *attributes):
    """Collect numeric values of the given fields from a feature source.

    Fields may be passed as names or as zero-based indices. Returns a
    dict mapping each passed identifier to the list of values, one per
    feature; values that cannot be parsed with float() become None.
    The source's existing selection is honored.
    """
    indices = [resolveFieldIndex(source, attr) for attr in attributes]
    attr_keys = dict(zip(indices, attributes))
    ret = {}
    # optimised request: only the needed attributes, no geometry
    request = QgsFeatureRequest().setSubsetOfAttributes(indices).setFlags(QgsFeatureRequest.NoGeometry)
    for feature in source.getFeatures(request):
        for i in indices:
            # any value that cannot be converted becomes None
            try:
                v = float(feature[i])
            except:
                v = None
            ret.setdefault(attr_keys[i], []).append(v)
    return ret
def convert_nulls(values, replacement=None):
    """
    Replace every NULL entry in *values* with *replacement* (None by default).

    :param values: iterable of values
    :param replacement: value substituted for NULL entries
    :return: new list with NULLs converted
    """
    converted = []
    for item in values:
        converted.append(item if item != NULL else replacement)
    return converted
def checkMinDistance(point, index, distance, points):
    """Return True when *point* lies at least *distance* away from the
    nearest point already stored in *points*, looked up through the
    spatial *index*. A zero distance always passes.
    """
    if distance == 0:
        return True
    nearest = index.nearestNeighbor(point, 1)
    if not nearest:
        return True
    neighbor_id = nearest[0]
    if neighbor_id in points and points[neighbor_id].sqrDist(point) < distance ** 2:
        return False
    return True
| gpl-2.0 |
haad/ansible-modules-core | files/lineinfile.py | 13 | 14501 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
# (c) 2014, Ahti Kitsik <ak@ahtik.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import re
import os
import pipes
import tempfile
DOCUMENTATION = """
---
module: lineinfile
author:
- "Daniel Hokka Zakrissoni (@dhozac)"
- "Ahti Kitsik (@ahtik)"
extends_documentation_fragment:
- files
- validate
short_description: Ensure a particular line is in a file, or replace an
existing line using a back-referenced regular expression.
description:
- This module will search a file for a line, and ensure that it is present or absent.
- This is primarily useful when you want to change a single line in
a file only. See the M(replace) module if you want to change
multiple, similar lines or check M(blockinfile) if you want to insert/update/remove a block of lines in a file.
For other cases, see the M(copy) or M(template) modules.
version_added: "0.7"
options:
dest:
required: true
aliases: [ name, destfile ]
description:
- The file to modify.
regexp:
required: false
version_added: 1.7
description:
- The regular expression to look for in every line of the file. For
C(state=present), the pattern to replace if found; only the last line
found will be replaced. For C(state=absent), the pattern of the line
to remove. Uses Python regular expressions; see
U(http://docs.python.org/2/library/re.html).
state:
required: false
choices: [ present, absent ]
default: "present"
aliases: []
description:
- Whether the line should be there or not.
line:
required: false
description:
- Required for C(state=present). The line to insert/replace into the
file. If C(backrefs) is set, may contain backreferences that will get
expanded with the C(regexp) capture groups if the regexp matches.
backrefs:
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "1.1"
description:
- Used with C(state=present). If set, line can contain backreferences
(both positional and named) that will get populated if the C(regexp)
matches. This flag changes the operation of the module slightly;
C(insertbefore) and C(insertafter) will be ignored, and if the C(regexp)
doesn't match anywhere in the file, the file will be left unchanged.
If the C(regexp) does match, the last matching line will be replaced by
the expanded line parameter.
insertafter:
required: false
default: EOF
description:
- Used with C(state=present). If specified, the line will be inserted
after the last match of specified regular expression. A special value is
available; C(EOF) for inserting the line at the end of the file.
If specified regular expression has no matches, EOF will be used instead.
May not be used with C(backrefs).
choices: [ 'EOF', '*regex*' ]
insertbefore:
required: false
version_added: "1.1"
description:
- Used with C(state=present). If specified, the line will be inserted
before the last match of specified regular expression. A value is
available; C(BOF) for inserting the line at the beginning of the file.
If specified regular expression has no matches, the line will be
inserted at the end of the file. May not be used with C(backrefs).
choices: [ 'BOF', '*regex*' ]
create:
required: false
choices: [ "yes", "no" ]
default: "no"
description:
- Used with C(state=present). If specified, the file will be created
if it does not already exist. By default it will fail if the file
is missing.
backup:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- Create a backup file including the timestamp information so you can
get the original file back if you somehow clobbered it incorrectly.
others:
description:
- All arguments accepted by the M(file) module also work here.
required: false
"""
EXAMPLES = r"""
- lineinfile: dest=/etc/selinux/config regexp=^SELINUX= line=SELINUX=enforcing
- lineinfile: dest=/etc/sudoers state=absent regexp="^%wheel"
- lineinfile: dest=/etc/hosts regexp='^127\.0\.0\.1' line='127.0.0.1 localhost' owner=root group=root mode=0644
- lineinfile: dest=/etc/httpd/conf/httpd.conf regexp="^Listen " insertafter="^#Listen " line="Listen 8080"
- lineinfile: dest=/etc/services regexp="^# port for http" insertbefore="^www.*80/tcp" line="# port for http by default"
# Add a line to a file if it does not exist, without passing regexp
- lineinfile: dest=/tmp/testfile line="192.168.1.99 foo.lab.net foo"
# Fully quoted because of the ': ' on the line. See the Gotchas in the YAML docs.
- lineinfile: "dest=/etc/sudoers state=present regexp='^%wheel' line='%wheel ALL=(ALL) NOPASSWD: ALL'"
- lineinfile: dest=/opt/jboss-as/bin/standalone.conf regexp='^(.*)Xms(\d+)m(.*)$' line='\1Xms${xms}m\3' backrefs=yes
# Validate the sudoers file before saving
- lineinfile: dest=/etc/sudoers state=present regexp='^%ADMIN ALL\=' line='%ADMIN ALL=(ALL) NOPASSWD:ALL' validate='visudo -cf %s'
"""
def write_changes(module, lines, dest):
    """Write *lines* to a temporary file, optionally run the module's
    'validate' command against it, and atomically move it over *dest*
    when valid. Calls module.fail_json() on validation problems.
    """
    tmpfd, tmpfile = tempfile.mkstemp()
    with os.fdopen(tmpfd, 'wb') as tmp:
        tmp.writelines(lines)
    validate = module.params.get('validate', None)
    valid = not validate
    if validate:
        if "%s" not in validate:
            module.fail_json(msg="validate must contain %%s: %s" % (validate))
        rc, out, err = module.run_command(validate % tmpfile)
        valid = rc == 0
        if rc != 0:
            module.fail_json(msg='failed to validate: rc:%s error:%s' % (rc, err))
    if valid:
        module.atomic_move(tmpfile, os.path.realpath(dest))
def check_file_attrs(module, changed, message, diff):
    """Apply the common file-attribute arguments (owner, mode, selinux...)
    and fold any resulting change into the (message, changed) pair.
    """
    file_args = module.load_file_common_arguments(module.params)
    attrs_changed = module.set_fs_attributes_if_different(file_args, False, diff=diff)
    if attrs_changed:
        prefix = " and " if changed else ""
        message = message + prefix + "ownership, perms or SE linux context changed"
        changed = True
    return message, changed
def present(module, dest, regexp, line, insertafter, insertbefore, create,
            backup, backrefs):
    """
    Ensure ``line`` is present in ``dest``, replacing the last line matched
    by ``regexp`` (with optional backreference expansion) or inserting it
    at the position given by insertafter/insertbefore. Exits the module
    via exit_json()/fail_json(); does not return normally.
    """
    diff = {'before': '',
            'after': '',
            'before_header': '%s (content)' % dest,
            'after_header': '%s (content)' % dest}
    if not os.path.exists(dest):
        if not create:
            module.fail_json(rc=257, msg='Destination %s does not exist !' % dest)
        destpath = os.path.dirname(dest)
        if not os.path.exists(destpath) and not module.check_mode:
            os.makedirs(destpath)
        lines = []
    else:
        f = open(dest, 'rb')
        lines = f.readlines()
        f.close()
    if module._diff:
        diff['before'] = ''.join(lines)
    if regexp is not None:
        mre = re.compile(regexp)
    if insertafter not in (None, 'BOF', 'EOF'):
        insre = re.compile(insertafter)
    elif insertbefore not in (None, 'BOF'):
        insre = re.compile(insertbefore)
    else:
        insre = None
    # index[0] is the line num where regexp has been found
    # index[1] is the line num where insertafter/inserbefore has been found
    index = [-1, -1]
    m = None
    for lineno, cur_line in enumerate(lines):
        if regexp is not None:
            match_found = mre.search(cur_line)
        else:
            # without a regexp, match on exact line content (newline stripped)
            match_found = line == cur_line.rstrip('\r\n')
        if match_found:
            index[0] = lineno
            m = match_found
        elif insre is not None and insre.search(cur_line):
            if insertafter:
                # + 1 for the next line
                index[1] = lineno + 1
            if insertbefore:
                # + 1 for the previous line
                index[1] = lineno
    msg = ''
    changed = False
    # Regexp matched a line in the file
    if index[0] != -1:
        if backrefs:
            new_line = m.expand(line)
        else:
            # Don't do backref expansion if not asked.
            new_line = line
        if not new_line.endswith(os.linesep):
            new_line += os.linesep
        if lines[index[0]] != new_line:
            lines[index[0]] = new_line
            msg = 'line replaced'
            changed = True
    elif backrefs:
        # Do absolutely nothing, since it's not safe generating the line
        # without the regexp matching to populate the backrefs.
        pass
    # Add it to the beginning of the file
    elif insertbefore == 'BOF' or insertafter == 'BOF':
        lines.insert(0, line + os.linesep)
        msg = 'line added'
        changed = True
    # Add it to the end of the file if requested or
    # if insertafter/insertbefore didn't match anything
    # (so default behaviour is to add at the end)
    elif insertafter == 'EOF' or index[1] == -1:
        # If the file is not empty then ensure there's a newline before the added line
        if len(lines)>0 and not (lines[-1].endswith('\n') or lines[-1].endswith('\r')):
            lines.append(os.linesep)
        lines.append(line + os.linesep)
        msg = 'line added'
        changed = True
    # insert* matched, but not the regexp
    else:
        lines.insert(index[1], line + os.linesep)
        msg = 'line added'
        changed = True
    if module._diff:
        diff['after'] = ''.join(lines)
    backupdest = ""
    if changed and not module.check_mode:
        if backup and os.path.exists(dest):
            backupdest = module.backup_local(dest)
        write_changes(module, lines, dest)
    if module.check_mode and not os.path.exists(dest):
        module.exit_json(changed=changed, msg=msg, backup=backupdest, diff=diff)
    attr_diff = {}
    msg, changed = check_file_attrs(module, changed, msg, attr_diff)
    attr_diff['before_header'] = '%s (file attributes)' % dest
    attr_diff['after_header'] = '%s (file attributes)' % dest
    difflist = [diff, attr_diff]
    module.exit_json(changed=changed, msg=msg, backup=backupdest, diff=difflist)
def absent(module, dest, regexp, line, backup):
    """
    Remove every line of ``dest`` matching ``regexp`` (or exactly equal to
    ``line`` when no regexp is given). Exits the module via exit_json().
    """
    if not os.path.exists(dest):
        module.exit_json(changed=False, msg="file not present")
    msg = ''
    diff = {'before': '',
            'after': '',
            'before_header': '%s (content)' % dest,
            'after_header': '%s (content)' % dest}
    f = open(dest, 'rb')
    lines = f.readlines()
    f.close()
    if module._diff:
        diff['before'] = ''.join(lines)
    if regexp is not None:
        cre = re.compile(regexp)
    found = []
    def matcher(cur_line):
        # keep the line only when it does NOT match; record matches in found
        if regexp is not None:
            match_found = cre.search(cur_line)
        else:
            match_found = line == cur_line.rstrip('\r\n')
        if match_found:
            found.append(cur_line)
        return not match_found
    # NOTE(review): relies on Python 2 filter() returning a list; under
    # Python 3 the iterator would be exhausted by the diff join below --
    # confirm the target interpreter before porting
    lines = filter(matcher, lines)
    changed = len(found) > 0
    if module._diff:
        diff['after'] = ''.join(lines)
    backupdest = ""
    if changed and not module.check_mode:
        if backup:
            backupdest = module.backup_local(dest)
        write_changes(module, lines, dest)
    if changed:
        msg = "%s line(s) removed" % len(found)
    attr_diff = {}
    msg, changed = check_file_attrs(module, changed, msg, attr_diff)
    attr_diff['before_header'] = '%s (file attributes)' % dest
    attr_diff['after_header'] = '%s (file attributes)' % dest
    difflist = [diff, attr_diff]
    module.exit_json(changed=changed, found=len(found), msg=msg, backup=backupdest, diff=difflist)
def main():
    """
    Module entry point: parse the module arguments, enforce the
    state-specific parameter requirements, and dispatch to present()
    or absent().
    """
    module = AnsibleModule(
        argument_spec=dict(
            dest=dict(required=True, aliases=['name', 'destfile']),
            state=dict(default='present', choices=['absent', 'present']),
            regexp=dict(default=None),
            line=dict(aliases=['value']),
            insertafter=dict(default=None),
            insertbefore=dict(default=None),
            backrefs=dict(default=False, type='bool'),
            create=dict(default=False, type='bool'),
            backup=dict(default=False, type='bool'),
            validate=dict(default=None, type='str'),
        ),
        mutually_exclusive=[['insertbefore', 'insertafter']],
        add_file_common_args=True,
        supports_check_mode=True
    )
    params = module.params
    create = module.params['create']
    backup = module.params['backup']
    backrefs = module.params['backrefs']
    dest = os.path.expanduser(params['dest'])
    if os.path.isdir(dest):
        module.fail_json(rc=256, msg='Destination %s is a directory !' % dest)
    if params['state'] == 'present':
        if backrefs and params['regexp'] is None:
            module.fail_json(msg='regexp= is required with backrefs=true')
        if params.get('line', None) is None:
            module.fail_json(msg='line= is required with state=present')
        # Deal with the insertafter default value manually, to avoid errors
        # because of the mutually_exclusive mechanism.
        ins_bef, ins_aft = params['insertbefore'], params['insertafter']
        if ins_bef is None and ins_aft is None:
            ins_aft = 'EOF'
        line = params['line']
        present(module, dest, params['regexp'], line,
                ins_aft, ins_bef, create, backup, backrefs)
    else:
        if params['regexp'] is None and params.get('line', None) is None:
            module.fail_json(msg='one of line= or regexp= is required with state=absent')
        absent(module, dest, params['regexp'], params.get('line', None), backup)
# import module snippets
# NOTE: Ansible replaces these wildcard imports with the shared module
# snippet code when building the module payload, which is why they live
# at the bottom of the file instead of the top.
from ansible.module_utils.basic import *
from ansible.module_utils.splitter import *

if __name__ == '__main__':
    main()
| gpl-3.0 |
lele1122/pelican-plugins | pelican_comment_system/pelican_comment_system.py | 32 | 6416 | # -*- coding: utf-8 -*-
"""
Pelican Comment System
======================
A Pelican plugin, which allows you to add comments to your articles.
Author: Bernhard Scheirle
"""
from __future__ import unicode_literals
import logging
import os
import copy
logger = logging.getLogger(__name__)
from itertools import chain
from pelican import signals
from pelican.readers import Readers
from pelican.writers import Writer
from . comment import Comment
from . import avatars
_all_comments = []
def setdefault(pelican, settings):
    """Apply default ``(key, value)`` settings to Pelican's global
    DEFAULT_CONFIG and, when a Pelican instance is given, to its
    instance settings as well (without overriding existing values)."""
    from pelican.settings import DEFAULT_CONFIG

    targets = [DEFAULT_CONFIG]
    if pelican:
        targets.append(pelican.settings)

    for target in targets:
        for key, value in settings:
            target.setdefault(key, value)
def pelican_initialized(pelican):
    """Register this plugin's default settings and exclude the comment
    directory from normal page/article processing."""
    from pelican.settings import DEFAULT_CONFIG
    settings = [
        ('PELICAN_COMMENT_SYSTEM', False),
        ('PELICAN_COMMENT_SYSTEM_DIR', 'comments'),
        ('PELICAN_COMMENT_SYSTEM_IDENTICON_OUTPUT_PATH', 'images/identicon'),
        ('PELICAN_COMMENT_SYSTEM_IDENTICON_DATA', ()),
        ('PELICAN_COMMENT_SYSTEM_IDENTICON_SIZE', 72),
        ('PELICAN_COMMENT_SYSTEM_AUTHORS', {}),
        ('PELICAN_COMMENT_SYSTEM_FEED', os.path.join('feeds', 'comment.%s.atom.xml')),
        ('PELICAN_COMMENT_SYSTEM_FEED_ALL', os.path.join('feeds', 'comments.all.atom.xml')),
        ('COMMENT_URL', '#comment-{slug}')
    ]
    setdefault(pelican, settings)

    # Comment source files must never be rendered as standalone pages
    # or articles, so exclude their directory from both generators.
    DEFAULT_CONFIG['PAGE_EXCLUDES'].append(
        DEFAULT_CONFIG['PELICAN_COMMENT_SYSTEM_DIR'])
    DEFAULT_CONFIG['ARTICLE_EXCLUDES'].append(
        DEFAULT_CONFIG['PELICAN_COMMENT_SYSTEM_DIR'])
    if pelican:
        pelican.settings['PAGE_EXCLUDES'].append(
            pelican.settings['PELICAN_COMMENT_SYSTEM_DIR'])
        pelican.settings['ARTICLE_EXCLUDES'].append(
            pelican.settings['PELICAN_COMMENT_SYSTEM_DIR'])
def initialize(article_generator):
    """Initialise the avatar/identicon subsystem from generator settings."""
    avatars.init(
        article_generator.settings['OUTPUT_PATH'],
        article_generator.settings[
            'PELICAN_COMMENT_SYSTEM_IDENTICON_OUTPUT_PATH'],
        article_generator.settings['PELICAN_COMMENT_SYSTEM_IDENTICON_DATA'],
        # NOTE(review): under Python 3 this is true (float) division; if
        # avatars.init expects an integer cell size, this should likely
        # be `// 3` — confirm against avatars.init's signature.
        article_generator.settings[
            'PELICAN_COMMENT_SYSTEM_IDENTICON_SIZE'] / 3,
        article_generator.settings['PELICAN_COMMENT_SYSTEM_AUTHORS'],
    )
def warn_on_slug_collision(items):
    """Log a warning for every slug that is shared by more than one comment.

    :param items: iterable of comment objects exposing ``slug`` and
        ``source_path`` attributes
    """
    slugs = {}
    for comment in items:
        # Group comments by slug; setdefault replaces the explicit
        # `if not comment.slug in slugs` membership test.
        slugs.setdefault(comment.slug, []).append(comment)

    for slug, itemList in slugs.items():
        len_ = len(itemList)
        if len_ > 1:
            logger.warning('There are %s comments with the same slug: %s', len_, slug)
            for x in itemList:
                logger.warning(' %s', x.source_path)
def write_feed_all(gen, writer):
    """Write one Atom feed containing the comments of every article.

    Connected to the ``article_writer_finalized`` signal. The ``writer``
    argument supplied by the signal is ignored; a fresh Writer is created
    below and bound to the same name.
    """
    if gen.settings['PELICAN_COMMENT_SYSTEM'] is not True:
        return
    if gen.settings['PELICAN_COMMENT_SYSTEM_FEED_ALL'] is None:
        return

    context = copy.copy(gen.context)
    context['SITENAME'] += " - All Comments"
    context['SITESUBTITLE'] = ""
    path = gen.settings['PELICAN_COMMENT_SYSTEM_FEED_ALL']

    global _all_comments
    # Newest entries first in the feed.
    _all_comments = sorted(_all_comments)
    _all_comments.reverse()

    # NOTE(review): this mutates the comment objects in place; if this
    # handler ever ran twice in one build, titles/urls would be prefixed
    # twice — confirm the signal fires only once per run.
    for com in _all_comments:
        com.title = com.article.title + " - " + com.title
        com.override_url = com.article.url + com.url

    writer = Writer(gen.output_path, settings=gen.settings)
    writer.write_feed(_all_comments, context, path)
def write_feed(gen, items, context, slug):
    """Write the per-article comment feed for ``slug``, unless the
    per-article feed setting is disabled (None)."""
    path_template = gen.settings['PELICAN_COMMENT_SYSTEM_FEED']
    if path_template is None:
        return

    feed_writer = Writer(gen.output_path, settings=gen.settings)
    feed_writer.write_feed(items, context, path_template % slug)
def add_static_comments(gen, content):
    """Read, attach and publish the comments belonging to one article.

    Populates ``content.comments`` / ``content.comments_count`` and writes
    the per-article comment feed. Connected to
    ``article_generator_write_article``.
    """
    if gen.settings['PELICAN_COMMENT_SYSTEM'] is not True:
        return

    global _all_comments

    content.comments_count = 0
    content.comments = []

    # Modify the local context, so we get proper values for the feed
    context = copy.copy(gen.context)
    context['SITEURL'] += "/" + content.url
    context['SITENAME'] += " - Comments: " + content.title
    context['SITESUBTITLE'] = ""

    # Comments for an article live in <PATH>/<comments dir>/<article slug>/.
    folder = os.path.join(
        gen.settings['PATH'],
        gen.settings['PELICAN_COMMENT_SYSTEM_DIR'],
        content.slug
    )

    if not os.path.isdir(folder):
        logger.debug("No comments found for: %s", content.slug)
        write_feed(gen, [], context, content.slug)
        return

    reader = Readers(gen.settings)
    comments = []
    replies = []

    for file in os.listdir(folder):
        name, extension = os.path.splitext(file)
        # Only parse files whose extension a Pelican reader understands.
        if extension[1:].lower() in reader.extensions:
            com = reader.read_file(
                base_path=folder, path=file,
                content_class=Comment, context=context)

            com.article = content
            _all_comments.append(com)

            # A `replyto` metadata field marks a comment as a reply.
            if hasattr(com, 'replyto'):
                replies.append(com)
            else:
                comments.append(com)

    # The feed contains every comment (top-level and replies), newest first.
    feed_items = sorted(comments + replies)
    feed_items.reverse()
    warn_on_slug_collision(feed_items)

    write_feed(gen, feed_items, context, content.slug)

    # TODO: Fix this O(n²) loop
    for reply in replies:
        for comment in chain(comments, replies):
            if comment.slug == reply.replyto:
                comment.addReply(reply)

    count = 0
    for comment in comments:
        comment.sortReplies()
        count += comment.countReplies()

    comments = sorted(comments)

    content.comments_count = len(comments) + count
    content.comments = comments
def writeIdenticonsToDisk(gen, writer):
    """Generate any identicon avatars that are still missing on disk.

    Signal handler for ``article_writer_finalized``; both arguments are
    unused but required by the signal signature.
    """
    avatars.generateAndSaveMissingAvatars()
def pelican_finalized(pelican):
    """Report how many comments were processed and reset module state.

    Connected to the ``finalized`` signal; clearing ``_all_comments`` keeps
    repeated builds (e.g. with --autoreload) from accumulating comments.
    """
    if pelican.settings['PELICAN_COMMENT_SYSTEM'] is not True:
        return
    global _all_comments
    # Use the module logger (with lazy %-args) instead of print() so the
    # message respects the configured logging setup, consistent with the
    # rest of this module.
    logger.info('Processed %s comment(s)', len(_all_comments))
    _all_comments = []
def register():
    """Hook the comment system into Pelican's signal pipeline."""
    signals.initialized.connect(pelican_initialized)
    signals.article_generator_init.connect(initialize)
    signals.article_generator_write_article.connect(add_static_comments)
    # Both handlers connect to the same signal; identicons are flushed
    # before the all-comments feed is written.
    signals.article_writer_finalized.connect(writeIdenticonsToDisk)
    signals.article_writer_finalized.connect(write_feed_all)
    signals.finalized.connect(pelican_finalized)
| agpl-3.0 |
StyXman/GitPython | git/refs/log.py | 14 | 10886 | from git.util import (
Actor,
LockedFD,
LockFile,
assure_directory_exists,
to_native_path,
)
from gitdb.util import (
bin_to_hex,
join,
file_contents_ro_filepath,
)
from git.objects.util import (
parse_date,
Serializable,
altz_to_utctz_str,
)
from git.compat import (
PY3,
xrange,
string_types,
defenc
)
import time
import re
__all__ = ["RefLog", "RefLogEntry"]
class RefLogEntry(tuple):

    """Named tuple allowing easy access to the revlog data fields"""

    _re_hexsha_only = re.compile('^[0-9A-Fa-f]{40}$')
    __slots__ = tuple()

    def __repr__(self):
        """Representation of ourselves in git reflog format"""
        res = self.format()
        if PY3:
            return res
        else:
            # repr must return a string, which it will auto-encode from unicode using the default encoding.
            # This usually fails, so we encode ourselves
            return res.encode(defenc)

    def format(self):
        """:return: a string suitable to be placed in a reflog file"""
        act = self.actor
        time = self.time
        return u"{0} {1} {2} <{3}> {4!s} {5}\t{6}\n".format(self.oldhexsha,
                                                            self.newhexsha,
                                                            act.name,
                                                            act.email,
                                                            time[0],
                                                            altz_to_utctz_str(time[1]),
                                                            self.message)

    @property
    def oldhexsha(self):
        """The hexsha to the commit the ref pointed to before the change"""
        return self[0]

    @property
    def newhexsha(self):
        """The hexsha to the commit the ref now points to, after the change"""
        return self[1]

    @property
    def actor(self):
        """Actor instance, providing access"""
        return self[2]

    @property
    def time(self):
        """time as tuple:

        * [0] = int(time)
        * [1] = int(timezone_offset) in time.altzone format """
        return self[3]

    @property
    def message(self):
        """Message describing the operation that acted on the reference"""
        return self[4]

    @classmethod
    def new(cls, oldhexsha, newhexsha, actor, time, tz_offset, message):
        """:return: New instance of a RefLogEntry
        :raise ValueError: If actor is not an Actor instance"""
        # First parameter renamed to ``cls``: this is a classmethod, not an
        # instance method.
        if not isinstance(actor, Actor):
            raise ValueError("Need actor instance, got %s" % actor)
        # END check types
        return RefLogEntry((oldhexsha, newhexsha, actor, (time, tz_offset), message))

    @classmethod
    def from_line(cls, line):
        """:return: New RefLogEntry instance from the given revlog line.
        :param line: line bytes without trailing newline
        :raise ValueError: If line could not be parsed"""
        line = line.decode(defenc)
        fields = line.split('\t', 1)
        if len(fields) == 1:
            info, msg = fields[0], None
        elif len(fields) == 2:
            info, msg = fields
        else:
            raise ValueError("Line must have up to two TAB-separated fields."
                             " Got %s" % repr(line))
        # END handle first split

        # Layout: <40-char old sha> <40-char new sha> <actor> <timestamp tz>
        oldhexsha = info[:40]
        newhexsha = info[41:81]
        for hexsha in (oldhexsha, newhexsha):
            if not cls._re_hexsha_only.match(hexsha):
                raise ValueError("Invalid hexsha: %s" % hexsha)
            # END if hexsha re doesn't match
        # END for each hexsha

        email_end = info.find('>', 82)
        if email_end == -1:
            raise ValueError("Missing token: >")
        # END handle missing end brace

        actor = Actor._from_string(info[82:email_end + 1])
        time, tz_offset = parse_date(info[email_end + 2:])

        return RefLogEntry((oldhexsha, newhexsha, actor, (time, tz_offset), msg))
class RefLog(list, Serializable):

    """A reflog contains reflog entries, each of which defines a certain state
    of the head in question. Custom query methods allow to retrieve log entries
    by date or by other criteria.

    Reflog entries are orded, the first added entry is first in the list, the last
    entry, i.e. the last change of the head or reference, is last in the list."""

    __slots__ = ('_path', )

    def __new__(cls, filepath=None):
        inst = super(RefLog, cls).__new__(cls)
        return inst

    def __init__(self, filepath=None):
        """Initialize this instance with an optional filepath, from which we will
        initialize our data. The path is also used to write changes back using
        the write() method"""
        self._path = filepath
        if filepath is not None:
            self._read_from_file()
        # END handle filepath

    def _read_from_file(self):
        # Populate this instance from self._path; a missing reflog file
        # is valid and simply leaves the log empty.
        try:
            fmap = file_contents_ro_filepath(self._path, stream=True, allow_mmap=True)
        except OSError:
            # it is possible and allowed that the file doesn't exist !
            return
        # END handle invalid log

        try:
            self._deserialize(fmap)
        finally:
            fmap.close()
        # END handle closing of handle

    # { Interface

    @classmethod
    def from_file(cls, filepath):
        """
        :return: a new RefLog instance containing all entries from the reflog
            at the given filepath
        :param filepath: path to reflog
        :raise ValueError: If the file could not be read or was corrupted in some way"""
        return cls(filepath)

    @classmethod
    def path(cls, ref):
        """
        :return: string to absolute path at which the reflog of the given ref
            instance would be found. The path is not guaranteed to point to a valid
            file though.
        :param ref: SymbolicReference instance"""
        return join(ref.repo.git_dir, "logs", to_native_path(ref.path))

    @classmethod
    def iter_entries(cls, stream):
        """
        :return: Iterator yielding RefLogEntry instances, one for each line read
            sfrom the given stream.
        :param stream: file-like object containing the revlog in its native format
            or basestring instance pointing to a file to read"""
        new_entry = RefLogEntry.from_line
        if isinstance(stream, string_types):
            stream = file_contents_ro_filepath(stream)
        # END handle stream type
        while True:
            line = stream.readline()
            if not line:
                return
            yield new_entry(line.strip())
        # END endless loop
        # NOTE: an unreachable stream.close() used to follow this loop; the
        # stream is intentionally left for its owner to close.

    @classmethod
    def entry_at(cls, filepath, index):
        """:return: RefLogEntry at the given index
        :param filepath: full path to the index file from which to read the entry
        :param index: python list compatible index, i.e. it may be negative to
            specifiy an entry counted from the end of the list

        :raise IndexError: If the entry didn't exist

        .. note:: This method is faster as it only parses the entry at index, skipping
            all other lines. Nonetheless, the whole file has to be read if
            the index is negative
        """
        # Use a context manager so the file handle is closed on every code
        # path (the previous implementation leaked it).
        with open(filepath, 'rb') as fp:
            if index < 0:
                return RefLogEntry.from_line(fp.readlines()[index].strip())
            else:
                # read until index is reached
                for i in xrange(index + 1):
                    line = fp.readline()
                    if not line:
                        break
                    # END abort on eof
                # END handle runup

                if i != index or not line:
                    raise IndexError
                # END handle exception

                return RefLogEntry.from_line(line.strip())
            # END handle index

    def to_file(self, filepath):
        """Write the contents of the reflog instance to a file at the given filepath.
        :param filepath: path to file, parent directories are assumed to exist"""
        lfd = LockedFD(filepath)
        assure_directory_exists(filepath, is_file=True)

        fp = lfd.open(write=True, stream=True)
        try:
            self._serialize(fp)
            lfd.commit()
        except:
            # on failure it rolls back automatically, but we make it clear
            lfd.rollback()
            raise
        # END handle change

    @classmethod
    def append_entry(cls, config_reader, filepath, oldbinsha, newbinsha, message):
        """Append a new log entry to the revlog at filepath.

        :param config_reader: configuration reader of the repository - used to obtain
            user information. May also be an Actor instance identifying the committer directly.
            May also be None
        :param filepath: full path to the log file
        :param oldbinsha: binary sha of the previous commit
        :param newbinsha: binary sha of the current commit
        :param message: message describing the change to the reference
        :param write: If True, the changes will be written right away. Otherwise
            the change will not be written

        :return: RefLogEntry objects which was appended to the log

        :note: As we are append-only, concurrent access is not a problem as we
            do not interfere with readers."""
        if len(oldbinsha) != 20 or len(newbinsha) != 20:
            raise ValueError("Shas need to be given in binary format")
        # END handle sha type
        assure_directory_exists(filepath, is_file=True)
        # config_reader may already be an Actor; otherwise derive the
        # committer from the repository configuration.
        committer = isinstance(config_reader, Actor) and config_reader or Actor.committer(config_reader)
        entry = RefLogEntry((
            bin_to_hex(oldbinsha).decode('ascii'),
            bin_to_hex(newbinsha).decode('ascii'),
            committer, (int(time.time()), time.altzone), message
        ))

        lf = LockFile(filepath)
        lf._obtain_lock_or_raise()

        fd = open(filepath, 'ab')
        try:
            fd.write(entry.format().encode(defenc))
        finally:
            fd.close()
            lf._release_lock()
        # END handle write operation

        return entry

    def write(self):
        """Write this instance's data to the file we are originating from
        :return: self"""
        if self._path is None:
            raise ValueError("Instance was not initialized with a path, use to_file(...) instead")
        # END assert path
        self.to_file(self._path)
        return self

    # } END interface

    # { Serializable Interface
    def _serialize(self, stream):
        write = stream.write

        # write all entries
        for e in self:
            write(e.format().encode(defenc))
        # END for each entry

    def _deserialize(self, stream):
        self.extend(self.iter_entries(stream))
    # } END serializable interface
| bsd-3-clause |
stef1927/cassandra-dtest | upgrade_tests/regression_test.py | 6 | 7970 | """
Home for upgrade-related tests that don't fit in with the core upgrade testing in dtest.upgrade_through_versions
"""
import glob
import os
import re
import time
import pytest
import logging
from cassandra import ConsistencyLevel as CL
from dtest import RUN_STATIC_UPGRADE_MATRIX
from tools.jmxutils import (JolokiaAgent, make_mbean)
from tools.misc import add_skip
from .upgrade_base import UpgradeTester
from .upgrade_manifest import build_upgrade_pairs
since = pytest.mark.since
logger = logging.getLogger(__name__)
@pytest.mark.upgrade_test
class TestForRegressions(UpgradeTester):
    """
    Catch-all class for regression tests on specific versions.
    """
    # Cluster shape shared by the generated upgrade-path subclasses:
    # 2 nodes, RF=1, operations at CL.ONE. __test__=False keeps this base
    # class itself out of pytest collection; concrete subclasses are
    # generated at module import time (see the loop below the class).
    NODES, RF, __test__, CL = 2, 1, False, CL.ONE

    def test_10822(self):
        """
        @jira_ticket CASSANDRA-10822

        Original issue was seen when upgrading from 2.1 to 3.X versions.
        """
        session = self.prepare()

        session.execute("CREATE KEYSPACE financial WITH replication={'class':'SimpleStrategy', 'replication_factor': 1};")
        session.execute("""
        create table if not exists financial.symbol_history (
            symbol text,
            name text,
            year int,
            month int,
            day int,
            volume bigint,
            close double,
            open double,
            low double,
            high double,
            primary key((symbol, year), month, day)
        ) with CLUSTERING ORDER BY (month desc, day desc);
        """)

        symbol_years = [('CORP', 2004), ('BLAH', 2005), ('FOO', 2006), ('BAR', 2007), ('HUH', 2008)]

        # 50 rows per (symbol, year) partition, one per month value.
        for symbol, year in symbol_years:
            for month in range(0, 50):
                session.execute("INSERT INTO financial.symbol_history (symbol, name, year, month, day, volume) VALUES ('{}', 'MegaCorp', {}, {}, 1, 100)".format(symbol, year, month))

        # Delete one row (month=25) per partition.
        # NOTE(review): the third .format() argument (`month`) is unused —
        # month=25 is hard-coded in the statement.
        for symbol, year in symbol_years:
            session.execute("DELETE FROM financial.symbol_history WHERE symbol='{}' and year = {} and month=25;".format(symbol, year, month))

        sessions = self.do_upgrade(session)

        for s in sessions:
            # 50 inserted minus the 1 deleted row per partition.
            expected_rows = 49

            for symbol, year in symbol_years:
                count = s[1].execute("select count(*) from financial.symbol_history where symbol='{}' and year={};".format(symbol, year))[0][0]
                assert count == expected_rows, "actual {} did not match expected {}".format(count, expected_rows)

    def test13294(self):
        """
        Tests upgrades with files having a bunch of files with the same prefix as another file

        this file is then compacted and we verify that no other sstables are removed

        @jira_ticket CASSANDRA-13294
        """
        cluster = self.cluster
        cluster.set_datadir_count(1)  # we want the same prefix for all sstables
        session = self.prepare(jolokia=True)
        session.execute("CREATE KEYSPACE test13294 WITH replication={'class':'SimpleStrategy', 'replication_factor': 2};")
        session.execute("CREATE TABLE test13294.t (id int PRIMARY KEY, d int) WITH compaction = {'class': 'SizeTieredCompactionStrategy','enabled':'false'}")
        # One sstable per flushed row (autocompaction is disabled above).
        for x in range(0, 5):
            session.execute("INSERT INTO test13294.t (id, d) VALUES (%d, %d)" % (x, x))
            cluster.flush()

        node1 = cluster.nodelist()[0]
        sstables = node1.get_sstables('test13294', 't')
        node1.stop(wait_other_notice=True)

        # Rewrite sstable generations to 1, 10, 100, ... so each file name
        # is a prefix of the next one's (the scenario from CASSANDRA-13294).
        generation_re = re.compile(r'(.*-)(\d+)(-.*)')
        mul = 1
        first_sstable = ''
        for sstable in sstables:
            res = generation_re.search(sstable)
            if res:
                glob_for = "%s%s-*" % (res.group(1), res.group(2))
                for f in glob.glob(glob_for):
                    res2 = generation_re.search(f)
                    new_filename = "%s%s%s" % (res2.group(1), mul, res2.group(3))
                    os.rename(f, new_filename)
                    if first_sstable == '' and '-Data' in new_filename:
                        first_sstable = new_filename  # we should compact this
                mul = mul * 10

        node1.start(wait_other_notice=True)
        sessions = self.do_upgrade(session)
        checked = False
        for is_upgraded, cursor in sessions:
            if is_upgraded:
                sstables_before = self.get_all_sstables(node1)
                self.compact_sstable(node1, first_sstable)
                time.sleep(2)  # wait for sstables to get physically removed
                sstables_after = self.get_all_sstables(node1)
                # since autocompaction is disabled and we compact a single sstable above
                # the number of sstables after should be the same as before.
                assert len(sstables_before) == len(sstables_after)
                checked = True
        assert checked

    @since('3.0.14', max_version='3.0.99')
    def test_schema_agreement(self):
        """
        Test that nodes agree on the schema during an upgrade in the 3.0.x series.

        Create a table before upgrading the cluster and wait for schema agreement.
        Upgrade one node and create one more table, wait for schema agreement and check
        the schema versions with nodetool describecluster.

        We know that schemas will not necessarily agree from 2.1/2.2 to 3.0.x or from 3.0.x to 3.x
        and upwards, so we only test the 3.0.x series for now. We start with 3.0.13 because
        there is a problem in 3.0.13, see CASSANDRA-12213 and 13559.

        @jira_ticket CASSANDRA-13559
        """
        session = self.prepare(nodes=5)
        session.execute("CREATE TABLE schema_agreement_test_1 ( id int PRIMARY KEY, value text )")
        session.cluster.control_connection.wait_for_schema_agreement(wait_time=30)

        def validate_schema_agreement(n, is_upgr):
            # Parse `nodetool describecluster` output: exactly one schema
            # version bracket means all nodes agree.
            logger.debug("querying node {} for schema information, upgraded: {}".format(n.name, is_upgr))

            response = n.nodetool('describecluster').stdout
            logger.debug(response)
            schemas = response.split('Schema versions:')[1].strip()
            num_schemas = len(re.findall(r'\[.*?\]', schemas))
            assert num_schemas == 1, "There were multiple schema versions during an upgrade: {}" \
                .format(schemas)

        for node in self.cluster.nodelist():
            validate_schema_agreement(node, False)

        for is_upgraded, session, node in self.do_upgrade(session, return_nodes=True):
            validate_schema_agreement(node, is_upgraded)
            if is_upgraded:
                session.execute("CREATE TABLE schema_agreement_test_2 ( id int PRIMARY KEY, value text )")
                session.cluster.control_connection.wait_for_schema_agreement(wait_time=30)
                validate_schema_agreement(node, is_upgraded)

    def compact_sstable(self, node, sstable):
        """Trigger a user-defined compaction of a single sstable via JMX."""
        mbean = make_mbean('db', type='CompactionManager')
        with JolokiaAgent(node) as jmx:
            jmx.execute_method(mbean, 'forceUserDefinedCompaction', [sstable])

    def get_all_sstables(self, node):
        """Return the paths of all Data files for keyspace test13294."""
        # note that node.get_sstables(...) only returns current version sstables
        keyspace_dirs = [os.path.join(node.get_path(), "data{0}".format(x), "test13294") for x in range(0, node.cluster.data_dir_count)]
        files = []
        for d in keyspace_dirs:
            for f in glob.glob(d + "/*/*Data*"):
                files.append(f)
        return files
# Dynamically generate one concrete test class per upgrade path so pytest
# discovers a separate suite for each version pair. Classes whose upgrade
# path does not apply to the current environment are marked as skipped.
for path in build_upgrade_pairs():
    gen_class_name = TestForRegressions.__name__ + path.name
    # Guard against two upgrade paths producing the same class name.
    assert gen_class_name not in globals()

    spec = {'UPGRADE_PATH': path,
            '__test__': True}

    upgrade_applies_to_env = RUN_STATIC_UPGRADE_MATRIX or path.upgrade_meta.matches_current_env_version_family
    cls = type(gen_class_name, (TestForRegressions,), spec)
    if not upgrade_applies_to_env:
        add_skip(cls, 'test not applicable to env.')
    globals()[gen_class_name] = cls
| apache-2.0 |
postlund/home-assistant | homeassistant/components/cover/device_trigger.py | 8 | 7082 | """Provides device automations for Cover."""
from typing import List
import voluptuous as vol
from homeassistant.components.automation import (
AutomationActionType,
numeric_state as numeric_state_automation,
state as state_automation,
)
from homeassistant.components.device_automation import TRIGGER_BASE_SCHEMA
from homeassistant.const import (
ATTR_SUPPORTED_FEATURES,
CONF_ABOVE,
CONF_BELOW,
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_ENTITY_ID,
CONF_PLATFORM,
CONF_TYPE,
STATE_CLOSED,
STATE_CLOSING,
STATE_OPEN,
STATE_OPENING,
)
from homeassistant.core import CALLBACK_TYPE, HomeAssistant
from homeassistant.helpers import config_validation as cv, entity_registry
from homeassistant.helpers.typing import ConfigType
from . import (
DOMAIN,
SUPPORT_CLOSE,
SUPPORT_OPEN,
SUPPORT_SET_POSITION,
SUPPORT_SET_TILT_POSITION,
)
# Trigger types driven by the cover's current (tilt) position percentage.
POSITION_TRIGGER_TYPES = {"position", "tilt_position"}
# Trigger types driven by plain state transitions of the cover entity.
STATE_TRIGGER_TYPES = {"opened", "closed", "opening", "closing"}

# Position triggers require at least one of above/below, each 0..100.
POSITION_TRIGGER_SCHEMA = vol.All(
    TRIGGER_BASE_SCHEMA.extend(
        {
            vol.Required(CONF_ENTITY_ID): cv.entity_id,
            vol.Required(CONF_TYPE): vol.In(POSITION_TRIGGER_TYPES),
            vol.Optional(CONF_ABOVE): vol.All(
                vol.Coerce(int), vol.Range(min=0, max=100)
            ),
            vol.Optional(CONF_BELOW): vol.All(
                vol.Coerce(int), vol.Range(min=0, max=100)
            ),
        }
    ),
    cv.has_at_least_one_key(CONF_BELOW, CONF_ABOVE),
)

# State triggers only need the entity and one of the state trigger types.
STATE_TRIGGER_SCHEMA = TRIGGER_BASE_SCHEMA.extend(
    {
        vol.Required(CONF_ENTITY_ID): cv.entity_id,
        vol.Required(CONF_TYPE): vol.In(STATE_TRIGGER_TYPES),
    }
)

# A trigger config must satisfy one of the two schemas above.
TRIGGER_SCHEMA = vol.Any(POSITION_TRIGGER_SCHEMA, STATE_TRIGGER_SCHEMA)
async def async_get_triggers(hass: HomeAssistant, device_id: str) -> List[dict]:
    """List device triggers for Cover devices.

    One trigger dict is produced per (entity, trigger type) pair; the set of
    trigger types is derived from the entity's supported features.
    """
    registry = await entity_registry.async_get_registry(hass)
    triggers = []

    # Get all the integrations entities for this device
    for entry in entity_registry.async_entries_for_device(registry, device_id):
        if entry.domain != DOMAIN:
            continue

        state = hass.states.get(entry.entity_id)
        if not state or ATTR_SUPPORTED_FEATURES not in state.attributes:
            continue

        supported_features = state.attributes[ATTR_SUPPORTED_FEATURES]

        # Collect applicable trigger types instead of appending near-identical
        # dicts four+ times; order matches the original listing.
        trigger_types = []
        if supported_features & (SUPPORT_OPEN | SUPPORT_CLOSE):
            trigger_types.extend(["opened", "closed", "opening", "closing"])
        if supported_features & SUPPORT_SET_POSITION:
            trigger_types.append("position")
        if supported_features & SUPPORT_SET_TILT_POSITION:
            trigger_types.append("tilt_position")

        triggers.extend(
            {
                CONF_PLATFORM: "device",
                CONF_DEVICE_ID: device_id,
                CONF_DOMAIN: DOMAIN,
                CONF_ENTITY_ID: entry.entity_id,
                CONF_TYPE: trigger_type,
            }
            for trigger_type in trigger_types
        )

    return triggers
async def async_get_trigger_capabilities(hass: HomeAssistant, config: dict) -> dict:
    """List trigger capabilities."""
    # Only position-based triggers expose extra configuration fields.
    if config[CONF_TYPE] not in ("position", "tilt_position"):
        return {}

    # Both thresholds accept an integer percentage between 0 and 100.
    percentage = vol.All(vol.Coerce(int), vol.Range(min=0, max=100))
    extra_fields = vol.Schema(
        {
            vol.Optional(CONF_ABOVE, default=0): percentage,
            vol.Optional(CONF_BELOW, default=100): percentage,
        }
    )
    return {"extra_fields": extra_fields}
async def async_attach_trigger(
    hass: HomeAssistant,
    config: ConfigType,
    action: AutomationActionType,
    automation_info: dict,
) -> CALLBACK_TYPE:
    """Attach a trigger."""
    config = TRIGGER_SCHEMA(config)

    if config[CONF_TYPE] in STATE_TRIGGER_TYPES:
        # Map the device trigger type onto the cover state it represents.
        to_state = {
            "opened": STATE_OPEN,
            "closed": STATE_CLOSED,
            "opening": STATE_OPENING,
            "closing": STATE_CLOSING,
        }[config[CONF_TYPE]]

        state_config = {
            state_automation.CONF_PLATFORM: "state",
            CONF_ENTITY_ID: config[CONF_ENTITY_ID],
            state_automation.CONF_TO: to_state,
        }
        state_config = state_automation.TRIGGER_SCHEMA(state_config)
        return await state_automation.async_attach_trigger(
            hass, state_config, action, automation_info, platform_type="device"
        )

    # Position triggers: watch the relevant position attribute numerically.
    if config[CONF_TYPE] == "position":
        position = "current_position"
    else:  # "tilt_position" (only other type the schema allows here)
        position = "current_tilt_position"

    min_pos = config.get(CONF_ABOVE, -1)
    max_pos = config.get(CONF_BELOW, 101)
    value_template = f"{{{{ state.attributes.{position} }}}}"

    numeric_state_config = {
        numeric_state_automation.CONF_PLATFORM: "numeric_state",
        numeric_state_automation.CONF_ENTITY_ID: config[CONF_ENTITY_ID],
        numeric_state_automation.CONF_BELOW: max_pos,
        numeric_state_automation.CONF_ABOVE: min_pos,
        numeric_state_automation.CONF_VALUE_TEMPLATE: value_template,
    }
    numeric_state_config = numeric_state_automation.TRIGGER_SCHEMA(numeric_state_config)
    return await numeric_state_automation.async_attach_trigger(
        hass, numeric_state_config, action, automation_info, platform_type="device"
    )
| apache-2.0 |
openaid-IATI/OIPA | OIPA/iati_organisation/parser/organisation_2_01.py | 1 | 27741 | import logging
from django.conf import settings
from geodata.models import Country
from iati.parser.exceptions import (
FieldValidationError, ParserError, RequiredFieldError
)
from iati.parser.iati_parser import IatiParser
from iati_codelists import models as codelist_models
from iati_organisation.models import (
DocumentLinkRecipientCountry, DocumentLinkTitle, Organisation,
OrganisationDocumentLink, OrganisationDocumentLinkCategory,
OrganisationDocumentLinkLanguage, OrganisationName, OrganisationNarrative,
OrganisationReportingOrganisation, RecipientCountryBudget,
RecipientCountryBudgetLine, RecipientOrgBudget, RecipientOrgBudgetLine,
RecipientRegionBudget, TotalBudget, TotalBudgetLine, TotalExpenditure
)
from iati_organisation.parser import post_save
from solr.organisation.tasks import OrganisationTaskIndexing
# Get an instance of a logger
logger = logging.getLogger(__name__)
class Parse(IatiParser):
"""
# NOTE: This parsed version is only covered the DataSet Organisation file
as the following:
- Total Budget
-- Total Budget Line
- Total Expenditure
-- Total Expenditure Line
- Organisation Document Link
# TODO: Cover others element as The IATI Organisation File Standard
http://reference.iatistandard.org/201/organisation-standard/overview
/organisation-file/
"""
organisation_identifier = ''
    def __init__(self, *args, **kwargs):
        """Initialise parser state for one organisation file."""
        super(Parse, self).__init__(*args, **kwargs)
        # IATI organisation standard version handled by this parser.
        self.VERSION = '2.01'

        # default currency
        self.default_currency = None

        # We need a current index to put the current model
        # on the process parse
        self.organisation_document_link_current_index = 0
        self.document_link_title_current_index = 0
        self.document_link_language_current_index = 0
        self.total_budget_current_index = 0
        self.total_budget_line_current_index = 0
        self.total_expenditure_current_index = 0
        self.total_expenditure_line_current_index = 0
    def add_narrative(self, element, parent):
        """Parse a <narrative> element and register an OrganisationNarrative
        attached to ``parent``.

        :param element: lxml element holding the narrative text
        :param parent: model instance the narrative belongs to
        :raises ParserError: when no parent is passed
        :raises RequiredFieldError: when no language can be determined or
            the narrative text is empty
        """
        default_lang = self.default_lang  # set on activity (if set)
        lang = element.attrib.get('{http://www.w3.org/XML/1998/namespace}lang')
        text = element.text

        language = self.get_or_none(codelist_models.Language, code=lang)

        # Fall back to the file-level default language when the element
        # carries no (known) xml:lang.
        if not language:
            language = default_lang

        if not parent:
            raise ParserError(
                "Unknown",
                "Narrative",
                "parent object must be passed")

        register_name = parent.__class__.__name__ + "Narrative"

        if not language:
            raise RequiredFieldError(
                register_name,
                "xml:lang",
                "must specify xml:lang on iati-activity or xml:lang on \
                the element itself")
        if not text:
            raise RequiredFieldError(
                register_name,
                "text",
                "empty narrative")

        narrative = OrganisationNarrative()
        narrative.language = language
        narrative.content = element.text
        # This (instead of narrative.related_object) is required, otherwise
        # related object doesn't get passed to the model_store (memory) and
        # 'update_related()' fails.
        # It should probably be passed to the __init__() ?
        setattr(narrative, '_related_object', parent)

        narrative.organisation = self.get_model('Organisation')

        # TODO: handle this differently (also: breaks tests)
        self.register_model(register_name, narrative)
def _get_currency_or_raise(self, model_name, currency):
"""
get default currency if not available for currency-related fields
"""
if not currency:
currency = getattr(self.get_model(
'Organisation'), 'default_currency')
if not currency:
raise RequiredFieldError(
model_name,
"currency",
"must specify default-currency on iati-organisation or \
as currency on the element itself")
return currency
def iati_organisations__iati_organisation(self, element):
org_id = element.xpath('organisation-identifier/text()')[0]
normalized_id = self._normalize(org_id)
last_updated_datetime = self.validate_date(
element.attrib.get('last-updated-datetime'))
# default is here to make it default to settings 'DEFAULT_LANG' on no
# language set (validation error we want to be flexible per instance)
default_lang_code = element.attrib.get(
'{http://www.w3.org/XML/1998/namespace}lang',
settings.DEFAULT_LANG)
if default_lang_code:
default_lang_code = default_lang_code.lower()
default_lang = self.get_or_none(
codelist_models.Language,
code=default_lang_code
)
default_currency = self.get_or_none(
codelist_models.Currency,
code=element.attrib.get('default-currency'))
if not org_id:
raise RequiredFieldError(
"",
"id",
"organisation: must contain organisation-identifier")
# TODO: check for last-updated-datetime - 2017-03-27
old_organisation = self.get_or_none(
Organisation, organisation_identifier=org_id)
if old_organisation:
OrganisationName.objects.filter(
organisation=old_organisation).delete()
OrganisationReportingOrganisation.objects.filter(
organisation=old_organisation).delete()
TotalBudget.objects.filter(
organisation=old_organisation).delete()
RecipientOrgBudget.objects.filter(
organisation=old_organisation).delete()
RecipientCountryBudget.objects.filter(
organisation=old_organisation).delete()
RecipientRegionBudget.objects.filter(
organisation=old_organisation).delete()
TotalExpenditure.objects.filter(
organisation=old_organisation).delete()
OrganisationDocumentLink.objects.filter(
organisation=old_organisation).delete()
organisation = old_organisation
else:
organisation = Organisation()
organisation.organisation_identifier = org_id
organisation.normalized_organisation_identifier = normalized_id
organisation.last_updated_datetime = last_updated_datetime
organisation.default_lang = default_lang
organisation.iati_standard_version_id = self.VERSION
organisation.default_currency = default_currency
organisation.published = True
organisation.ready_to_publish = True
organisation.modified = False
organisation.dataset = self.dataset
self.organisation_identifier = organisation.organisation_identifier
self.default_currency = default_currency
# for later reference
self.default_lang = default_lang
self.register_model('Organisation', organisation)
return element
def iati_organisations__iati_organisation__name(self, element):
name_list = self.get_model_list('OrganisationName')
if name_list and len(name_list) > 0:
raise FieldValidationError(
"name", "Duplicate names are not allowed")
organisation = self.get_model('Organisation')
name = OrganisationName()
name.organisation = organisation
self.register_model('OrganisationName', name)
return element
def iati_organisations__iati_organisation__name__narrative(self, element):
model = self.get_model('OrganisationName')
self.add_narrative(element, model)
if element.text:
organisation = self.get_model('Organisation')
if organisation.primary_name:
default_lang = self.default_lang # set on activity (if set)
lang = element.attrib.get(
'{http://www.w3.org/XML/1998/namespace}lang', default_lang)
if lang == 'en':
organisation.primary_name = element.text
else:
organisation.primary_name = element.text
return element
def iati_organisations__iati_organisation__reporting_org(self, element):
# Although OrganisationReportingOrganisation and Organisation has
# One-to-One relation on the database level, we check here whether
# element 'reporting-org' occurs only once in the parent element
# 'organisation'.
organisation = self.get_model('Organisation')
if 'OrganisationReportingOrganisation' in self.model_store:
for reporting_org in self.model_store[
'OrganisationReportingOrganisation']:
if reporting_org.organisation == organisation:
raise ParserError("Organisation",
"OrganisationReportingOrganisation",
"must occur no more than once.")
# narrative is parsed in different method but as it is required
# sub-element in 'name' element so we check it here.
narrative = element.xpath("narrative")
if len(narrative) < 1:
raise RequiredFieldError("OrganisationName", "narrative",
"must occur at least once.")
reporting_org_identifier = element.attrib.get("ref")
if reporting_org_identifier is None:
raise RequiredFieldError("OrganisationReportingOrganisation",
"ref", "required field missing.")
org_type = element.attrib.get("type")
if org_type is None:
raise RequiredFieldError("OrganisationReportingOrganisation",
"type", "required field missing.")
# here org_type is OrganisationType object.
org_type = self.get_or_none(codelist_models.OrganisationType,
code=org_type)
if org_type is None:
raise FieldValidationError(
"OrganisationReportingOrganisation",
"type",
"not found on the accompanying codelist.",
None,
None,
)
secondary_reporter = self.makeBool(element.attrib.get(
"secondary-reporter"))
reporting_org = OrganisationReportingOrganisation()
reporting_org.organisation = organisation
reporting_org.org_type = org_type
reporting_org.secondary_reporter = secondary_reporter
reporting_org.reporting_org_identifier = reporting_org_identifier
self.register_model("OrganisationReportingOrganisation", reporting_org)
return element
def iati_organisations__iati_organisation__reporting_org__narrative(
self, element):
"""atributes:
tag:narrative"""
model = self.get_model('OrganisationReportingOrganisation')
self.add_narrative(element, model)
return element
def iati_organisations__iati_organisation__total_budget(self, element):
"""atributes:
tag:total-budget"""
status = self.get_or_none(
codelist_models.BudgetStatus, code=element.attrib.get('status'))
model = self.get_model('Organisation')
total_budget = TotalBudget()
total_budget.organisation = model
if status:
total_budget.status = status
self.total_budget_current_index = \
self.register_model('TotalBudget', total_budget)
# store element
return element
def iati_organisations__iati_organisation__total_budget__period_start(
self, element):
"""atributes:
iso-date:2014-01-01
tag:period-start"""
model = self.get_model('TotalBudget', self.total_budget_current_index)
model.period_start = self.validate_date(element.attrib.get('iso-date'))
# store element
return element
def iati_organisations__iati_organisation__total_budget__period_end(
self, element):
"""atributes:
iso-date:2014-12-31
tag:period-end"""
model = self.get_model('TotalBudget', self.total_budget_current_index)
model.period_end = self.validate_date(element.attrib.get('iso-date'))
# store element
return element
def iati_organisations__iati_organisation__total_budget__value(
self, element):
"""atributes:
currency:USD
value-date:2014-01-0
tag:value"""
model = self.get_model('TotalBudget', self.total_budget_current_index)
model.currency = self.get_or_none(
codelist_models.Currency,
code=self._get_currency_or_raise(
'total-budget/value',
element.attrib.get('currency')))
model.value_date = self.validate_date(element.attrib.get('value-date'))
model.value = element.text
# store element
return element
def iati_organisations__iati_organisation__total_budget__budget_line(
self, element):
"""atributes:
ref:1234
tag:budget-line"""
budget_line = TotalBudgetLine()
budget_line.ref = element.attrib.get('ref', '-')
budget_line.total_budget = self.get_model(
'TotalBudget', self.total_budget_current_index)
self.total_budget_line_current_index = \
self.register_model('TotalBudgetLine', budget_line)
# store element
return element
def iati_organisations__iati_organisation__total_budget__budget_line__value(self, element): # NOQA: E501
"""atributes:
currency:USD
value-date:2014-01-01
tag:value"""
model = self.get_model('TotalBudgetLine',
self.total_budget_line_current_index)
model.currency = self.get_or_none(
codelist_models.Currency,
code=self._get_currency_or_raise(
'total-budget/budget-line/value',
element.attrib.get('currency')))
model.value_date = self.validate_date(element.attrib.get('value-date'))
model.value = element.text
# store element
return element
def iati_organisations__iati_organisation__total_budget__budget_line__narrative(self, element): # NOQA: E501
"""atributes:
tag:narrative"""
model = self.get_model('TotalBudgetLine',
self.total_budget_line_current_index)
self.add_narrative(element, model)
# store element
return element
def iati_organisations__iati_organisation__recipient_org_budget(
self, element):
"""atributes:
tag:recipient-org-budget"""
status = self.get_or_none(
codelist_models.BudgetStatus, code=element.attrib.get('status'))
model = self.get_model('Organisation')
recipient_org_budget = RecipientOrgBudget()
recipient_org_budget.organisation = model
if status:
recipient_org_budget.status = status
self.register_model('RecipientOrgBudget', recipient_org_budget)
# store element
return element
def iati_organisations__iati_organisation__recipient_org_budget__recipient_org(self, element): # NOQA: E501
"""atributes:
ref:AA-ABC-1234567
tag:recipient-org"""
model = self.get_model('RecipientOrgBudget')
model.recipient_org_identifier = element.attrib.get('ref')
if Organisation.objects.filter(
organisation_identifier=element.attrib.get('ref')
).exists():
model.recipient_org = Organisation.objects.get(
pk=element.attrib.get('ref'))
# store element
return element
def iati_organisations__iati_organisation__recipient_org_budget__recipient_org__narrative( # NOQA: E501
self, element):
"""atributes:
tag:narrative"""
model = self.get_model('RecipientOrgBudget')
self.add_narrative(element, model)
# store element
return element
def iati_organisations__iati_organisation__recipient_org_budget__period_start(self, element): # NOQA: E501
"""atributes:
iso-date:2014-01-01
tag:period-start"""
model = self.get_model('RecipientOrgBudget')
model.period_start = self.validate_date(element.attrib.get('iso-date'))
# store element
return element
def iati_organisations__iati_organisation__recipient_org_budget__period_end(self, element): # NOQA: E501
"""atributes:
iso-date:2014-12-31
tag:period-end"""
model = self.get_model('RecipientOrgBudget')
model.period_end = self.validate_date(element.attrib.get('iso-date'))
# store element
return element
def iati_organisations__iati_organisation__recipient_org_budget__value(
self, element):
"""atributes:
currency:USD
value-date:2014-01-01
tag:value"""
model = self.get_model('RecipientOrgBudget')
model.currency = self.get_or_none(
codelist_models.Currency,
code=self._get_currency_or_raise(
'recipient-org-budget/value',
element.attrib.get('currency')))
model.value_date = self.validate_date(element.attrib.get('value-date'))
model.value = element.text
# store element
return element
def iati_organisations__iati_organisation__recipient_org_budget__budget_line(self, element): # NOQA: E501
"""atributes:
ref:1234
tag:budget-line"""
self.get_model('RecipientOrgBudget')
budget_line = RecipientOrgBudgetLine()
budget_line.ref = element.attrib.get('ref')
self.register_model('RecipientOrgBudgetLine', budget_line)
# store element
return element
def iati_organisations__iati_organisation__recipient_org_budget__budget_line__value( # NOQA: E501
self, element):
"""atributes:
currency:USD
value-date:2014-01-01
tag:value"""
model = self.get_model('RecipientOrgBudgetLine')
model.currency = self.get_or_none(
codelist_models.Currency,
code=self._get_currency_or_raise(
'recipient-org-budget/budget-line/value',
element.attrib.get('currency')))
model.value_date = self.validate_date(element.attrib.get('value-date'))
model.value = element.text
# store element
return element
def iati_organisations__iati_organisation__recipient_org_budget__budget_line__narrative( # NOQA: E501
self, element):
"""atributes:
tag:narrative"""
model = self.get_model('RecipientOrgBudgetLine')
self.add_narrative(element, model)
# store element
return element
def iati_organisations__iati_organisation__recipient_country_budget(
self, element):
"""atributes:
tag:recipient-country-budget"""
model = self.get_model('Organisation')
recipient_country_budget = RecipientCountryBudget()
recipient_country_budget.organisation = model
self.register_model('RecipientCountryBudget', recipient_country_budget)
# store element
return element
def iati_organisations__iati_organisation__recipient_country_budget__recipient_country( # NOQA: E501
self, element):
"""atributes:
code:AF
tag:recipient-country"""
model = self.get_model('RecipientCountryBudget')
model.country = self.get_or_none(
Country, code=element.attrib.get('code'))
# store element
return element
def iati_organisations__iati_organisation__recipient_country_budget__period_start( # NOQA: E501
self, element):
"""atributes:
iso-date:2014-01-01
tag:period-start"""
model = self.get_model('RecipientCountryBudget')
model.period_start = self.validate_date(element.attrib.get('iso-date'))
# store element
return element
def iati_organisations__iati_organisation__recipient_country_budget__period_end(self, element): # NOQA: E501
"""atributes:
iso-date:2014-12-31
tag:period-end"""
model = self.get_model('RecipientCountryBudget')
model.period_end = self.validate_date(element.attrib.get('iso-date'))
# store element
return element
def iati_organisations__iati_organisation__recipient_country_budget__value(
self, element):
"""atributes:
currency:USD
value-date:2014-01-01
tag:value"""
model = self.get_model('RecipientCountryBudget')
model.currency = self.get_or_none(
codelist_models.Currency,
code=self._get_currency_or_raise(
'recipient-country-budget/value',
element.attrib.get('currency')))
model.value_date = self.validate_date(element.attrib.get('value-date'))
model.value = element.text
# store element
return element
def iati_organisations__iati_organisation__recipient_country_budget__budget_line( # NOQA: E501
self, element):
"""atributes:
ref:1234
tag:budget-line"""
self.get_model('RecipientCountryBudget')
budget_line = RecipientCountryBudgetLine()
budget_line.ref = element.attrib.get('ref', '-')
self.register_model('RecipientCountryBudgetLine', budget_line)
# store element
return element
def iati_organisations__iati_organisation__recipient_country_budget__budget_line__value( # NOQA: E501
self, element):
"""atributes:
currency:USD
value-date:2014-01-01
tag:value"""
model = self.get_model('RecipientCountryBudgetLine')
model.currency = self.get_or_none(
codelist_models.Currency,
code=self._get_currency_or_raise(
'recipient-country-budget/budget-line/value',
element.attrib.get('currency')))
model.value = element.text
model.value_date = self.validate_date(element.attrib.get('value-date'))
model.value = element.text
# store element
return element
def iati_organisations__iati_organisation__recipient_country_budget__budget_line__narrative( # NOQA: E501
self, element):
"""atributes:
tag:narrative"""
model = self.get_model('RecipientCountryBudgetLine')
self.add_narrative(element, model)
# store element
return element
def iati_organisations__iati_organisation__document_link(self, element):
"""atributes:
format:application/vnd.oasis.opendocument.text
url:http:www.example.org/docs/report_en.odt
tag:document-link"""
model = self.get_model('Organisation')
document_link = OrganisationDocumentLink()
document_link.organisation = model
document_link.url = element.attrib.get('url')
document_link.file_format = self.get_or_none(
codelist_models.FileFormat, code=element.attrib.get('format'))
# Set the document link on the process
self.organisation_document_link_current_index = \
self.register_model('OrganisationDocumentLink', document_link)
# store element
return element
def iati_organisations__iati_organisation__document_link__title(
self, element):
"""atributes:
tag:title"""
document_link_title = DocumentLinkTitle()
self.document_link_title_current_index = \
self.register_model('DocumentLinkTitle', document_link_title)
model = self.get_model('OrganisationDocumentLink',
self.organisation_document_link_current_index)
document_link_title.document_link = model
# store element
return element
def iati_organisations__iati_organisation__document_link__title__narrative(
self, element):
"""atributes:
tag:narrative"""
model = self.get_model('DocumentLinkTitle',
self.document_link_title_current_index)
self.add_narrative(element, model)
# store element
return element
def iati_organisations__iati_organisation__document_link__category(
self, element):
"""atributes:
code:B01
tag:category"""
model = self.get_model('OrganisationDocumentLink',
self.organisation_document_link_current_index)
document_category = self.get_or_none(
codelist_models.DocumentCategory,
code=element.attrib.get('code'))
document_link_category = OrganisationDocumentLinkCategory()
document_link_category.category = document_category
document_link_category.document_link = model
self.register_model(
'OrganisationDocumentLinkCategory', document_link_category)
return element
def iati_organisations__iati_organisation__document_link__language(
self, element):
"""atributes:
code:en
tag:language"""
organisation_document_link_language = \
OrganisationDocumentLinkLanguage()
organisation_document_link_language.language = self.get_or_none(
codelist_models.Language,
code=element.attrib.get('code'))
model = self.get_model('OrganisationDocumentLink',
self.organisation_document_link_current_index)
organisation_document_link_language.document_link = model
self.document_link_language_current_index = self.register_model(
'OrganisationDocumentLinkLanguage',
organisation_document_link_language)
# store element
return element
def iati_organisations__iati_organisation__document_link__recipient_country( # NOQA: E501
self, element):
"""atributes:
code:AF
tag:recipient-country"""
model = self.get_model('OrganisationDocumentLink',
self.organisation_document_link_current_index)
country = self.get_or_none(Country, code=element.attrib.get('code'))
document_link_recipient_country = DocumentLinkRecipientCountry()
document_link_recipient_country.recipient_country = country
document_link_recipient_country.document_link = model
self.register_model('DocumentLinkRecipientCountry',
document_link_recipient_country)
# store element
return element
def post_save_models(self):
"""Perform all actions that need to happen after a single
organisation's been parsed."""
organisation = self.get_model('Organisation')
if not organisation:
return False
post_save.set_activity_reporting_organisation(organisation)
post_save.set_publisher_fk(organisation)
# Solr indexing
OrganisationTaskIndexing(instance=organisation).run()
def post_save_file(self, dataset, files_to_keep):
pass
def post_save_validators(self, dataset):
pass
| agpl-3.0 |
oneminot/xbmc | tools/EventClients/lib/python/ps3/keymaps.py | 245 | 2329 | # -*- coding: utf-8 -*-
# Copyright (C) 2008-2013 Team XBMC
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# PS3 Remote and Controller Keymaps
keymap_remote = {
"16": 'power' ,#EJECT
"64": None ,#AUDIO
"65": None ,#ANGLE
"63": 'subtitle' ,#SUBTITLE
"0f": None ,#CLEAR
"28": None ,#TIME
"00": 'one' ,#1
"01": 'two' ,#2
"02": 'three' ,#3
"03": 'four' ,#4
"04": 'five' ,#5
"05": 'six' ,#6
"06": 'seven' ,#7
"07": 'eight' ,#8
"08": 'nine' ,#9
"09": 'zero' ,#0
"81": 'mytv' ,#RED
"82": 'mymusic' ,#GREEN
"80": 'mypictures' ,#BLUE
"83": 'myvideo' ,#YELLOW
"70": 'display' ,#DISPLAY
"1a": None ,#TOP MENU
"40": 'menu' ,#POP UP/MENU
"0e": None ,#RETURN
"5c": 'menu' ,#OPTIONS/TRIANGLE
"5d": 'back' ,#BACK/CIRCLE
"5e": 'info' ,#X
"5f": 'title' ,#VIEW/SQUARE
"54": 'up' ,#UP
"55": 'right' ,#RIGHT
"56": 'down' ,#DOWN
"57": 'left' ,#LEFT
"0b": 'select' ,#ENTER
"5a": 'volumeplus' ,#L1
"58": 'volumeminus' ,#L2
"51": 'Mute' ,#L3
"5b": 'pageplus' ,#R1
"59": 'pageminus' ,#R2
"52": None ,#R3
"43": None ,#PLAYSTATION
"50": None ,#SELECT
"53": None ,#START
"33": 'reverse' ,#<-SCAN
"34": 'forward' ,# SCAN->
"30": 'skipminus' ,#PREV
"31": 'skipplus' ,#NEXT
"60": None ,#<-SLOW/STEP
"61": None ,# SLOW/STEP->
"32": 'play' ,#PLAY
"38": 'stop' ,#STOP
"39": 'pause' ,#PAUSE
}
| gpl-2.0 |
loono/multitruth | lib/flask/testsuite/templating.py | 562 | 11237 | # -*- coding: utf-8 -*-
"""
flask.testsuite.templating
~~~~~~~~~~~~~~~~~~~~~~~~~~
Template functionality
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import flask
import unittest
from flask.testsuite import FlaskTestCase
class TemplatingTestCase(FlaskTestCase):
def test_context_processing(self):
app = flask.Flask(__name__)
@app.context_processor
def context_processor():
return {'injected_value': 42}
@app.route('/')
def index():
return flask.render_template('context_template.html', value=23)
rv = app.test_client().get('/')
self.assert_equal(rv.data, b'<p>23|42')
def test_original_win(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.render_template_string('{{ config }}', config=42)
rv = app.test_client().get('/')
self.assert_equal(rv.data, b'42')
def test_request_less_rendering(self):
app = flask.Flask(__name__)
app.config['WORLD_NAME'] = 'Special World'
@app.context_processor
def context_processor():
return dict(foo=42)
with app.app_context():
rv = flask.render_template_string('Hello {{ config.WORLD_NAME }} '
'{{ foo }}')
self.assert_equal(rv, 'Hello Special World 42')
def test_standard_context(self):
app = flask.Flask(__name__)
app.secret_key = 'development key'
@app.route('/')
def index():
flask.g.foo = 23
flask.session['test'] = 'aha'
return flask.render_template_string('''
{{ request.args.foo }}
{{ g.foo }}
{{ config.DEBUG }}
{{ session.test }}
''')
rv = app.test_client().get('/?foo=42')
self.assert_equal(rv.data.split(), [b'42', b'23', b'False', b'aha'])
def test_escaping(self):
text = '<p>Hello World!'
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.render_template('escaping_template.html', text=text,
html=flask.Markup(text))
lines = app.test_client().get('/').data.splitlines()
self.assert_equal(lines, [
b'<p>Hello World!',
b'<p>Hello World!',
b'<p>Hello World!',
b'<p>Hello World!',
b'<p>Hello World!',
b'<p>Hello World!'
])
def test_no_escaping(self):
app = flask.Flask(__name__)
with app.test_request_context():
self.assert_equal(flask.render_template_string('{{ foo }}',
foo='<test>'), '<test>')
self.assert_equal(flask.render_template('mail.txt', foo='<test>'),
'<test> Mail')
def test_macros(self):
app = flask.Flask(__name__)
with app.test_request_context():
macro = flask.get_template_attribute('_macro.html', 'hello')
self.assert_equal(macro('World'), 'Hello World!')
def test_template_filter(self):
app = flask.Flask(__name__)
@app.template_filter()
def my_reverse(s):
return s[::-1]
self.assert_in('my_reverse', app.jinja_env.filters.keys())
self.assert_equal(app.jinja_env.filters['my_reverse'], my_reverse)
self.assert_equal(app.jinja_env.filters['my_reverse']('abcd'), 'dcba')
def test_add_template_filter(self):
app = flask.Flask(__name__)
def my_reverse(s):
return s[::-1]
app.add_template_filter(my_reverse)
self.assert_in('my_reverse', app.jinja_env.filters.keys())
self.assert_equal(app.jinja_env.filters['my_reverse'], my_reverse)
self.assert_equal(app.jinja_env.filters['my_reverse']('abcd'), 'dcba')
def test_template_filter_with_name(self):
app = flask.Flask(__name__)
@app.template_filter('strrev')
def my_reverse(s):
return s[::-1]
self.assert_in('strrev', app.jinja_env.filters.keys())
self.assert_equal(app.jinja_env.filters['strrev'], my_reverse)
self.assert_equal(app.jinja_env.filters['strrev']('abcd'), 'dcba')
def test_add_template_filter_with_name(self):
app = flask.Flask(__name__)
def my_reverse(s):
return s[::-1]
app.add_template_filter(my_reverse, 'strrev')
self.assert_in('strrev', app.jinja_env.filters.keys())
self.assert_equal(app.jinja_env.filters['strrev'], my_reverse)
self.assert_equal(app.jinja_env.filters['strrev']('abcd'), 'dcba')
def test_template_filter_with_template(self):
app = flask.Flask(__name__)
@app.template_filter()
def super_reverse(s):
return s[::-1]
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
self.assert_equal(rv.data, b'dcba')
def test_add_template_filter_with_template(self):
app = flask.Flask(__name__)
def super_reverse(s):
return s[::-1]
app.add_template_filter(super_reverse)
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
self.assert_equal(rv.data, b'dcba')
def test_template_filter_with_name_and_template(self):
app = flask.Flask(__name__)
@app.template_filter('super_reverse')
def my_reverse(s):
return s[::-1]
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
self.assert_equal(rv.data, b'dcba')
def test_add_template_filter_with_name_and_template(self):
app = flask.Flask(__name__)
def my_reverse(s):
return s[::-1]
app.add_template_filter(my_reverse, 'super_reverse')
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
self.assert_equal(rv.data, b'dcba')
def test_template_test(self):
app = flask.Flask(__name__)
@app.template_test()
def boolean(value):
return isinstance(value, bool)
self.assert_in('boolean', app.jinja_env.tests.keys())
self.assert_equal(app.jinja_env.tests['boolean'], boolean)
self.assert_true(app.jinja_env.tests['boolean'](False))
def test_add_template_test(self):
app = flask.Flask(__name__)
def boolean(value):
return isinstance(value, bool)
app.add_template_test(boolean)
self.assert_in('boolean', app.jinja_env.tests.keys())
self.assert_equal(app.jinja_env.tests['boolean'], boolean)
self.assert_true(app.jinja_env.tests['boolean'](False))
def test_template_test_with_name(self):
app = flask.Flask(__name__)
@app.template_test('boolean')
def is_boolean(value):
return isinstance(value, bool)
self.assert_in('boolean', app.jinja_env.tests.keys())
self.assert_equal(app.jinja_env.tests['boolean'], is_boolean)
self.assert_true(app.jinja_env.tests['boolean'](False))
def test_add_template_test_with_name(self):
app = flask.Flask(__name__)
def is_boolean(value):
return isinstance(value, bool)
app.add_template_test(is_boolean, 'boolean')
self.assert_in('boolean', app.jinja_env.tests.keys())
self.assert_equal(app.jinja_env.tests['boolean'], is_boolean)
self.assert_true(app.jinja_env.tests['boolean'](False))
def test_template_test_with_template(self):
app = flask.Flask(__name__)
@app.template_test()
def boolean(value):
return isinstance(value, bool)
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
self.assert_in(b'Success!', rv.data)
def test_add_template_test_with_template(self):
app = flask.Flask(__name__)
def boolean(value):
return isinstance(value, bool)
app.add_template_test(boolean)
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
self.assert_in(b'Success!', rv.data)
def test_template_test_with_name_and_template(self):
app = flask.Flask(__name__)
@app.template_test('boolean')
def is_boolean(value):
return isinstance(value, bool)
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
self.assert_in(b'Success!', rv.data)
def test_add_template_test_with_name_and_template(self):
app = flask.Flask(__name__)
def is_boolean(value):
return isinstance(value, bool)
app.add_template_test(is_boolean, 'boolean')
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
self.assert_in(b'Success!', rv.data)
def test_add_template_global(self):
app = flask.Flask(__name__)
@app.template_global()
def get_stuff():
return 42
self.assert_in('get_stuff', app.jinja_env.globals.keys())
self.assert_equal(app.jinja_env.globals['get_stuff'], get_stuff)
self.assert_true(app.jinja_env.globals['get_stuff'](), 42)
with app.app_context():
rv = flask.render_template_string('{{ get_stuff() }}')
self.assert_equal(rv, '42')
def test_custom_template_loader(self):
class MyFlask(flask.Flask):
def create_global_jinja_loader(self):
from jinja2 import DictLoader
return DictLoader({'index.html': 'Hello Custom World!'})
app = MyFlask(__name__)
@app.route('/')
def index():
return flask.render_template('index.html')
c = app.test_client()
rv = c.get('/')
self.assert_equal(rv.data, b'Hello Custom World!')
def test_iterable_loader(self):
app = flask.Flask(__name__)
@app.context_processor
def context_processor():
return {'whiskey': 'Jameson'}
@app.route('/')
def index():
return flask.render_template(
['no_template.xml', # should skip this one
'simple_template.html', # should render this
'context_template.html'],
value=23)
rv = app.test_client().get('/')
self.assert_equal(rv.data, b'<h1>Jameson</h1>')
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TemplatingTestCase))
return suite
| apache-2.0 |
kastnerkyle/pylearn2 | pylearn2/cross_validation/mlp.py | 19 | 1687 | """
Cross-validation with MLPs.
"""
__author__ = "Steven Kearnes"
__copyright__ = "Copyright 2014, Stanford University"
__license__ = "3-clause BSD"
__maintainer__ = "Steven Kearnes"
from pylearn2.models.mlp import Layer, PretrainedLayer
class PretrainedLayerCV(Layer):
"""
Container of PretrainedLayer objects for use with TrainCV.
Parameters
----------
layer_name: str
Name of layer.
layer_content: array_like
Pretrained layer models for each dataset subset.
"""
def __init__(self, layer_name, layer_content):
self.layer_name = layer_name
self._folds = [PretrainedLayer(layer_name, subset_content)
for subset_content in layer_content]
def select_fold(self, k):
"""
Choose a single cross-validation fold to represent.
Parameters
----------
k : int
Index of selected fold.
"""
return self._folds[k]
def set_input_space(self, space):
"""
Set input space.
Parameters
----------
space : Space
The input space for this layer.
"""
return [fold.set_input_space(space) for fold in self._folds]
def get_params(self):
"""Get parameters."""
return self._folds[0].get_params()
def get_input_space(self):
"""Get input space."""
return self._folds[0].get_input_space()
def get_output_space(self):
"""Get output space."""
return self._folds[0].get_output_space()
def get_monitoring_channels(self):
"""Get monitoring channels."""
return self._folds[0].get_monitoring_channels()
| bsd-3-clause |
mhbu50/erpnext | erpnext/healthcare/doctype/lab_test/lab_test.py | 3 | 13678 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, ESS and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
from frappe.utils import getdate, cstr, get_link_to_form
class LabTest(Document):
	"""Lab Test document controller.

	Handles result validation, status transitions, sorting of sensitivity
	rows and computation of secondary UOM results.
	"""

	def validate(self):
		# Only recalculate secondary UOM results for saved documents;
		# a new document has no result values entered yet.
		if not self.is_new():
			self.set_secondary_uom_result()

	def on_submit(self):
		self.validate_result_values()
		self.db_set('submitted_date', getdate())
		self.db_set('status', 'Completed')

	def on_cancel(self):
		self.db_set('status', 'Cancelled')
		self.reload()

	def on_update(self):
		# Keep sensitivity rows sorted alphabetically by antibiotic and
		# renumber idx so the grid displays them in that order.
		if self.sensitivity_test_items:
			sensitivity = sorted(self.sensitivity_test_items, key=lambda x: x.antibiotic_sensitivity)
			for i, item in enumerate(sensitivity):
				item.idx = i + 1
			self.sensitivity_test_items = sensitivity

	def after_insert(self):
		if self.prescription:
			frappe.db.set_value('Lab Prescription', self.prescription, 'lab_test_created', 1)
			if frappe.db.get_value('Lab Prescription', self.prescription, 'invoiced'):
				self.invoiced = True
		if not self.lab_test_name and self.template:
			self.load_test_from_template()
			self.reload()

	def load_test_from_template(self):
		"""Populate this document's fields and result rows from its template."""
		lab_test = self
		create_test_from_template(lab_test)
		self.reload()

	def set_secondary_uom_result(self):
		"""Compute secondary UOM results from primary results and conversion factors."""
		for item in self.normal_test_items:
			if item.result_value and item.secondary_uom and item.conversion_factor:
				try:
					item.secondary_uom_result = float(item.result_value) * float(item.conversion_factor)
				# Narrowed from a bare except: only non-numeric inputs are expected here.
				except (TypeError, ValueError):
					item.secondary_uom_result = ''
					# Translate first, then format, so the i18n lookup matches
					# the source string (the original formatted before translating).
					frappe.msgprint(_('Row #{0}: Result for Secondary UOM not calculated').format(item.idx), title=_('Warning'))

	def validate_result_values(self):
		"""Raise if any mandatory result value (normal or descriptive) is missing."""
		if self.normal_test_items:
			for item in self.normal_test_items:
				if not item.result_value and not item.allow_blank and item.require_result_value:
					frappe.throw(_('Row #{0}: Please enter the result value for {1}').format(
						item.idx, frappe.bold(item.lab_test_name)), title=_('Mandatory Results'))

		if self.descriptive_test_items:
			for item in self.descriptive_test_items:
				if not item.result_value and not item.allow_blank and item.require_result_value:
					frappe.throw(_('Row #{0}: Please enter the result value for {1}').format(
						item.idx, frappe.bold(item.lab_test_particulars)), title=_('Mandatory Results'))
def create_test_from_template(lab_test):
	"""Copy template fields onto *lab_test*, attach a sample collection
	and build its result rows."""
	test_template = frappe.get_doc('Lab Test Template', lab_test.template)
	patient_doc = frappe.get_doc('Patient', lab_test.patient)

	lab_test.lab_test_name = test_template.lab_test_name
	lab_test.result_date = getdate()
	# These fields are copied over one-to-one from the template.
	for fieldname in ('department', 'lab_test_group', 'legend_print_position',
			'result_legend', 'worksheet_instructions'):
		setattr(lab_test, fieldname, getattr(test_template, fieldname))

	lab_test = create_sample_collection(lab_test, test_template, patient_doc, None)
	lab_test = load_result_format(lab_test, test_template, None, None)
@frappe.whitelist()
def update_status(status, name):
	"""Set a Lab Test's status and stamp the approval date."""
	if not (name and status):
		return
	frappe.db.set_value('Lab Test', name, {
		'status': status,
		'approved_date': getdate()
	})
@frappe.whitelist()
def create_multiple(doctype, docname):
	"""Create Lab Tests from a Sales Invoice or a Patient Encounter and
	report the outcome to the user."""
	if not doctype or not docname:
		frappe.throw(_('Sales Invoice or Patient Encounter is required to create Lab Tests'), title=_('Insufficient Data'))

	# Dispatch on the source doctype; anything else creates nothing.
	creators = {
		'Sales Invoice': create_lab_test_from_invoice,
		'Patient Encounter': create_lab_test_from_encounter,
	}
	creator = creators.get(doctype)
	lab_test_created = creator(docname) if creator else False

	if lab_test_created:
		frappe.msgprint(_('Lab Test(s) {0} created successfully').format(lab_test_created), indicator='green')
	else:
		frappe.msgprint(_('No Lab Tests created'))
def create_lab_test_from_encounter(encounter):
	"""Create Lab Tests for the encounter's pending prescriptions.

	Returns a comma-separated string of created Lab Test names, or
	``False`` when nothing was created.
	"""
	created_names = []
	encounter = frappe.get_doc('Patient Encounter', encounter)

	if encounter and encounter.lab_test_prescription:
		patient = frappe.get_doc('Patient', encounter.patient)
		for line in encounter.lab_test_prescription:
			if line.lab_test_created:
				continue
			template = get_lab_test_template(line.lab_test_code)
			if not template:
				continue
			lab_test = create_lab_test_doc(line.invoiced, encounter.practitioner, patient, template, encounter.company)
			lab_test.save(ignore_permissions=True)
			frappe.db.set_value('Lab Prescription', line.name, 'lab_test_created', 1)
			created_names.append(lab_test.name)

	return ', '.join(created_names) if created_names else False
def create_lab_test_from_invoice(sales_invoice):
	"""Create Lab Tests for eligible Sales Invoice items.

	Returns a comma-separated string of created Lab Test names, or
	``False`` when nothing was created.
	"""
	lab_tests_created = False
	invoice = frappe.get_doc('Sales Invoice', sales_invoice)
	if invoice and invoice.patient:
		patient = frappe.get_doc('Patient', invoice.patient)
		for item in invoice.items:
			lab_test_created = 0
			if item.reference_dt == 'Lab Prescription':
				lab_test_created = frappe.db.get_value('Lab Prescription', item.reference_dn, 'lab_test_created')
			elif item.reference_dt == 'Lab Test':
				# Item already references a Lab Test; nothing to create.
				lab_test_created = 1
			if lab_test_created != 1:
				template = get_lab_test_template(item.item_code)
				if template:
					# invoiced=True: we are creating from a billed invoice item.
					lab_test = create_lab_test_doc(True, invoice.ref_practitioner, patient, template, invoice.company)
					if item.reference_dt == 'Lab Prescription':
						lab_test.prescription = item.reference_dn
					lab_test.save(ignore_permissions = True)
					if item.reference_dt != 'Lab Prescription':
						# Link the invoice item back to the newly created Lab Test.
						frappe.db.set_value('Sales Invoice Item', item.name, 'reference_dt', 'Lab Test')
						frappe.db.set_value('Sales Invoice Item', item.name, 'reference_dn', lab_test.name)
					if not lab_tests_created:
						lab_tests_created = lab_test.name
					else:
						lab_tests_created += ', ' + lab_test.name
	return lab_tests_created
def get_lab_test_template(item):
	"""Return the Lab Test Template linked to *item*, or ``None`` if absent.

	Returning ``None`` (rather than ``False``) keeps the "not found" value
	consistent with ``get_employee_by_user_id``; callers in this module only
	test truthiness, so this is backward-compatible.
	"""
	template_id = frappe.db.exists('Lab Test Template', {'item': item})
	if template_id:
		return frappe.get_doc('Lab Test Template', template_id)
	return None
def create_lab_test_doc(invoiced, practitioner, patient, template, company):
	"""Build (but do not save) a Lab Test from a patient and a template."""
	doc = frappe.new_doc('Lab Test')
	# Billing / practitioner context.
	doc.invoiced = invoiced
	doc.practitioner = practitioner
	doc.company = company
	# Patient demographics and contact details.
	doc.patient = patient.name
	doc.patient_age = patient.get_age()
	doc.patient_sex = patient.sex
	doc.email = patient.email
	doc.mobile = patient.mobile
	doc.report_preference = patient.report_preference
	# Template-derived fields.
	doc.department = template.department
	doc.template = template.name
	doc.lab_test_group = template.lab_test_group
	doc.result_date = getdate()
	return doc
def create_normals(template, lab_test):
	"""Append a single mandatory normal-result row built from *template*."""
	lab_test.normal_toggle = 1
	row = lab_test.append('normal_test_items')
	row.lab_test_name = template.lab_test_name
	row.lab_test_uom = template.lab_test_uom
	row.secondary_uom = template.secondary_uom
	row.conversion_factor = template.conversion_factor
	row.normal_range = template.lab_test_normal_range
	row.require_result_value = 1
	row.allow_blank = 0
	row.template = template.name
def create_compounds(template, lab_test, is_group):
	"""Append one normal-result row per compound event in *template*.

	When *is_group* is true the event name is stored in ``lab_test_event``
	(a sub-row under a group heading); otherwise in ``lab_test_name``.
	"""
	lab_test.normal_toggle = 1
	event_field = 'lab_test_event' if is_group else 'lab_test_name'
	for event_template in template.normal_test_templates:
		row = lab_test.append('normal_test_items')
		setattr(row, event_field, event_template.lab_test_event)
		row.lab_test_uom = event_template.lab_test_uom
		row.secondary_uom = event_template.secondary_uom
		row.conversion_factor = event_template.conversion_factor
		row.normal_range = event_template.normal_range
		row.require_result_value = 1
		row.allow_blank = event_template.allow_blank
		row.template = template.name
def create_descriptives(template, lab_test):
	"""Append a descriptive-result row for each particulars entry of *template*.

	Also switches on the sensitivity section when the template requires it.
	"""
	lab_test.descriptive_toggle = 1
	if template.sensitivity:
		lab_test.sensitivity_toggle = 1
	for particulars_template in template.descriptive_test_templates:
		row = lab_test.append('descriptive_test_items')
		row.lab_test_particulars = particulars_template.particulars
		row.require_result_value = 1
		row.allow_blank = particulars_template.allow_blank
		row.template = template.name
def create_sample_doc(template, patient, invoice, company = None):
	"""Create or update the Sample Collection for *template*'s sample.

	If a draft Sample Collection already exists for this patient and sample,
	its quantity (and collection details) are updated; otherwise a new one
	is created and saved. Returns the Sample Collection, or ``None`` when
	the template needs no sample.
	"""
	if not template.sample:
		return None

	sample_exists = frappe.db.exists({
		'doctype': 'Sample Collection',
		'patient': patient.name,
		'docstatus': 0,
		'sample': template.sample
	})
	if sample_exists:
		# Update the existing draft by adding this template's quantity.
		# NOTE(review): frappe.db.exists with a dict filter is indexed as
		# sample_exists[0][0] here (list-of-tuples result) — confirm against
		# the installed frappe version before changing.
		sample_collection = frappe.get_doc('Sample Collection', sample_exists[0][0])
		quantity = int(sample_collection.sample_qty) + int(template.sample_qty)
		if template.sample_details:
			sample_details = sample_collection.sample_details + '\n-\n' + _('Test: ')
			sample_details += (template.get('lab_test_name') or template.get('template')) + '\n'
			sample_details += _('Collection Details: ') + '\n\t' + template.sample_details
			frappe.db.set_value('Sample Collection', sample_collection.name, 'sample_details', sample_details)
		frappe.db.set_value('Sample Collection', sample_collection.name, 'sample_qty', quantity)
	else:
		# Create a new Sample Collection, copying values from the invoice.
		sample_collection = frappe.new_doc('Sample Collection')
		if invoice:
			sample_collection.invoiced = True
		sample_collection.patient = patient.name
		sample_collection.patient_age = patient.get_age()
		sample_collection.patient_sex = patient.sex
		sample_collection.sample = template.sample
		sample_collection.sample_uom = template.sample_uom
		sample_collection.sample_qty = template.sample_qty
		sample_collection.company = company
		if template.sample_details:
			# Fixed typo ('Detials') and aligned wording/translation with the
			# update branch above.
			sample_collection.sample_details = _('Test: ') + (template.get('lab_test_name') or template.get('template')) + '\n' + _('Collection Details: ') + '\n\t' + template.sample_details
		sample_collection.save(ignore_permissions=True)

	return sample_collection
def create_sample_collection(lab_test, template, patient, invoice):
	"""Attach a Sample Collection to *lab_test* when enabled in Healthcare Settings."""
	if not frappe.get_cached_value('Healthcare Settings', None, 'create_sample_collection_for_lab_test'):
		return lab_test

	sample_collection = create_sample_doc(template, patient, invoice, lab_test.company)
	if sample_collection:
		lab_test.sample = sample_collection.name
		link = get_link_to_form('Sample Collection', sample_collection.name)
		frappe.msgprint(_('Sample Collection {0} has been created').format(link),
			title=_('Sample Collection'), indicator='green')
	return lab_test
def load_result_format(lab_test, template, prescription, invoice):
	"""Populate result rows on *lab_test* according to the template type.

	Single / Compound / Descriptive templates map directly to one row set;
	Grouped templates are expanded member by member, with heading rows
	inserted before Compound/Descriptive members. Unless the template type
	is 'No Result', the document is saved (and the prescription linked and
	marked invoiced). Returns *lab_test*.
	"""
	if template.lab_test_template_type == 'Single':
		create_normals(template, lab_test)
	elif template.lab_test_template_type == 'Compound':
		create_compounds(template, lab_test, False)
	elif template.lab_test_template_type == 'Descriptive':
		create_descriptives(template, lab_test)
	elif template.lab_test_template_type == 'Grouped':
		# Iterate for each template in the group and create one result for all.
		for lab_test_group in template.lab_test_groups:
			if lab_test_group.lab_test_template:
				template_in_group = frappe.get_doc('Lab Test Template', lab_test_group.lab_test_template)
				if template_in_group:
					if template_in_group.lab_test_template_type == 'Single':
						create_normals(template_in_group, lab_test)
					elif template_in_group.lab_test_template_type == 'Compound':
						# Heading row (no result value) before the compound sub-rows.
						normal_heading = lab_test.append('normal_test_items')
						normal_heading.lab_test_name = template_in_group.lab_test_name
						normal_heading.require_result_value = 0
						normal_heading.allow_blank = 1
						normal_heading.template = template_in_group.name
						create_compounds(template_in_group, lab_test, True)
					elif template_in_group.lab_test_template_type == 'Descriptive':
						# Heading row (no result value) before the descriptive rows.
						descriptive_heading = lab_test.append('descriptive_test_items')
						descriptive_heading.lab_test_name = template_in_group.lab_test_name
						descriptive_heading.require_result_value = 0
						descriptive_heading.allow_blank = 1
						descriptive_heading.template = template_in_group.name
						create_descriptives(template_in_group, lab_test)
			else: # Lab Test Group - Add New Line
				normal = lab_test.append('normal_test_items')
				normal.lab_test_name = lab_test_group.group_event
				normal.lab_test_uom = lab_test_group.group_test_uom
				normal.secondary_uom = lab_test_group.secondary_uom
				normal.conversion_factor = lab_test_group.conversion_factor
				normal.normal_range = lab_test_group.group_test_normal_range
				normal.allow_blank = lab_test_group.allow_blank
				normal.require_result_value = 1
				normal.template = template.name
	if template.lab_test_template_type != 'No Result':
		if prescription:
			lab_test.prescription = prescription
			if invoice:
				frappe.db.set_value('Lab Prescription', prescription, 'invoiced', True)
		lab_test.save(ignore_permissions=True) # Insert the result
	return lab_test
@frappe.whitelist()
def get_employee_by_user_id(user_id):
	"""Return the Employee document linked to *user_id*, or ``None``."""
	employee_name = frappe.db.exists('Employee', {'user_id': user_id})
	if not employee_name:
		return None
	return frappe.get_doc('Employee', employee_name)
@frappe.whitelist()
def get_lab_test_prescribed(patient):
	"""Return prescribed-but-uncreated lab tests for *patient*.

	Each row: (prescription name, lab test code, encounter name, invoiced
	flag, practitioner, practitioner name, encounter date).
	"""
	# NOTE(review): (patient) is the bare value, not a one-tuple; frappe's
	# db.sql accepts scalars, but (patient,) would be the conventional form —
	# confirm before changing.
	return frappe.db.sql(
		'''
			select
				lp.name,
				lp.lab_test_code,
				lp.parent,
				lp.invoiced,
				pe.practitioner,
				pe.practitioner_name,
				pe.encounter_date
			from
				`tabPatient Encounter` pe, `tabLab Prescription` lp
			where
				pe.patient=%s
				and lp.parent=pe.name
				and lp.lab_test_created=0
		''', (patient))
| gpl-3.0 |
thnee/ansible | lib/ansible/modules/crypto/openssl_privatekey.py | 2 | 34747 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Yanis Guenane <yanis+ansible@guenane.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata: preview status, community supported.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: openssl_privatekey
version_added: "2.3"
short_description: Generate OpenSSL private keys
description:
- This module allows one to (re)generate OpenSSL private keys.
- One can generate L(RSA,https://en.wikipedia.org/wiki/RSA_(cryptosystem)),
L(DSA,https://en.wikipedia.org/wiki/Digital_Signature_Algorithm),
L(ECC,https://en.wikipedia.org/wiki/Elliptic-curve_cryptography) or
L(EdDSA,https://en.wikipedia.org/wiki/EdDSA) private keys.
- Keys are generated in PEM format.
- "Please note that the module regenerates private keys if they don't match
the module's options. In particular, if you provide another passphrase
(or specify none), change the keysize, etc., the private key will be
regenerated. If you are concerned that this could **overwrite your private key**,
consider using the I(backup) option."
- The module can use the cryptography Python library, or the pyOpenSSL Python
library. By default, it tries to detect which one is available. This can be
overridden with the I(select_crypto_backend) option. Please note that the
PyOpenSSL backend was deprecated in Ansible 2.9 and will be removed in Ansible 2.13."
requirements:
- Either cryptography >= 1.2.3 (older versions might work as well)
- Or pyOpenSSL
author:
- Yanis Guenane (@Spredzy)
- Felix Fontein (@felixfontein)
options:
state:
description:
- Whether the private key should exist or not, taking action if the state is different from what is stated.
type: str
default: present
choices: [ absent, present ]
size:
description:
- Size (in bits) of the TLS/SSL key to generate.
type: int
default: 4096
type:
description:
- The algorithm used to generate the TLS/SSL private key.
- Note that C(ECC), C(X25519), C(X448), C(Ed25519) and C(Ed448) require the C(cryptography) backend.
C(X25519) needs cryptography 2.5 or newer, while C(X448), C(Ed25519) and C(Ed448) require
cryptography 2.6 or newer. For C(ECC), the minimal cryptography version required depends on the
I(curve) option.
type: str
default: RSA
choices: [ DSA, ECC, Ed25519, Ed448, RSA, X25519, X448 ]
curve:
description:
- Note that not all curves are supported by all versions of C(cryptography).
- For maximal interoperability, C(secp384r1) or C(secp256r1) should be used.
- We use the curve names as defined in the
L(IANA registry for TLS,https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-8).
type: str
choices:
- secp384r1
- secp521r1
- secp224r1
- secp192r1
- secp256r1
- secp256k1
- brainpoolP256r1
- brainpoolP384r1
- brainpoolP512r1
- sect571k1
- sect409k1
- sect283k1
- sect233k1
- sect163k1
- sect571r1
- sect409r1
- sect283r1
- sect233r1
- sect163r2
version_added: "2.8"
force:
description:
- Should the key be regenerated even if it already exists.
type: bool
default: no
path:
description:
- Name of the file in which the generated TLS/SSL private key will be written. It will have 0600 mode.
type: path
required: true
passphrase:
description:
- The passphrase for the private key.
type: str
version_added: "2.4"
cipher:
description:
- The cipher to encrypt the private key. (Valid values can be found by
running `openssl list -cipher-algorithms` or `openssl list-cipher-algorithms`,
depending on your OpenSSL version.)
- When using the C(cryptography) backend, use C(auto).
type: str
version_added: "2.4"
select_crypto_backend:
description:
- Determines which crypto backend to use.
- The default choice is C(auto), which tries to use C(cryptography) if available, and falls back to C(pyopenssl).
- If set to C(pyopenssl), will try to use the L(pyOpenSSL,https://pypi.org/project/pyOpenSSL/) library.
- If set to C(cryptography), will try to use the L(cryptography,https://cryptography.io/) library.
- Please note that the C(pyopenssl) backend has been deprecated in Ansible 2.9, and will be removed in Ansible 2.13.
From that point on, only the C(cryptography) backend will be available.
type: str
default: auto
choices: [ auto, cryptography, pyopenssl ]
version_added: "2.8"
format:
description:
- Determines which format the private key is written in. By default, PKCS1 (traditional OpenSSL format)
is used for all keys which support it. Please note that not every key can be exported in any format.
      - The value C(auto) selects a format based on the key format. The value C(auto_ignore) does the same,
but for existing private key files, it will not force a regenerate when its format is not the automatically
selected one for generation.
- Note that if the format for an existing private key mismatches, the key is *regenerated* by default.
To change this behavior, use the I(format_mismatch) option.
- The I(format) option is only supported by the C(cryptography) backend. The C(pyopenssl) backend will
fail if a value different from C(auto_ignore) is used.
type: str
default: auto_ignore
choices: [ pkcs1, pkcs8, raw, auto, auto_ignore ]
version_added: "2.10"
format_mismatch:
description:
- Determines behavior of the module if the format of a private key does not match the expected format, but all
other parameters are as expected.
- If set to C(regenerate) (default), generates a new private key.
- If set to C(convert), the key will be converted to the new format instead.
- Only supported by the C(cryptography) backend.
type: str
default: regenerate
choices: [ regenerate, convert ]
version_added: "2.10"
backup:
description:
- Create a backup file including a timestamp so you can get
the original private key back if you overwrote it with a new one by accident.
type: bool
default: no
version_added: "2.8"
extends_documentation_fragment:
- files
seealso:
- module: openssl_certificate
- module: openssl_csr
- module: openssl_dhparam
- module: openssl_pkcs12
- module: openssl_publickey
'''
EXAMPLES = r'''
- name: Generate an OpenSSL private key with the default values (4096 bits, RSA)
openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
- name: Generate an OpenSSL private key with the default values (4096 bits, RSA) and a passphrase
openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
passphrase: ansible
cipher: aes256
- name: Generate an OpenSSL private key with a different size (2048 bits)
openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
size: 2048
- name: Force regenerate an OpenSSL private key if it already exists
openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
force: yes
- name: Generate an OpenSSL private key with a different algorithm (DSA)
openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
type: DSA
'''
RETURN = r'''
size:
description: Size (in bits) of the TLS/SSL private key.
returned: changed or success
type: int
sample: 4096
type:
description: Algorithm used to generate the TLS/SSL private key.
returned: changed or success
type: str
sample: RSA
curve:
description: Elliptic curve used to generate the TLS/SSL private key.
returned: changed or success, and I(type) is C(ECC)
type: str
sample: secp256r1
filename:
description: Path to the generated TLS/SSL private key file.
returned: changed or success
type: str
sample: /etc/ssl/private/ansible.com.pem
fingerprint:
description:
- The fingerprint of the public key. Fingerprint will be generated for each C(hashlib.algorithms) available.
- The PyOpenSSL backend requires PyOpenSSL >= 16.0 for meaningful output.
returned: changed or success
type: dict
sample:
md5: "84:75:71:72:8d:04:b5:6c:4d:37:6d:66:83:f5:4c:29"
sha1: "51:cc:7c:68:5d:eb:41:43:88:7e:1a:ae:c7:f8:24:72:ee:71:f6:10"
sha224: "b1:19:a6:6c:14:ac:33:1d:ed:18:50:d3:06:5c:b2:32:91:f1:f1:52:8c:cb:d5:75:e9:f5:9b:46"
sha256: "41:ab:c7:cb:d5:5f:30:60:46:99:ac:d4:00:70:cf:a1:76:4f:24:5d:10:24:57:5d:51:6e:09:97:df:2f:de:c7"
sha384: "85:39:50:4e:de:d9:19:33:40:70:ae:10:ab:59:24:19:51:c3:a2:e4:0b:1c:b1:6e:dd:b3:0c:d9:9e:6a:46:af:da:18:f8:ef:ae:2e:c0:9a:75:2c:9b:b3:0f:3a:5f:3d"
sha512: "fd:ed:5e:39:48:5f:9f:fe:7f:25:06:3f:79:08:cd:ee:a5:e7:b3:3d:13:82:87:1f:84:e1:f5:c7:28:77:53:94:86:56:38:69:f0:d9:35:22:01:1e:a6:60:...:0f:9b"
backup_file:
description: Name of backup file created.
returned: changed and if I(backup) is C(yes)
type: str
sample: /path/to/privatekey.pem.2019-03-09@11:22~
'''
import abc
import os
import traceback
from distutils.version import LooseVersion
# Minimum library versions this module claims to work with.
MINIMAL_PYOPENSSL_VERSION = '0.6'
MINIMAL_CRYPTOGRAPHY_VERSION = '1.2.3'

# Probe for the pyOpenSSL backend; keep the import traceback so a helpful
# error can be reported later if this backend is selected but missing.
PYOPENSSL_IMP_ERR = None
try:
    import OpenSSL
    from OpenSSL import crypto
    PYOPENSSL_VERSION = LooseVersion(OpenSSL.__version__)
except ImportError:
    PYOPENSSL_IMP_ERR = traceback.format_exc()
    PYOPENSSL_FOUND = False
else:
    PYOPENSSL_FOUND = True
# Probe for the cryptography backend and every submodule it needs; the
# version is kept for later feature checks, and the traceback for error
# reporting if the import fails.
CRYPTOGRAPHY_IMP_ERR = None
try:
    import cryptography
    import cryptography.exceptions
    import cryptography.hazmat.backends
    import cryptography.hazmat.primitives.serialization
    import cryptography.hazmat.primitives.asymmetric.rsa
    import cryptography.hazmat.primitives.asymmetric.dsa
    import cryptography.hazmat.primitives.asymmetric.ec
    import cryptography.hazmat.primitives.asymmetric.utils
    CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
except ImportError:
    CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
    CRYPTOGRAPHY_FOUND = False
else:
    CRYPTOGRAPHY_FOUND = True
from ansible.module_utils.crypto import (
CRYPTOGRAPHY_HAS_X25519,
CRYPTOGRAPHY_HAS_X25519_FULL,
CRYPTOGRAPHY_HAS_X448,
CRYPTOGRAPHY_HAS_ED25519,
CRYPTOGRAPHY_HAS_ED448,
)
from ansible.module_utils import crypto as crypto_utils
from ansible.module_utils._text import to_native, to_bytes
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
class PrivateKeyError(crypto_utils.OpenSSLObjectError):
    """Raised when private key generation or loading fails."""
    pass
class PrivateKeyBase(crypto_utils.OpenSSLObject):
    """Abstract base for the private key backends.

    Drives the overall generate / convert / remove workflow and result
    reporting; concrete subclasses implement key generation, serialization
    and the individual consistency checks.
    """

    def __init__(self, module):
        super(PrivateKeyBase, self).__init__(
            module.params['path'],
            module.params['state'],
            module.params['force'],
            module.check_mode
        )
        self.size = module.params['size']
        self.passphrase = module.params['passphrase']
        self.cipher = module.params['cipher']
        self.privatekey = None
        self.fingerprint = {}
        self.format = module.params['format']
        self.format_mismatch = module.params['format_mismatch']
        self.backup = module.params['backup']
        self.backup_file = None

        # Private keys must not be group/world readable; default the file
        # mode to 0600 unless the user explicitly set one.
        if module.params['mode'] is None:
            module.params['mode'] = '0600'

    @abc.abstractmethod
    def _generate_private_key(self):
        """(Re-)Generate private key."""
        pass

    @abc.abstractmethod
    def _get_private_key_data(self):
        """Return bytes for self.privatekey"""
        pass

    @abc.abstractmethod
    def _get_fingerprint(self):
        """Return a dict of fingerprints of the public key."""
        pass

    def generate(self, module):
        """Generate a keypair."""
        # ignore_conversion=True: a pure format mismatch does not trigger a
        # regenerate here; the elif branch below converts the key in place.
        if not self.check(module, perms_required=False, ignore_conversion=True) or self.force:
            # Regenerate
            if self.backup:
                self.backup_file = module.backup_local(self.path)
            self._generate_private_key()
            privatekey_data = self._get_private_key_data()
            crypto_utils.write_file(module, privatekey_data, 0o600)
            self.changed = True
        elif not self.check(module, perms_required=False, ignore_conversion=False):
            # Convert
            if self.backup:
                self.backup_file = module.backup_local(self.path)
            privatekey_data = self._get_private_key_data()
            crypto_utils.write_file(module, privatekey_data, 0o600)
            self.changed = True

        self.fingerprint = self._get_fingerprint()
        file_args = module.load_file_common_arguments(module.params)
        if module.set_fs_attributes_if_different(file_args, False):
            self.changed = True

    def remove(self, module):
        """Remove the key file, backing it up first when requested."""
        if self.backup:
            self.backup_file = module.backup_local(self.path)
        super(PrivateKeyBase, self).remove(module)

    @abc.abstractmethod
    def _check_passphrase(self):
        """Return True if the key on disk loads with self.passphrase."""
        pass

    @abc.abstractmethod
    def _check_size_and_type(self):
        """Return True if the key on disk matches the requested size and type."""
        pass

    @abc.abstractmethod
    def _check_format(self):
        """Return True if the key on disk is in the requested format."""
        pass

    def check(self, module, perms_required=True, ignore_conversion=True):
        """Ensure the resource is in its desired state."""
        state_and_perms = super(PrivateKeyBase, self).check(module, perms_required)

        if not state_and_perms or not self._check_passphrase():
            return False

        if not self._check_size_and_type():
            return False

        if not self._check_format():
            # A format mismatch only fails the check when conversion is not
            # allowed (ignore_conversion=False) or not configured.
            if not ignore_conversion or self.format_mismatch != 'convert':
                return False

        return True

    def dump(self):
        """Serialize the object into a dictionary."""
        result = {
            'size': self.size,
            'filename': self.path,
            'changed': self.changed,
            'fingerprint': self.fingerprint,
        }
        if self.backup_file:
            result['backup_file'] = self.backup_file

        return result
# Implementation using pyOpenSSL
class PrivateKeyPyOpenSSL(PrivateKeyBase):
    """Private key backend based on pyOpenSSL (supports RSA and DSA only)."""

    def __init__(self, module):
        super(PrivateKeyPyOpenSSL, self).__init__(module)

        if module.params['type'] == 'RSA':
            self.type = crypto.TYPE_RSA
        elif module.params['type'] == 'DSA':
            self.type = crypto.TYPE_DSA
        else:
            module.fail_json(msg="PyOpenSSL backend only supports RSA and DSA keys.")

        if self.format != 'auto_ignore':
            module.fail_json(msg="PyOpenSSL backend only supports auto_ignore format.")

    def _generate_private_key(self):
        """(Re-)Generate private key."""
        self.privatekey = crypto.PKey()
        try:
            self.privatekey.generate_key(self.type, self.size)
        except (TypeError, ValueError) as exc:
            raise PrivateKeyError(exc)

    def _get_private_key_data(self):
        """Return bytes for self.privatekey"""
        if self.cipher and self.passphrase:
            return crypto.dump_privatekey(crypto.FILETYPE_PEM, self.privatekey,
                                          self.cipher, to_bytes(self.passphrase))
        else:
            return crypto.dump_privatekey(crypto.FILETYPE_PEM, self.privatekey)

    def _get_fingerprint(self):
        # Delegates to crypto_utils, which loads the key from disk.
        return crypto_utils.get_fingerprint(self.path, self.passphrase)

    def _check_passphrase(self):
        # Best effort: any load failure means the passphrase (or key) is bad.
        try:
            crypto_utils.load_privatekey(self.path, self.passphrase)
            return True
        except Exception as dummy:
            return False

    def _check_size_and_type(self):
        def _check_size(privatekey):
            return self.size == privatekey.bits()

        def _check_type(privatekey):
            return self.type == privatekey.type()

        try:
            privatekey = crypto_utils.load_privatekey(self.path, self.passphrase)
        except crypto_utils.OpenSSLBadPassphraseError as exc:
            raise PrivateKeyError(exc)

        return _check_size(privatekey) and _check_type(privatekey)

    def _check_format(self):
        # Not supported by this backend
        return True

    def dump(self):
        """Serialize the object into a dictionary."""
        result = super(PrivateKeyPyOpenSSL, self).dump()
        if self.type == crypto.TYPE_RSA:
            result['type'] = 'RSA'
        else:
            result['type'] = 'DSA'
        return result
# Implementation with using cryptography
class PrivateKeyCryptography(PrivateKeyBase):
def _get_ec_class(self, ectype):
ecclass = cryptography.hazmat.primitives.asymmetric.ec.__dict__.get(ectype)
if ecclass is None:
self.module.fail_json(msg='Your cryptography version does not support {0}'.format(ectype))
return ecclass
def _add_curve(self, name, ectype, deprecated=False):
def create(size):
ecclass = self._get_ec_class(ectype)
return ecclass()
def verify(privatekey):
ecclass = self._get_ec_class(ectype)
return isinstance(privatekey.private_numbers().public_numbers.curve, ecclass)
self.curves[name] = {
'create': create,
'verify': verify,
'deprecated': deprecated,
}
def __init__(self, module):
super(PrivateKeyCryptography, self).__init__(module)
self.curves = dict()
self._add_curve('secp384r1', 'SECP384R1')
self._add_curve('secp521r1', 'SECP521R1')
self._add_curve('secp224r1', 'SECP224R1')
self._add_curve('secp192r1', 'SECP192R1')
self._add_curve('secp256r1', 'SECP256R1')
self._add_curve('secp256k1', 'SECP256K1')
self._add_curve('brainpoolP256r1', 'BrainpoolP256R1', deprecated=True)
self._add_curve('brainpoolP384r1', 'BrainpoolP384R1', deprecated=True)
self._add_curve('brainpoolP512r1', 'BrainpoolP512R1', deprecated=True)
self._add_curve('sect571k1', 'SECT571K1', deprecated=True)
self._add_curve('sect409k1', 'SECT409K1', deprecated=True)
self._add_curve('sect283k1', 'SECT283K1', deprecated=True)
self._add_curve('sect233k1', 'SECT233K1', deprecated=True)
self._add_curve('sect163k1', 'SECT163K1', deprecated=True)
self._add_curve('sect571r1', 'SECT571R1', deprecated=True)
self._add_curve('sect409r1', 'SECT409R1', deprecated=True)
self._add_curve('sect283r1', 'SECT283R1', deprecated=True)
self._add_curve('sect233r1', 'SECT233R1', deprecated=True)
self._add_curve('sect163r2', 'SECT163R2', deprecated=True)
self.module = module
self.cryptography_backend = cryptography.hazmat.backends.default_backend()
self.type = module.params['type']
self.curve = module.params['curve']
if not CRYPTOGRAPHY_HAS_X25519 and self.type == 'X25519':
self.module.fail_json(msg='Your cryptography version does not support X25519')
if not CRYPTOGRAPHY_HAS_X25519_FULL and self.type == 'X25519':
self.module.fail_json(msg='Your cryptography version does not support X25519 serialization')
if not CRYPTOGRAPHY_HAS_X448 and self.type == 'X448':
self.module.fail_json(msg='Your cryptography version does not support X448')
if not CRYPTOGRAPHY_HAS_ED25519 and self.type == 'Ed25519':
self.module.fail_json(msg='Your cryptography version does not support Ed25519')
if not CRYPTOGRAPHY_HAS_ED448 and self.type == 'Ed448':
self.module.fail_json(msg='Your cryptography version does not support Ed448')
def _get_wanted_format(self):
if self.format not in ('auto', 'auto_ignore'):
return self.format
if self.type in ('X25519', 'X448', 'Ed25519', 'Ed448'):
return 'pkcs8'
else:
return 'pkcs1'
def _generate_private_key(self):
"""(Re-)Generate private key."""
try:
if self.type == 'RSA':
self.privatekey = cryptography.hazmat.primitives.asymmetric.rsa.generate_private_key(
public_exponent=65537, # OpenSSL always uses this
key_size=self.size,
backend=self.cryptography_backend
)
if self.type == 'DSA':
self.privatekey = cryptography.hazmat.primitives.asymmetric.dsa.generate_private_key(
key_size=self.size,
backend=self.cryptography_backend
)
if CRYPTOGRAPHY_HAS_X25519_FULL and self.type == 'X25519':
self.privatekey = cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.generate()
if CRYPTOGRAPHY_HAS_X448 and self.type == 'X448':
self.privatekey = cryptography.hazmat.primitives.asymmetric.x448.X448PrivateKey.generate()
if CRYPTOGRAPHY_HAS_ED25519 and self.type == 'Ed25519':
self.privatekey = cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey.generate()
if CRYPTOGRAPHY_HAS_ED448 and self.type == 'Ed448':
self.privatekey = cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey.generate()
if self.type == 'ECC' and self.curve in self.curves:
if self.curves[self.curve]['deprecated']:
self.module.warn('Elliptic curves of type {0} should not be used for new keys!'.format(self.curve))
self.privatekey = cryptography.hazmat.primitives.asymmetric.ec.generate_private_key(
curve=self.curves[self.curve]['create'](self.size),
backend=self.cryptography_backend
)
except cryptography.exceptions.UnsupportedAlgorithm as dummy:
self.module.fail_json(msg='Cryptography backend does not support the algorithm required for {0}'.format(self.type))
def _get_private_key_data(self):
"""Return bytes for self.privatekey"""
# Select export format and encoding
try:
export_format = self._get_wanted_format()
export_encoding = cryptography.hazmat.primitives.serialization.Encoding.PEM
if export_format == 'pkcs1':
# "TraditionalOpenSSL" format is PKCS1
export_format = cryptography.hazmat.primitives.serialization.PrivateFormat.TraditionalOpenSSL
elif export_format == 'pkcs8':
export_format = cryptography.hazmat.primitives.serialization.PrivateFormat.PKCS8
elif export_format == 'raw':
export_format = cryptography.hazmat.primitives.serialization.PrivateFormat.Raw
export_encoding = cryptography.hazmat.primitives.serialization.Encoding.Raw
except AttributeError:
self.module.fail_json(msg='Cryptography backend does not support the selected output format "{0}"'.format(self.format))
# Select key encryption
encryption_algorithm = cryptography.hazmat.primitives.serialization.NoEncryption()
if self.cipher and self.passphrase:
if self.cipher == 'auto':
encryption_algorithm = cryptography.hazmat.primitives.serialization.BestAvailableEncryption(to_bytes(self.passphrase))
else:
self.module.fail_json(msg='Cryptography backend can only use "auto" for cipher option.')
# Serialize key
try:
return self.privatekey.private_bytes(
encoding=export_encoding,
format=export_format,
encryption_algorithm=encryption_algorithm
)
except ValueError as e:
self.module.fail_json(
msg='Cryptography backend cannot serialize the private key in the required format "{0}"'.format(self.format)
)
except Exception as dummy:
self.module.fail_json(
msg='Error while serializing the private key in the required format "{0}"'.format(self.format),
exception=traceback.format_exc()
)
    def _load_privatekey(self):
        """Load the private key from ``self.path``, handling PEM and raw formats.

        For raw keys the algorithm is inferred from the byte length
        (56 = X448, 57 = Ed448, 32 = X25519/Ed25519).  Any failure is wrapped
        in PrivateKeyError.
        """
        try:
            # Read bytes
            with open(self.path, 'rb') as f:
                data = f.read()

            # Interpret bytes depending on format.
            format = crypto_utils.identify_private_key_format(data)
            if format == 'raw':
                if len(data) == 56 and CRYPTOGRAPHY_HAS_X448:
                    return cryptography.hazmat.primitives.asymmetric.x448.X448PrivateKey.from_private_bytes(data)
                if len(data) == 57 and CRYPTOGRAPHY_HAS_ED448:
                    return cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey.from_private_bytes(data)
                if len(data) == 32:
                    # 32-byte raw keys are ambiguous between X25519 and Ed25519:
                    # prefer the declared self.type, fall back to whichever
                    # algorithm the backend supports, and finally just try both.
                    if CRYPTOGRAPHY_HAS_X25519 and (self.type == 'X25519' or not CRYPTOGRAPHY_HAS_ED25519):
                        return cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.from_private_bytes(data)
                    if CRYPTOGRAPHY_HAS_ED25519 and (self.type == 'Ed25519' or not CRYPTOGRAPHY_HAS_X25519):
                        return cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey.from_private_bytes(data)
                    if CRYPTOGRAPHY_HAS_X25519 and CRYPTOGRAPHY_HAS_ED25519:
                        try:
                            return cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.from_private_bytes(data)
                        except Exception:
                            return cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey.from_private_bytes(data)
                raise PrivateKeyError('Cannot load raw key')
            else:
                return cryptography.hazmat.primitives.serialization.load_pem_private_key(
                    data,
                    None if self.passphrase is None else to_bytes(self.passphrase),
                    backend=self.cryptography_backend
                )
        except Exception as e:
            raise PrivateKeyError(e)
def _get_fingerprint(self):
# Get bytes of public key
private_key = self._load_privatekey()
public_key = private_key.public_key()
public_key_bytes = public_key.public_bytes(
cryptography.hazmat.primitives.serialization.Encoding.DER,
cryptography.hazmat.primitives.serialization.PublicFormat.SubjectPublicKeyInfo
)
# Get fingerprints of public_key_bytes
return crypto_utils.get_fingerprint_of_bytes(public_key_bytes)
    def _check_passphrase(self):
        """Return a truthy value if the key at self.path loads with self.passphrase.

        Note: on success for PEM keys this returns the loaded key *object*
        (truthy), not literally True.  Raw keys cannot be encrypted, so for
        them the check is simply that no passphrase was supplied.
        """
        try:
            with open(self.path, 'rb') as f:
                data = f.read()
            format = crypto_utils.identify_private_key_format(data)
            if format == 'raw':
                # Raw keys cannot be encrypted
                return self.passphrase is None
            else:
                return cryptography.hazmat.primitives.serialization.load_pem_private_key(
                    data,
                    None if self.passphrase is None else to_bytes(self.passphrase),
                    backend=self.cryptography_backend
                )
        except Exception as dummy:
            # Wrong passphrase / unreadable key counts as "does not match".
            return False
    def _check_size_and_type(self):
        """Return True if the existing key matches the requested type/size/curve.

        Side effect: caches the loaded key on ``self.privatekey``.
        """
        privatekey = self._load_privatekey()
        self.privatekey = privatekey

        if isinstance(privatekey, cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey):
            return self.type == 'RSA' and self.size == privatekey.key_size
        if isinstance(privatekey, cryptography.hazmat.primitives.asymmetric.dsa.DSAPrivateKey):
            return self.type == 'DSA' and self.size == privatekey.key_size
        # The modern key types below have a fixed size, so only type is compared.
        if CRYPTOGRAPHY_HAS_X25519 and isinstance(privatekey, cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey):
            return self.type == 'X25519'
        if CRYPTOGRAPHY_HAS_X448 and isinstance(privatekey, cryptography.hazmat.primitives.asymmetric.x448.X448PrivateKey):
            return self.type == 'X448'
        if CRYPTOGRAPHY_HAS_ED25519 and isinstance(privatekey, cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey):
            return self.type == 'Ed25519'
        if CRYPTOGRAPHY_HAS_ED448 and isinstance(privatekey, cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey):
            return self.type == 'Ed448'
        if isinstance(privatekey, cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePrivateKey):
            if self.type != 'ECC':
                return False
            if self.curve not in self.curves:
                return False
            # Delegate curve comparison to the per-curve 'verify' callback.
            return self.curves[self.curve]['verify'](privatekey)

        return False
def _check_format(self):
if self.format == 'auto_ignore':
return True
try:
with open(self.path, 'rb') as f:
content = f.read()
format = crypto_utils.identify_private_key_format(content)
return format == self._get_wanted_format()
except Exception as dummy:
return False
def dump(self):
"""Serialize the object into a dictionary."""
result = super(PrivateKeyCryptography, self).dump()
result['type'] = self.type
if self.type == 'ECC':
result['curve'] = self.curve
return result
def main():
    """Ansible entry point: select a crypto backend and generate/remove the key."""
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(type='str', default='present', choices=['present', 'absent']),
            size=dict(type='int', default=4096),
            type=dict(type='str', default='RSA', choices=[
                'DSA', 'ECC', 'Ed25519', 'Ed448', 'RSA', 'X25519', 'X448'
            ]),
            curve=dict(type='str', choices=[
                'secp384r1', 'secp521r1', 'secp224r1', 'secp192r1', 'secp256r1',
                'secp256k1', 'brainpoolP256r1', 'brainpoolP384r1', 'brainpoolP512r1',
                'sect571k1', 'sect409k1', 'sect283k1', 'sect233k1', 'sect163k1',
                'sect571r1', 'sect409r1', 'sect283r1', 'sect233r1', 'sect163r2',
            ]),
            force=dict(type='bool', default=False),
            path=dict(type='path', required=True),
            passphrase=dict(type='str', no_log=True),
            cipher=dict(type='str'),
            backup=dict(type='bool', default=False),
            format=dict(type='str', default='auto_ignore', choices=['pkcs1', 'pkcs8', 'raw', 'auto', 'auto_ignore']),
            format_mismatch=dict(type='str', default='regenerate', choices=['regenerate', 'convert']),
            select_crypto_backend=dict(type='str', choices=['auto', 'pyopenssl', 'cryptography'], default='auto'),
        ),
        supports_check_mode=True,
        add_file_common_args=True,
        required_together=[
            ['cipher', 'passphrase']
        ],
        required_if=[
            ['type', 'ECC', ['curve']],
        ],
    )
    # The target directory must already exist; this module does not create it.
    base_dir = os.path.dirname(module.params['path']) or '.'
    if not os.path.isdir(base_dir):
        module.fail_json(
            name=base_dir,
            msg='The directory %s does not exist or the file is not a directory' % base_dir
        )
    backend = module.params['select_crypto_backend']
    if backend == 'auto':
        # Detection what is possible
        can_use_cryptography = CRYPTOGRAPHY_FOUND and CRYPTOGRAPHY_VERSION >= LooseVersion(MINIMAL_CRYPTOGRAPHY_VERSION)
        can_use_pyopenssl = PYOPENSSL_FOUND and PYOPENSSL_VERSION >= LooseVersion(MINIMAL_PYOPENSSL_VERSION)
        # Decision: explicit (non-'auto') ciphers are only supported by pyOpenSSL,
        # so it is preferred in that case; otherwise prefer cryptography.
        if module.params['cipher'] and module.params['passphrase'] and module.params['cipher'] != 'auto':
            # First try pyOpenSSL, then cryptography
            if can_use_pyopenssl:
                backend = 'pyopenssl'
            elif can_use_cryptography:
                backend = 'cryptography'
        else:
            # First try cryptography, then pyOpenSSL
            if can_use_cryptography:
                backend = 'cryptography'
            elif can_use_pyopenssl:
                backend = 'pyopenssl'
        # Success? ('auto' surviving to here means neither library is usable.)
        if backend == 'auto':
            module.fail_json(msg=("Can't detect any of the required Python libraries "
                                  "cryptography (>= {0}) or PyOpenSSL (>= {1})").format(
                MINIMAL_CRYPTOGRAPHY_VERSION,
                MINIMAL_PYOPENSSL_VERSION))
    try:
        if backend == 'pyopenssl':
            if not PYOPENSSL_FOUND:
                module.fail_json(msg=missing_required_lib('pyOpenSSL >= {0}'.format(MINIMAL_PYOPENSSL_VERSION)),
                                 exception=PYOPENSSL_IMP_ERR)
            module.deprecate('The module is using the PyOpenSSL backend. This backend has been deprecated', version='2.13')
            private_key = PrivateKeyPyOpenSSL(module)
        elif backend == 'cryptography':
            if not CRYPTOGRAPHY_FOUND:
                module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
                                 exception=CRYPTOGRAPHY_IMP_ERR)
            private_key = PrivateKeyCryptography(module)
        if private_key.state == 'present':
            # In check mode only report whether a change *would* happen.
            if module.check_mode:
                result = private_key.dump()
                result['changed'] = module.params['force'] or not private_key.check(module)
                module.exit_json(**result)
            private_key.generate(module)
        else:
            if module.check_mode:
                result = private_key.dump()
                result['changed'] = os.path.exists(module.params['path'])
                module.exit_json(**result)
            private_key.remove(module)
        result = private_key.dump()
        module.exit_json(**result)
    except crypto_utils.OpenSSLObjectError as exc:
        module.fail_json(msg=to_native(exc))
if __name__ == '__main__':
    # Module entry point when executed by Ansible.
    main()
| gpl-3.0 |
pichuang/ryu | ryu/app/simple_switch.py | 25 | 3755 | # Copyright (C) 2011 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
An OpenFlow 1.0 L2 learning switch implementation.
"""
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_0
from ryu.lib.mac import haddr_to_bin
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ether_types
class SimpleSwitch(app_manager.RyuApp):
    """OpenFlow 1.0 L2 learning switch.

    Learns source-MAC -> ingress-port mappings from packet-in events,
    installs exact-match flows for known destinations and floods unknown ones.
    """
    OFP_VERSIONS = [ofproto_v1_0.OFP_VERSION]

    def __init__(self, *args, **kwargs):
        super(SimpleSwitch, self).__init__(*args, **kwargs)
        # dpid -> {mac -> port}: per-switch learned forwarding table
        self.mac_to_port = {}

    def add_flow(self, datapath, in_port, dst, actions):
        """Install a permanent exact-match flow for (in_port, dst MAC)."""
        ofproto = datapath.ofproto

        match = datapath.ofproto_parser.OFPMatch(
            in_port=in_port, dl_dst=haddr_to_bin(dst))

        mod = datapath.ofproto_parser.OFPFlowMod(
            datapath=datapath, match=match, cookie=0,
            command=ofproto.OFPFC_ADD, idle_timeout=0, hard_timeout=0,
            priority=ofproto.OFP_DEFAULT_PRIORITY,
            flags=ofproto.OFPFF_SEND_FLOW_REM, actions=actions)
        datapath.send_msg(mod)

    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def _packet_in_handler(self, ev):
        """Learn the source MAC, then forward (installing a flow) or flood."""
        msg = ev.msg
        datapath = msg.datapath
        ofproto = datapath.ofproto

        pkt = packet.Packet(msg.data)
        eth = pkt.get_protocol(ethernet.ethernet)

        if eth.ethertype == ether_types.ETH_TYPE_LLDP:
            # ignore lldp packet
            return
        dst = eth.dst
        src = eth.src

        dpid = datapath.id
        self.mac_to_port.setdefault(dpid, {})

        self.logger.info("packet in %s %s %s %s", dpid, src, dst, msg.in_port)

        # learn a mac address to avoid FLOOD next time.
        self.mac_to_port[dpid][src] = msg.in_port

        if dst in self.mac_to_port[dpid]:
            out_port = self.mac_to_port[dpid][dst]
        else:
            out_port = ofproto.OFPP_FLOOD

        actions = [datapath.ofproto_parser.OFPActionOutput(out_port)]

        # install a flow to avoid packet_in next time
        if out_port != ofproto.OFPP_FLOOD:
            self.add_flow(datapath, msg.in_port, dst, actions)

        # Only attach raw packet data when the switch did not buffer the packet.
        data = None
        if msg.buffer_id == ofproto.OFP_NO_BUFFER:
            data = msg.data

        out = datapath.ofproto_parser.OFPPacketOut(
            datapath=datapath, buffer_id=msg.buffer_id, in_port=msg.in_port,
            actions=actions, data=data)
        datapath.send_msg(out)

    @set_ev_cls(ofp_event.EventOFPPortStatus, MAIN_DISPATCHER)
    def _port_status_handler(self, ev):
        """Log port add / delete / modify notifications from the switch."""
        msg = ev.msg
        reason = msg.reason
        port_no = msg.desc.port_no

        ofproto = msg.datapath.ofproto
        if reason == ofproto.OFPPR_ADD:
            self.logger.info("port added %s", port_no)
        elif reason == ofproto.OFPPR_DELETE:
            self.logger.info("port deleted %s", port_no)
        elif reason == ofproto.OFPPR_MODIFY:
            self.logger.info("port modified %s", port_no)
        else:
            self.logger.info("Illeagal port state %s %s", port_no, reason)
| apache-2.0 |
SecurityNik/QRadar---Threat-Intelligence-On-The-Cheap | SecurityNikThreatIntel.py | 1 | 13383 | #!/usr/bin/env python
# This is code is designed to download list of known bad IPs and domains
# Once the lists have been downloaded, 2 reference sets are created
# 1 for IPs and 1 for domains
# Manual creation of QRadar rules are then done. These rules are then run against these
# list to identify known bad IPs and Domain
#
# SecurityNikThreatIntel.py v1.0
# Author: Nik Alleyne, CISSP|GCIH|A < nikalleyne@gmail.com >
# Date: 2015-02-25
# Disclaimer: In no way am I responsible for any damages which you may
# cause to your system by running this script.
from os import uname, path, system, remove, getcwd
from shutil import rmtree,copytree
from subprocess import call
from sys import exit
from time import sleep
# This function checks to see if this script is running on Linux.
def check_os():
    """Verify the script runs on Linux with QRadar installed; exit(0) otherwise."""
    conf_dir = '/opt/qradar/conf/'
    version_tool = '/opt/qradar/bin/myver'

    print(' Checking OS ... ')
    if uname()[0] not in ('Linux', 'linux'):
        print(' Running this is a waste of your time. ')
        print(' This script is SPECIFICALLY for QRadar ')
        exit(0)

    # Show the distribution release information when available.
    if path.exists('/etc/system-release') and path.isfile('/etc/system-release'):
        call(['cat', '/etc/system-release'])
    else:
        print('\n Looks like you are running Linux. ')
        print('\n However, I am unable to determine your version info. ')

    print(' \n Looking for an installed version of QRadar')
    if path.exists(conf_dir) and path.isdir(conf_dir):
        print(' \n looks like you are running QRadar version ... ')
        call([version_tool])
        print(' \n Good stuff ... \n Blast off =>>>>>>> ')
    else:
        print(' An installed version of QRadar was not found on your system ')
        print(' This script will not work for you, it was designed to be used on box running IBM QRadar ')
        print(' Exiting ... ')
        exit(0)
    sleep(2)
# This function downloads a list of known bad IPs and
def grab_ip_list():
    """Download the known-bad IP feeds into a hidden working directory.

    First run downloads into '.ip_tmp/'; later runs download into
    '.ip_tmp_path/' so compare_ip_dirs() can decide whether anything new
    was fetched.
    """
    # NOTE: every URL must be its own list element.  The original list was
    # missing a comma after the dshield 'topips.txt' entry, so Python
    # concatenated it with the feodotracker URL into one broken string,
    # silently dropping the feodotracker feed.  Duplicate entries
    # (malc0de, zeustracker badips) have also been removed.
    bad_ip_list = [
        'http://malc0de.com/bl/IP_Blacklist.txt',
        'http://www.malwaredomainlist.com/hostslist/ip.txt',
        'https://zeustracker.abuse.ch/blocklist.php?download=badips',
        'http://www.spamhaus.org/drop/drop.txt',
        'http://www.spamhaus.org/drop/edrop.txt',
        'http://www.spamhaus.org/drop/drop.lasso',
        'http://www.okean.com/chinacidr.txt',
        'http://myip.ms/files/blacklist/general/latest_blacklist.txt',
        'http://myip.ms/files/blacklist/csf/latest_blacklist.txt',
        'http://rules.emergingthreats.net/fwrules/emerging-Block-IPs.txt',
        'http://rules.emergingthreats.net/blockrules/compromised-ips.txt',
        'http://feeds.dshield.org/block.txt',
        'http://feeds.dshield.org/top10-2.txt',
        'http://www.dshield.org/feeds/topips.txt',
        'https://feodotracker.abuse.ch/blocklist/?download=ipblocklist',
        'https://palevotracker.abuse.ch/blocklists.php?download=ipblocklist',
    ]

    # Check to see if ip_tmp/ folder exists - this determines whether it is
    # the first time the script is being run.
    if path.exists('.ip_tmp/') and path.isdir('.ip_tmp/'):
        ip_path = '.ip_tmp_path/'
    else:
        ip_path = '.ip_tmp/'

    print(' Preparing to download list of bad IP addresses ')
    for link in bad_ip_list:
        print(link)
        try:
            call(['wget', link, '--directory-prefix=' + ip_path, '--tries=2',
                  '--continue', '--timestamping', '--timeout=5',
                  '--random-wait', '--no-proxy', '--inet4-only'])
            print(' \n %s \n retrieved successfully \n' % link)
        except Exception:
            # One failing feed should not abort the remaining downloads.
            print(' A problem occurred while downloading IP information from %s ' % link)
            print(' This link may be broken. Please copy the URL and paste into a browser to ensure it is accessible')
        sleep(2)
    print(' \n Looks like we have some baddddd IPs! ')
# This fuction download the list of malicious and or suspected domains
# DO NOT add entry to this list unless you are sure what you are doing
# These files are in different formats, thus may need to be manipulated the files individually
def grab_dns_list():
    """Download lists of malicious and/or suspected domains.

    DO NOT add entries unless you know what you are doing: the feeds use
    different formats and are massaged individually later on.
    """
    bad_dns_list = [
        'http://www.joewein.net/dl/bl/dom-bl.txt',
        'http://www.joewein.net/dl/bl/dom-bl-base.txt',
        'http://mirror1.malwaredomains.com/files/immortal_domains.txt',
        'http://mirror1.malwaredomains.com/files/dynamic_dns.txt',
        'https://zeustracker.abuse.ch/blocklist.php?download=baddomains',
        'http://www.malwaredomainlist.com/hostslist/hosts.txt',
        'http://malc0de.com/bl/BOOT',
        'http://malc0de.com/bl/ZONES'
    ]

    # First run downloads into .dns_tmp; later runs stage into .dns_tmp_path.
    if path.exists('.dns_tmp') and path.isdir('.dns_tmp'):
        dns_path = '.dns_tmp_path'
    else:
        dns_path = '.dns_tmp'

    try:
        print(' Preparing to download list of bad Domain ')
        for dns in bad_dns_list:
            print(dns)
            call(['wget', dns, '--directory-prefix=' + dns_path, '--tries=2',
                  '--continue', '--timestamping', '--timeout=5',
                  '--random-wait', '--no-proxy', '--inet4-only'])
            print(' \n %s \n retrieved successfully \n' % dns)
            sleep(2)
    except Exception:
        print(' A problem occurred while downloading DNS information from %s ' % dns)
        print(' This link may be broken. Please copy the URL and paste into a browser to ensure it is accessible')
    else:
        # Looks like all went well
        print(' \n Looks like we have some baddddd domains! ')
# Checking the directories to see if the last run added new info
def compare_ip_dirs():
    """Replace '.ip_tmp' with freshly staged '.ip_tmp_path' when new content exists; exit otherwise."""
    print(' Checking if there is need for an update .... ')
    #first check to see if .ip_tmp_path exists
    if ( path.exists('.ip_tmp_path') and (path.isdir('.ip_tmp_path')) ):
        print(' Give me just a few seconds more')
        sleep(2)
        # NOTE(review): os.path.getsize() on a directory returns the size of
        # the directory *entry*, not its contents, so this "new content"
        # heuristic is unreliable -- confirm intent before changing.
        if ( int(path.getsize('.ip_tmp')) <= int(path.getsize('.ip_tmp_path')) ):
            print(' \n Looks like new content is available ')
            # copying new content in .ip_tmp_path to .ip_tmp
            try:
                rmtree('.ip_tmp')
                copytree('.ip_tmp_path','.ip_tmp')
            except:
                print(' Failed to copy new data ... ')
                print(' Exiting ... ')
                exit(0)
            else:
                print(' Successfully moved new data')
        else:
            # Nothing new: stop the whole script here.
            print(' Nothing new was added ... ')
            print(' Exiting ... ')
            exit(0)
    else:
        # No staging directory yet: first run, nothing to compare.
        print(' This is first run ... \n moving on ... ')
        sleep(2)
# Comparing the DNS folders to see if new content may have been added
def compare_dns_dirs():
    """Replace '.dns_tmp' with freshly staged '.dns_tmp_path' when new content exists; exit otherwise.

    On the first run (no '.dns_tmp_path' yet) it simply returns.
    """
    print(' Checking if there is need for an update .... ')
    # BUG FIX: this function previously inspected the *IP* staging
    # directories ('.ip_tmp' / '.ip_tmp_path') -- a copy/paste slip from
    # compare_ip_dirs() -- while moving the *DNS* directories below.
    # first check to see if .dns_tmp_path exists
    if path.exists('.dns_tmp_path') and path.isdir('.dns_tmp_path'):
        print(' Give me just a few seconds more')
        sleep(2)
        # NOTE(review): getsize() on a directory does not reflect content
        # size; this mirrors the heuristic used by compare_ip_dirs().
        if int(path.getsize('.dns_tmp')) <= int(path.getsize('.dns_tmp_path')):
            print(' \n Looks like new content is available ')
            # copying new content in .dns_tmp_path to .dns_tmp
            try:
                rmtree('.dns_tmp')
                copytree('.dns_tmp_path', '.dns_tmp')
            except Exception:
                print(' Failed to copy new data ... ')
                print(' Exiting ... ')
                exit(0)
            else:
                print(' Successfully moved new data')
        else:
            print(' Nothing new was added ... ')
            print(' Exiting ... ')
            exit(0)
    else:
        print(' This is first run ... \n moving on ... ')
        sleep(2)
# Now that the files have been successfully downloaded, let's combine them all
def combine_ip_files():
    """Merge all downloaded IP feeds into a deduplicated SecurityNikBadIPs.txt."""
    print(' \n Checking for .ip_tmp folder ... ')
    sleep(2)
    if ( path.exists('.ip_tmp') and path.isdir('.ip_tmp') ):
        print(' directory .ip_tmp/ found ')
        # Extract dotted-quad IPv4 addresses from every downloaded feed,
        # sort and keep unique entries.
        # NOTE(review): 'uniq --check-chars=15' compares only the first 15
        # characters of each line, which can merge distinct addresses --
        # confirm this is intended before relying on exact output.
        system('cat .ip_tmp/* | grep --perl-regexp --only-matching "\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}" | sort -i | uniq --unique --check-chars=15 > SecurityNikBadIPs.txt')
        if ( path.exists('SecurityNikBadIPs.txt') and path.isfile('SecurityNikBadIPs.txt') ):
            print(' Successfully created file SecurityNikBadIPs.txt ')
        else:
            print(' Unable to create SecurityNikBadIPs.txt file ')
            print(' The program will now exit ... Exiting ... ')
            exit(0)
    else:
        print(' \n ip_tmp/ directory not found ')
        print(' Unable to continue ... Exiting!')
        exit(0)
# This function manipulates the downloaded DNS files, so that all can be placed into one standard file
def combine_dns_files():
    """Normalize and merge the downloaded domain feeds into SecurityNikBadDomains.txt.

    Each feed has a different format, so each is massaged individually into
    the hidden staging file '.SecurityNikBadDomains.txt' before the final
    sorted, deduplicated list is written.
    """
    print(' Combining DNS files ')
    if path.exists('.dns_tmp') and path.isdir('.dns_tmp'):
        print(' directory .dns_tmp/ found ')
        try:
            print(' Combining downloaded files into .... ')
            system('cat .dns_tmp/dom-bl.txt > .SecurityNikBadDomains.txt')
            system('cat .dns_tmp/dom-bl-base.txt >> .SecurityNikBadDomains.txt')
            system("cat .dns_tmp/hosts.txt | awk '/127.0.0.1/ { print $2 }' >> .SecurityNikBadDomains.txt")
            # BUG FIX: this line previously appended to the *final*
            # 'SecurityNikBadDomains.txt' (missing leading dot), so the
            # immortal_domains entries were discarded when the staging file
            # was sorted into the final file below.
            system('cat .dns_tmp/immortal_domains.txt | grep -i -P "This is a list|^$" -v >> .SecurityNikBadDomains.txt')
            system('cat .dns_tmp/BOOT | grep -i PRIMARY | cut -f 2 -d " " | grep -i -v -P "ibm\.com" -v >> .SecurityNikBadDomains.txt')
            system('cat .dns_tmp/dynamic_dns.txt | grep -P -v "^#|^$" | cut -f 1 -s >> .SecurityNikBadDomains.txt')
            system('cat .dns_tmp/blocklist.php\?download\=baddomains | grep -P -v "^#|^$" >> .SecurityNikBadDomains.txt')
            # Final pass: sort and deduplicate into the published file.
            system('cat .SecurityNikBadDomains.txt | sort -i | uniq --unique > SecurityNikBadDomains.txt')
        except Exception:
            print(' Looks like an error occurred while combining the files')
            print(' Please retry later ... \n Exiting ... ')
            exit(0)
        else:
            print(' files successfully combined ')
            print(' A list of known bad domains can be found in SecurityNikBadDomains.txt')
            remove('.SecurityNikBadDomains.txt')
    else:
        print(' \n dns_tmp/ directory not found ')
        print(' The program will now exit ... Exiting ... ')
        exit(0)
# This function does all the work for the IP reference set
def verify_create_ip_reference_set():
    """Create (if missing) the SecurityNik_IP_Darklist reference set and load the bad-IP file into it."""
    reference_set_name = 'SecurityNik_IP_Darklist'
    ip_txt = getcwd() + '/SecurityNikBadIPs.txt'
    rows = []

    print('Checking to see if the reference set %s already exists' % reference_set_name)
    # Ask the QRadar postgres database whether the reference set exists.
    with open('.count.txt', 'w') as f:
        call(["psql", "-U", "qradar", "--command=SELECT COUNT(*) FROM reference_data WHERE name='SecurityNik_IP_Darklist'"], stdout=f)
    # Resting ... I'm tired
    sleep(2)
    with open('.count.txt', 'r') as f:
        for line in f.readlines():
            rows.append(line.strip())

    # psql output: header, separator, then the COUNT(*) value on row index 2.
    if (rows[2].strip() != '0'):
        print(' Looks like reference set already exists \n ')
    else:
        # BUG FIX: the placeholder used to be embedded *inside* the string
        # literal (' ... %reference_set_name '), so it was never substituted.
        print(' Reference Set %s not found ' % reference_set_name)
        print(' Looks like we will have to create this bad boy ...')
        try:
            call(['/opt/qradar/bin/ReferenceSetUtil.sh', 'create', reference_set_name, 'IP'])
            print(' Successfully created reference set %s \n ' % reference_set_name)
        except Exception:
            # This does not catch any java exception that may be created.
            # BUG FIX: the original referenced the undefined name 'reference_set'.
            print(' Error occurred while creating reference set %s ' % reference_set_name)
            print(' You may create the reference set %s manually if needed ' % reference_set_name)
            exit(0)

    print(' Loading information into reference set %s ' % reference_set_name)
    try:
        call(['/opt/qradar/bin/ReferenceSetUtil.sh', 'load', reference_set_name, ip_txt])
        print(' \n You may need to verify that you have rules created to use %s ' % reference_set_name)
    except Exception:
        print(' An error occurred while loading the reference set ... ')
        print(' Please retry later!')
        exit(0)
    remove('.count.txt')
# This function creates the DNS reference set
def verify_create_dns_reference_set():
    """Create (if missing) the SecurityNik_DNS_Darklist reference set and load the bad-domain file into it."""
    reference_set_name = 'SecurityNik_DNS_Darklist'
    dns_txt = getcwd() + '/SecurityNikBadDomains.txt'
    dns_rows = []

    print('Checking to see if the reference set %s already exists' % reference_set_name)
    # Ask the QRadar postgres database whether the reference set exists.
    with open('.count.txt', 'w') as f:
        call(["psql", "-U", "qradar", "--command=SELECT COUNT(*) FROM reference_data WHERE name='SecurityNik_DNS_Darklist'"], stdout=f)
    # Taking a nap ...
    sleep(2)
    with open('.count.txt', 'r') as f:
        for line in f.readlines():
            dns_rows.append(line.strip())

    # psql output: header, separator, then the COUNT(*) value on row index 2.
    if (dns_rows[2].strip() != '0'):
        print(' Looks like reference set already exists \n ')
    else:
        print(' Reference Set %s not found ' % reference_set_name)
        print(' Looks like we will have to create this bad boy ...')
        try:
            call(['/opt/qradar/bin/ReferenceSetUtil.sh', 'create', reference_set_name, 'ALN'])
            print(' Successfully created reference set %s ' % reference_set_name)
        except Exception:
            # This does not catch any java exception that may be created.
            # BUG FIX: the original referenced the undefined name 'reference_set'.
            print(' Error occurred while creating reference set %s ' % reference_set_name)
            print(' You may create the reference set %s manually if needed ' % reference_set_name)
            exit(0)

    print(' Loading information into reference set %s ' % reference_set_name)
    try:
        call(['/opt/qradar/bin/ReferenceSetUtil.sh', 'load', reference_set_name, dns_txt])
        print(' \n You may need to verify that you have rules created to use %s ' % reference_set_name)
    except Exception:
        print(' An error occurred while loading the reference set ... ')
        print(' Please retry later!')
        exit(0)
    remove('.count.txt')
# Main Function
def main():
    """Build/refresh the IP and DNS darklist reference sets in QRadar."""
    call('clear')
    check_os()
    # IP reference set pipeline: download, compare, merge, load.
    for step in (grab_ip_list, compare_ip_dirs, combine_ip_files,
                 verify_create_ip_reference_set):
        step()
    # DNS reference set pipeline: download, compare, merge, load.
    for step in (grab_dns_list, compare_dns_dirs, combine_dns_files,
                 verify_create_dns_reference_set):
        step()
if __name__ == "__main__":
    # Script entry point.
    main()
| gpl-3.0 |
infowantstobeseen/pyglet-darwincore | contrib/layout/layout/builders/htmlstylesheet.py | 29 | 2993 | #!/usr/bin/env python
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
from layout.css import *
__all__ = ['html4_default_stylesheet']
# Default stylesheet for HTML 4
# http://www.w3.org/TR/CSS21/sample.html
# Parsed default CSS rules applied before any author styles (the string below
# is CSS data and must remain verbatim; see the W3C sample stylesheet link above).
html4_default_stylesheet = Stylesheet('''
html, address,
blockquote,
body, dd, div,
dl, dt, fieldset, form,
frame, frameset,
h1, h2, h3, h4,
h5, h6, noframes,
ol, p, ul, center,
dir, hr, menu, pre { display: block }
html { font-family: serif }
li { display: list-item }
head { display: none }
table { display: table }
tr { display: table-row }
thead { display: table-header-group }
tbody { display: table-row-group }
tfoot { display: table-footer-group }
col { display: table-column }
colgroup { display: table-column-group }
td, th { display: table-cell }
caption { display: table-caption }
th { font-weight: bolder; text-align: center }
caption { text-align: center }
body { margin: 8px }
h1 { font-size: 2em; margin: .67em 0 }
h2 { font-size: 1.5em; margin: .75em 0 }
h3 { font-size: 1.17em; margin: .83em 0 }
h4, p,
blockquote, ul,
fieldset, form,
ol, dl, dir,
menu { margin: 1.12em 0 }
h5 { font-size: .83em; margin: 1.5em 0 }
h6 { font-size: .75em; margin: 1.67em 0 }
h1, h2, h3, h4,
h5, h6, b,
strong { font-weight: bolder }
blockquote { margin-left: 40px; margin-right: 40px }
i, cite, em,
var, address { font-style: italic }
pre, tt, code,
kbd, samp { font-family: monospace }
pre { white-space: pre }
button, textarea,
input, select { display: inline-block }
big { font-size: 1.17em }
small, sub, sup { font-size: .83em }
sub { vertical-align: sub }
sup { vertical-align: super }
table { border-spacing: 2px; }
thead, tbody,
tfoot { vertical-align: middle }
td, th { vertical-align: inherit }
s, strike, del { text-decoration: line-through }
hr { border: 1px inset }
ol, ul, dir,
menu, dd { margin-left: 40px }
ol { list-style-type: decimal }
ol ul, ul ol,
ul ul, ol ol { margin-top: 0; margin-bottom: 0 }
u, ins { text-decoration: underline }
br:before { content: "\A" }
/* XXX pseudo elements not supported yet
:before, :after { white-space: pre-line } */
center { text-align: center }
:link, :visited { text-decoration: underline }
a:hover { color: red } /* XXX outside CSS recommendation */
:focus { outline: thin dotted invert }
/* Begin bidirectionality settings (do not change) */
BDO[dir="ltr"] { direction: ltr; unicode-bidi: bidi-override }
BDO[dir="rtl"] { direction: rtl; unicode-bidi: bidi-override }
*[dir="ltr"] { direction: ltr; unicode-bidi: embed }
*[dir="rtl"] { direction: rtl; unicode-bidi: embed }
''')
| bsd-3-clause |
endlessm/chromium-browser | third_party/angle/third_party/VK-GL-CTS/src/external/fetch_sources.py | 1 | 11339 | # -*- coding: utf-8 -*-
#-------------------------------------------------------------------------
# drawElements Quality Program utilities
# --------------------------------------
#
# Copyright 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------------------
import os
import sys
import shutil
import tarfile
import hashlib
import argparse
import subprocess
import ssl
import stat
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "scripts"))
from build.common import *
EXTERNAL_DIR = os.path.realpath(os.path.normpath(os.path.dirname(__file__)))
def computeChecksum (data):
	"""Return the lowercase hex SHA-256 digest of the given bytes."""
	digest = hashlib.sha256(data)
	return digest.hexdigest()
def onReadonlyRemoveError (func, path, exc_info):
	"""shutil.rmtree onerror hook: clear the read-only bit, then delete the file."""
	del func, exc_info  # unused, but required by the onerror signature
	os.chmod(path, stat.S_IWRITE)
	os.unlink(path)
class Source:
	"""Base class for an external dependency stored under EXTERNAL_DIR/<baseDir>/<extractDir>."""
	def __init__(self, baseDir, extractDir):
		self.baseDir = baseDir
		self.extractDir = extractDir

	def clean (self):
		"""Remove the extracted tree, stripping read-only git pack files first."""
		targetPath = os.path.join(EXTERNAL_DIR, self.baseDir, self.extractDir)

		# git pack files are read-only; rmtree needs the onerror hook for them.
		packDir = os.path.join(targetPath, ".git", "objects", "pack")
		if os.path.exists(packDir):
			shutil.rmtree(packDir, onerror = onReadonlyRemoveError)
		if os.path.exists(targetPath):
			shutil.rmtree(targetPath, ignore_errors=False)
class SourcePackage (Source):
	"""An external dependency distributed as a tar archive.

	The archive is fetched to <baseDir>/packages/<filename>, verified
	against a SHA-256 checksum, and extracted to <baseDir>/<extractDir>.
	"""
	def __init__(self, url, filename, checksum, baseDir, extractDir = "src", postExtract=None):
		Source.__init__(self, baseDir, extractDir)
		self.url = url
		self.filename = filename
		self.checksum = checksum
		self.archiveDir = "packages"
		self.postExtract = postExtract  # optional callable invoked on the extracted tree

	def clean (self):
		"""Remove both the extracted tree and the downloaded archives."""
		Source.clean(self)
		self.removeArchives()

	def update (self, cmdProtocol = None):
		"""Fetch the archive if needed and (re-)extract when the stored checksum differs."""
		if not self.isArchiveUpToDate():
			self.fetchAndVerifyArchive()

		if self.getExtractedChecksum() != self.checksum:
			Source.clean(self)
			self.extract()
			self.storeExtractedChecksum(self.checksum)

	# BUG FIX: the three helpers below previously referenced the module-level
	# loop variable 'pkg' instead of 'self', which only worked by accident
	# when called from the global update loop.
	def removeArchives (self):
		archiveDir = os.path.join(EXTERNAL_DIR, self.baseDir, self.archiveDir)
		if os.path.exists(archiveDir):
			shutil.rmtree(archiveDir, ignore_errors=False)

	def isArchiveUpToDate (self):
		"""Return True when the downloaded archive exists and matches the expected checksum."""
		archiveFile = os.path.join(EXTERNAL_DIR, self.baseDir, self.archiveDir, self.filename)
		if os.path.exists(archiveFile):
			return computeChecksum(readBinaryFile(archiveFile)) == self.checksum
		else:
			return False

	def getExtractedChecksumFilePath (self):
		# Marker file recording the checksum of the currently extracted archive.
		return os.path.join(EXTERNAL_DIR, self.baseDir, self.archiveDir, "extracted")

	def getExtractedChecksum (self):
		extractedChecksumFile = self.getExtractedChecksumFilePath()
		if os.path.exists(extractedChecksumFile):
			return readFile(extractedChecksumFile)
		else:
			return None

	def storeExtractedChecksum (self, checksum):
		checksum_bytes = checksum.encode("utf-8")
		writeBinaryFile(self.getExtractedChecksumFilePath(), checksum_bytes)

	def connectToUrl (self, url):
		"""Open 'url', honouring the global --insecure flag (skips TLS verification)."""
		result = None

		if sys.version_info < (3, 0):
			from urllib2 import urlopen
		else:
			from urllib.request import urlopen

		if args.insecure:
			print("Ignoring certificate checks")
			ssl_context = ssl._create_unverified_context()
			result = urlopen(url, context=ssl_context)
		else:
			result = urlopen(url)

		return result

	def fetchAndVerifyArchive (self):
		"""Download the archive and raise if its checksum does not match."""
		print("Fetching %s" % self.url)

		req = self.connectToUrl(self.url)
		data = req.read()
		checksum = computeChecksum(data)
		dstPath = os.path.join(EXTERNAL_DIR, self.baseDir, self.archiveDir, self.filename)

		if checksum != self.checksum:
			raise Exception("Checksum mismatch for %s, expected %s, got %s" % (self.filename, self.checksum, checksum))

		if not os.path.exists(os.path.dirname(dstPath)):
			os.mkdir(os.path.dirname(dstPath))

		writeBinaryFile(dstPath, data)

	def extract (self):
		"""Unpack the archive (must contain exactly one top-level directory) into extractDir."""
		print("Extracting %s to %s/%s" % (self.filename, self.baseDir, self.extractDir))

		srcPath = os.path.join(EXTERNAL_DIR, self.baseDir, self.archiveDir, self.filename)
		tmpPath = os.path.join(EXTERNAL_DIR, ".extract-tmp-%s" % self.baseDir)
		dstPath = os.path.join(EXTERNAL_DIR, self.baseDir, self.extractDir)
		archive = tarfile.open(srcPath)

		if os.path.exists(tmpPath):
			shutil.rmtree(tmpPath, ignore_errors=False)

		os.mkdir(tmpPath)

		archive.extractall(tmpPath)
		archive.close()

		extractedEntries = os.listdir(tmpPath)
		if len(extractedEntries) != 1 or not os.path.isdir(os.path.join(tmpPath, extractedEntries[0])):
			raise Exception("%s doesn't contain single top-level directory" % self.filename)

		topLevelPath = os.path.join(tmpPath, extractedEntries[0])

		if not os.path.exists(dstPath):
			os.mkdir(dstPath)

		# Move entries one by one so an unexpected collision fails loudly.
		for entry in os.listdir(topLevelPath):
			if os.path.exists(os.path.join(dstPath, entry)):
				raise Exception("%s exists already" % entry)
			shutil.move(os.path.join(topLevelPath, entry), dstPath)

		shutil.rmtree(tmpPath, ignore_errors=True)

		if self.postExtract != None:
			self.postExtract(dstPath)
class SourceFile (Source):
	"""An external dependency consisting of a single file, verified by SHA-256."""
	def __init__(self, url, filename, checksum, baseDir, extractDir = "src"):
		Source.__init__(self, baseDir, extractDir)
		self.url = url
		self.filename = filename
		self.checksum = checksum

	def update (self, cmdProtocol = None):
		"""Re-download the file when the on-disk copy is missing or has a stale checksum."""
		if not self.isFileUpToDate():
			Source.clean(self)
			self.fetchAndVerifyFile()

	def isFileUpToDate (self):
		# BUG FIX: previously referenced the module-level loop variable 'pkg'
		# instead of 'self', which only worked by accident when called from
		# the global update loop.
		file = os.path.join(EXTERNAL_DIR, self.baseDir, self.extractDir, self.filename)
		if os.path.exists(file):
			data = readFile(file)
			return computeChecksum(data.encode('utf-8')) == self.checksum
		else:
			return False

	def connectToUrl (self, url):
		"""Open 'url', honouring the global --insecure flag (skips TLS verification)."""
		result = None

		if sys.version_info < (3, 0):
			from urllib2 import urlopen
		else:
			from urllib.request import urlopen

		if args.insecure:
			print("Ignoring certificate checks")
			ssl_context = ssl._create_unverified_context()
			result = urlopen(url, context=ssl_context)
		else:
			result = urlopen(url)

		return result

	def fetchAndVerifyFile (self):
		"""Download the file and raise if its checksum does not match."""
		print("Fetching %s" % self.url)

		req = self.connectToUrl(self.url)
		data = req.read()
		checksum = computeChecksum(data)
		dstPath = os.path.join(EXTERNAL_DIR, self.baseDir, self.extractDir, self.filename)

		if checksum != self.checksum:
			raise Exception("Checksum mismatch for %s, expected %s, got %s" % (self.filename, self.checksum, checksum))

		if not os.path.exists(os.path.dirname(dstPath)):
			os.mkdir(os.path.dirname(dstPath))

		writeBinaryFile(dstPath, data)
class GitRepo (Source):
    """Source fetched by cloning a git repository at a pinned revision.

    Both https and ssh remotes are supported; the protocol is taken from the
    command line when given, otherwise auto-detected from the enclosing
    checkout's 'origin' remote.
    """

    def __init__(self, httpsUrl, sshUrl, revision, baseDir, extractDir = "src", removeTags = None):
        Source.__init__(self, baseDir, extractDir)
        self.httpsUrl = httpsUrl
        self.sshUrl = sshUrl
        self.revision = revision
        # Avoid the shared-mutable-default pitfall; None means "no tags".
        self.removeTags = removeTags if removeTags is not None else []

    def detectProtocol(self, cmdProtocol = None):
        # reuse parent repo protocol
        proc = subprocess.Popen(['git', 'ls-remote', '--get-url', 'origin'], stdout=subprocess.PIPE)
        (stdout, stderr) = proc.communicate()

        if proc.returncode != 0:
            raise Exception("Failed to execute 'git ls-remote origin', got %d" % proc.returncode)
        # BUG FIX: on Python 3 communicate() returns bytes, so comparing the
        # prefix against a str always failed and ssh remotes were never
        # detected.  Decode before inspecting the URL scheme.
        if isinstance(stdout, bytes):
            stdout = stdout.decode('utf-8', 'replace')
        if (stdout[:3] == 'ssh') or (stdout[:3] == 'git'):
            protocol = 'ssh'
        else:
            # remote 'origin' doesn't exist, assume 'https' as checkout protocol
            protocol = 'https'

        return protocol

    def selectUrl(self, cmdProtocol = None):
        # Pick the clone URL for the requested (or detected) protocol,
        # falling back to https whenever detection fails.
        try:
            if cmdProtocol == None:
                protocol = self.detectProtocol(cmdProtocol)
            else:
                protocol = cmdProtocol
        except:
            # fallback to https on any issues
            protocol = 'https'

        if protocol == 'ssh':
            if self.sshUrl != None:
                url = self.sshUrl
            else:
                assert self.httpsUrl != None
                url = self.httpsUrl
        else:
            assert protocol == 'https'
            url = self.httpsUrl

        assert url != None
        return url

    def update (self, cmdProtocol = None):
        # Clone on first use, then fetch and check out the pinned revision.
        fullDstPath = os.path.join(EXTERNAL_DIR, self.baseDir, self.extractDir)
        url = self.selectUrl(cmdProtocol)

        if not os.path.exists(os.path.join(fullDstPath, '.git')):
            execute(["git", "clone", "--no-checkout", url, fullDstPath])

        pushWorkingDir(fullDstPath)
        try:
            # Drop any local tags the upstream repo force-moves; otherwise
            # the subsequent fetch would fail on the tag conflict.
            for tag in self.removeTags:
                proc = subprocess.Popen(['git', 'tag', '-l', tag], stdout=subprocess.PIPE)
                (stdout, stderr) = proc.communicate()
                if proc.returncode == 0:
                    execute(["git", "tag", "-d", tag])
            execute(["git", "fetch", "--tags", url, "+refs/heads/*:refs/remotes/origin/*"])
            execute(["git", "checkout", self.revision])
        finally:
            popWorkingDir()
def postExtractLibpng (path):
    # libpng ships a prebuilt configuration header; copy it into place so
    # the extracted source tree is buildable without running configure.
    prebuiltHeader = os.path.join(path, "scripts", "pnglibconf.h.prebuilt")
    targetHeader = os.path.join(path, "pnglibconf.h")
    shutil.copy(prebuiltHeader, targetHeader)
# All external sources this script manages.  Each entry knows how to fetch,
# verify and unpack itself under EXTERNAL_DIR; entries are independent.
PACKAGES = [
    SourcePackage(
        "http://zlib.net/zlib-1.2.11.tar.gz",
        "zlib-1.2.11.tar.gz",
        "c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1",
        "zlib"),
    SourcePackage(
        "http://prdownloads.sourceforge.net/libpng/libpng-1.6.27.tar.gz",
        "libpng-1.6.27.tar.gz",
        "c9d164ec247f426a525a7b89936694aefbc91fb7a50182b198898b8fc91174b4",
        "libpng",
        # libpng needs its prebuilt config header copied into place after unpack.
        postExtract = postExtractLibpng),
    SourceFile(
        "https://raw.githubusercontent.com/baldurk/renderdoc/v1.1/renderdoc/api/app/renderdoc_app.h",
        "renderdoc_app.h",
        "e7b5f0aa5b1b0eadc63a1c624c0ca7f5af133aa857d6a4271b0ef3d0bdb6868e",
        "renderdoc"),
    # Git repositories are pinned to exact revisions; a sshUrl of None means
    # https is the only checkout option.
    GitRepo(
        "https://github.com/KhronosGroup/SPIRV-Tools.git",
        None,
        "34be23373b9e73694c3b214ba857283bad65aedb",
        "spirv-tools"),
    GitRepo(
        "https://github.com/KhronosGroup/glslang.git",
        None,
        "b5f003d7a3ece37db45578a8a3140b370036fc64",
        "glslang",
        # glslang force-moves this tag upstream; drop the local copy so the
        # fetch doesn't fail on the tag conflict.
        removeTags = ["master-tot"]),
    GitRepo(
        "https://github.com/KhronosGroup/SPIRV-Headers.git",
        None,
        "f8bf11a0253a32375c32cad92c841237b96696c0",
        "spirv-headers"),
    GitRepo(
        "https://github.com/google/amber.git",
        None,
        "a40bef4dba98d2d80b48e5a940d8574fbfceb197",
        "amber"),
]
def parseArgs ():
    """Parse command-line options; reject --insecure on too-old interpreters."""
    # Minimum interpreter versions whose ssl module supports disabling
    # certificate verification.
    minVersions = ((2, 7, 9), (3, 4, 3))
    minVersionsStr = ' or '.join('.'.join(str(part) for part in version) for version in minVersions)

    parser = argparse.ArgumentParser(description = "Fetch external sources")
    parser.add_argument('--clean', dest='clean', action='store_true', default=False,
                        help='Remove sources instead of fetching')
    parser.add_argument('--insecure', dest='insecure', action='store_true', default=False,
                        help="Disable certificate check for external sources."
                             " Minimum python version required " + minVersionsStr)
    parser.add_argument('--protocol', dest='protocol', default=None, choices=['ssh', 'https'],
                        help="Select protocol to checkout git repositories.")

    args = parser.parse_args()

    if args.insecure:
        # Only the minimum version matching the running major release applies.
        for minVersion in minVersions:
            if sys.version_info.major != minVersion[0]:
                continue
            if sys.version_info < minVersion:
                parser.error("For --insecure minimum required python version is " +
                             minVersionsStr)
            break

    return args
if __name__ == "__main__":
    # Entry point: '--clean' removes previously fetched sources, otherwise
    # every package is brought up to date (downloaded/cloned as needed).
    args = parseArgs()

    for pkg in PACKAGES:
        if args.clean:
            pkg.clean()
        else:
            pkg.update(args.protocol)
| bsd-3-clause |
jshiv/turntable | test/lib/python2.7/site-packages/scipy/linalg/tests/test_decomp.py | 7 | 77994 | """ Test functions for linalg.decomp module
"""
from __future__ import division, print_function, absolute_import
__usage__ = """
Build linalg:
python setup_linalg.py build
Run tests if scipy is installed:
python -c 'import scipy;scipy.linalg.test()'
Run tests if linalg is not installed:
python tests/test_decomp.py
"""
import numpy as np
from numpy.testing import (TestCase, assert_equal, assert_array_almost_equal,
assert_array_equal, assert_raises, assert_, assert_allclose,
run_module_suite, dec)
from scipy.lib.six import xrange
from scipy.linalg import (eig, eigvals, lu, svd, svdvals, cholesky, qr,
schur, rsf2csf, lu_solve, lu_factor, solve, diagsvd, hessenberg, rq,
eig_banded, eigvals_banded, eigh, eigvalsh, qr_multiply, qz, orth)
from scipy.linalg.lapack import dgbtrf, dgbtrs, zgbtrf, zgbtrs, \
dsbev, dsbevd, dsbevx, zhbevd, zhbevx
from scipy.linalg.misc import norm
from numpy import array, transpose, sometrue, diag, ones, linalg, \
argsort, zeros, arange, float32, complex64, dot, conj, identity, \
ravel, sqrt, iscomplex, shape, sort, conjugate, bmat, sign, \
asarray, matrix, isfinite, all, ndarray, outer, eye, dtype, empty,\
triu, tril
from numpy.random import rand, normal, seed
from scipy.linalg._testutils import assert_no_overwrite
# digit precision to use in asserts for different types
# ('d'/'D' are double precision, 'f'/'F' single precision LAPACK type codes)
DIGITS = {'d':11, 'D':11, 'f':4, 'F':4}
# XXX: This function should be available through numpy.testing
def assert_dtype_equal(act, des):
    """Assert that two dtype-like objects (or arrays) carry the same dtype."""
    def _as_dtype(obj):
        # Arrays contribute their own dtype; anything else is coerced to one.
        if isinstance(obj, ndarray):
            return obj.dtype
        return dtype(obj)

    act = _as_dtype(act)
    des = _as_dtype(des)

    assert_(act == des, 'dtype mismatch: "%s" (should be "%s") ' % (act, des))
# XXX: This function should not be defined here, but somewhere in
# scipy.linalg namespace
def symrand(dim_or_eigv):
    """Return a random symmetric (Hermitian) matrix.

    If 'dim_or_eigv' is an integer N, return a NxN matrix, with eigenvalues
    uniformly distributed on (-1,1).

    If 'dim_or_eigv' is 1-D real array 'a', return a matrix whose
    eigenvalues are 'a'.
    """
    if isinstance(dim_or_eigv, int):
        n = dim_or_eigv
        eigenvalues = (rand(n)*2)-1
    elif isinstance(dim_or_eigv, ndarray) and len(dim_or_eigv.shape) == 1:
        n = dim_or_eigv.shape[0]
        eigenvalues = dim_or_eigv
    else:
        raise TypeError("input type not supported.")

    # Conjugate the eigenvalue diagonal by a random rotation.
    rot = random_rot(n)
    mat = dot(dot(rot.T.conj(), diag(eigenvalues)), rot)
    # to avoid roundoff errors, symmetrize the matrix (again)
    return 0.5*(mat.T+mat)
# XXX: This function should not be defined here, but somewhere in
# scipy.linalg namespace
def random_rot(dim):
"""Return a random rotation matrix, drawn from the Haar distribution
(the only uniform distribution on SO(n)).
The algorithm is described in the paper
Stewart, G.W., 'The efficient generation of random orthogonal
matrices with an application to condition estimators', SIAM Journal
on Numerical Analysis, 17(3), pp. 403-409, 1980.
For more information see
http://en.wikipedia.org/wiki/Orthogonal_matrix#Randomization"""
H = eye(dim)
D = ones((dim,))
for n in range(1, dim):
x = normal(size=(dim-n+1,))
D[n-1] = sign(x[0])
x[0] -= D[n-1]*sqrt((x*x).sum())
# Householder transformation
Hx = eye(dim-n+1) - 2.*outer(x, x)/(x*x).sum()
mat = eye(dim)
mat[n-1:,n-1:] = Hx
H = dot(H, mat)
# Fix the last sign such that the determinant is 1
D[-1] = -D.prod()
H = (D*H.T).T
return H
def random(size):
    """Return uniform random samples on [0, 1) with the given shape tuple."""
    shape = tuple(size)
    return rand(*shape)
class TestEigVals(TestCase):
    """Checks scipy.linalg.eigvals against analytically known spectra."""

    def test_simple(self):
        mat = [[1,2,3],[1,2,3],[2,5,6]]
        expected = [(9+sqrt(93))/2, 0, (9-sqrt(93))/2]
        assert_array_almost_equal(eigvals(mat), expected)

    def test_simple_tr(self):
        # A transpose-copy-transpose round trip must not change the spectrum.
        mat = array([[1,2,3],[1,2,3],[2,5,6]],'d')
        mat = transpose(mat).copy()
        mat = transpose(mat)
        expected = [(9+sqrt(93))/2, 0, (9-sqrt(93))/2]
        assert_array_almost_equal(eigvals(mat), expected)

    def test_simple_complex(self):
        mat = [[1,2,3],[1,2,3],[2,5,6+1j]]
        expected = [(9+1j+sqrt(92+6j))/2,
                    0,
                    (9+1j-sqrt(92+6j))/2]
        assert_array_almost_equal(eigvals(mat), expected)

    def test_check_finite(self):
        # Skipping the finiteness check must not change the result.
        mat = [[1,2,3],[1,2,3],[2,5,6]]
        expected = [(9+sqrt(93))/2, 0, (9-sqrt(93))/2]
        assert_array_almost_equal(eigvals(mat, check_finite=False), expected)
class TestEig(object):
    # Tests for scipy.linalg.eig / eigvals, including generalized problems.

    def test_simple(self):
        a = [[1,2,3],[1,2,3],[2,5,6]]
        w,v = eig(a)
        exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2]
        v0 = array([1,1,(1+sqrt(93)/3)/2])
        v1 = array([3.,0,-1])
        v2 = array([1,1,(1-sqrt(93)/3)/2])
        # Normalize the analytic eigenvectors to unit length.
        v0 = v0 / sqrt(dot(v0,transpose(v0)))
        v1 = v1 / sqrt(dot(v1,transpose(v1)))
        v2 = v2 / sqrt(dot(v2,transpose(v2)))
        assert_array_almost_equal(w,exact_w)
        # Compare up to overall sign by fixing the sign of the first component.
        assert_array_almost_equal(v0,v[:,0]*sign(v[0,0]))
        assert_array_almost_equal(v1,v[:,1]*sign(v[0,1]))
        assert_array_almost_equal(v2,v[:,2]*sign(v[0,2]))
        # Defining property: A v_i = w_i v_i.
        for i in range(3):
            assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i])
        # Left eigenvectors are right eigenvectors of the transpose.
        w,v = eig(a,left=1,right=0)
        for i in range(3):
            assert_array_almost_equal(dot(transpose(a),v[:,i]),w[i]*v[:,i])

    def test_simple_complex_eig(self):
        a = [[1,2],[-2,1]]
        w,vl,vr = eig(a,left=1,right=1)
        assert_array_almost_equal(w, array([1+2j, 1-2j]))
        for i in range(2):
            assert_array_almost_equal(dot(a,vr[:,i]),w[i]*vr[:,i])
        for i in range(2):
            # Left eigenvectors satisfy A^H u_i = conj(w_i) u_i.
            assert_array_almost_equal(dot(conjugate(transpose(a)),vl[:,i]),
                                      conjugate(w[i])*vl[:,i])

    def test_simple_complex(self):
        a = [[1,2,3],[1,2,3],[2,5,6+1j]]
        w,vl,vr = eig(a,left=1,right=1)
        for i in range(3):
            assert_array_almost_equal(dot(a,vr[:,i]),w[i]*vr[:,i])
        for i in range(3):
            assert_array_almost_equal(dot(conjugate(transpose(a)),vl[:,i]),
                                      conjugate(w[i])*vl[:,i])

    def _check_gen_eig(self, A, B):
        # Shared checker for the generalized problem A v = w B v.
        A, B = asarray(A), asarray(B)
        msg = "\n%r\n%r" % (A, B)
        w, vr = eig(A,B)
        wt = eigvals(A,B)
        val1 = dot(A, vr)
        val2 = dot(B, vr) * w
        res = val1 - val2
        for i in range(res.shape[1]):
            # Only check columns with finite residuals; infinite eigenvalues
            # produce non-finite columns.
            if all(isfinite(res[:, i])):
                assert_array_almost_equal(res[:, i], 0, err_msg=msg)

        # eig and eigvals must agree on the finite eigenvalues.
        assert_array_almost_equal(sort(w[isfinite(w)]), sort(wt[isfinite(wt)]),
                                  err_msg=msg)

        # Returned eigenvectors are normalized to unit length.
        length = np.empty(len(vr))
        for i in xrange(len(vr)):
            length[i] = norm(vr[:, i])
        assert_array_almost_equal(length, np.ones(length.size), err_msg=msg)

    @dec.knownfailureif(True, "See gh-2254.")
    def test_singular(self):
        # Example taken from
        # http://www.cs.umu.se/research/nla/singular_pairs/guptri/matlab.html
        A = array(([22,34,31,31,17], [45,45,42,19,29], [39,47,49,26,34],
                   [27,31,26,21,15], [38,44,44,24,30]))
        B = array(([13,26,25,17,24], [31,46,40,26,37], [26,40,19,25,25],
                   [16,25,27,14,23], [24,35,18,21,22]))

        # Singular pencils emit warnings/inf; silence numpy during the check.
        olderr = np.seterr(all='ignore')
        try:
            self._check_gen_eig(A, B)
        finally:
            np.seterr(**olderr)

    def test_falker(self):
        """Test matrices giving some Nan generalized eigen values."""
        M = diag(array(([1,0,3])))
        K = array(([2,-1,-1],[-1,2,-1],[-1,-1,2]))
        D = array(([1,-1,0],[-1,1,0],[0,0,0]))
        Z = zeros((3,3))
        I = identity(3)
        A = bmat([[I,Z],[Z,-K]])
        B = bmat([[Z,I],[M,D]])

        olderr = np.seterr(all='ignore')
        try:
            self._check_gen_eig(A, B)
        finally:
            np.seterr(**olderr)

    def test_bad_geneig(self):
        # Ticket #709 (strange return values from DGGEV)

        def matrices(omega):
            c1 = -9 + omega**2
            c2 = 2*omega
            A = [[1, 0, 0, 0],
                 [0, 1, 0, 0],
                 [0, 0, c1, 0],
                 [0, 0, 0, c1]]
            B = [[0, 0, 1, 0],
                 [0, 0, 0, 1],
                 [1, 0, 0, -c2],
                 [0, 1, c2, 0]]
            return A, B

        # With a buggy LAPACK, this can fail for different omega on different
        # machines -- so we need to test several values
        olderr = np.seterr(all='ignore')
        try:
            for k in xrange(100):
                A, B = matrices(omega=k*5./100)
                self._check_gen_eig(A, B)
        finally:
            np.seterr(**olderr)

    def test_check_finite(self):
        # Same expectations as test_simple, with the finiteness check skipped.
        a = [[1,2,3],[1,2,3],[2,5,6]]
        w,v = eig(a, check_finite=False)
        exact_w = [(9+sqrt(93))/2,0,(9-sqrt(93))/2]
        v0 = array([1,1,(1+sqrt(93)/3)/2])
        v1 = array([3.,0,-1])
        v2 = array([1,1,(1-sqrt(93)/3)/2])
        v0 = v0 / sqrt(dot(v0,transpose(v0)))
        v1 = v1 / sqrt(dot(v1,transpose(v1)))
        v2 = v2 / sqrt(dot(v2,transpose(v2)))
        assert_array_almost_equal(w,exact_w)
        assert_array_almost_equal(v0,v[:,0]*sign(v[0,0]))
        assert_array_almost_equal(v1,v[:,1]*sign(v[0,1]))
        assert_array_almost_equal(v2,v[:,2]*sign(v[0,2]))
        for i in range(3):
            assert_array_almost_equal(dot(a,v[:,i]),w[i]*v[:,i])

    def test_not_square_error(self):
        """Check that passing a non-square array raises a ValueError."""
        A = np.arange(6).reshape(3,2)
        assert_raises(ValueError, eig, A)

    def test_shape_mismatch(self):
        """Check that passing arrays of with different shapes raises a ValueError."""
        A = identity(2)
        B = np.arange(9.0).reshape(3,3)
        assert_raises(ValueError, eig, A, B)
        assert_raises(ValueError, eig, B, A)
class TestEigBanded(TestCase):
    # Tests for the banded eigensolver wrappers (dsbev*, zhbev*, eig_banded)
    # and the banded LU wrappers (dgbtrf/dgbtrs, zgbtrf/zgbtrs), all
    # cross-checked against dense linalg.eig / linalg.solve results.

    def __init__(self, *args):
        TestCase.__init__(self, *args)
        self.create_bandmat()

    def create_bandmat(self):
        """Create the full matrix `self.fullmat` and
        the corresponding band matrix `self.bandmat`."""
        N = 10
        self.KL = 2   # number of subdiagonals (below the diagonal)
        self.KU = 2   # number of superdiagonals (above the diagonal)

        # symmetric band matrix
        self.sym_mat = (diag(1.0*ones(N))
                        + diag(-1.0*ones(N-1), -1) + diag(-1.0*ones(N-1), 1)
                        + diag(-2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2))

        # hermitian band matrix
        self.herm_mat = (diag(-1.0*ones(N))
                         + 1j*diag(1.0*ones(N-1), -1) - 1j*diag(1.0*ones(N-1), 1)
                         + diag(-2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2))

        # general real band matrix
        self.real_mat = (diag(1.0*ones(N))
                         + diag(-1.0*ones(N-1), -1) + diag(-3.0*ones(N-1), 1)
                         + diag(2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2))

        # general complex band matrix
        self.comp_mat = (1j*diag(1.0*ones(N))
                         + diag(-1.0*ones(N-1), -1) + 1j*diag(-3.0*ones(N-1), 1)
                         + diag(2.0*ones(N-2), -2) + diag(-2.0*ones(N-2), 2))

        # Eigenvalues and -vectors from linalg.eig, sorted ascending, as the
        # reference results for every banded solver below.
        ew, ev = linalg.eig(self.sym_mat)
        ew = ew.real
        args = argsort(ew)
        self.w_sym_lin = ew[args]
        self.evec_sym_lin = ev[:,args]

        ew, ev = linalg.eig(self.herm_mat)
        ew = ew.real
        args = argsort(ew)
        self.w_herm_lin = ew[args]
        self.evec_herm_lin = ev[:,args]

        # Extract upper bands from symmetric and hermitian band matrices
        # (for use in dsbevd, dsbevx, zhbevd, zhbevx
        # and their single precision versions)
        LDAB = self.KU + 1
        self.bandmat_sym = zeros((LDAB, N), dtype=float)
        self.bandmat_herm = zeros((LDAB, N), dtype=complex)
        for i in xrange(LDAB):
            self.bandmat_sym[LDAB-i-1,i:N] = diag(self.sym_mat, i)
            self.bandmat_herm[LDAB-i-1,i:N] = diag(self.herm_mat, i)

        # Extract bands from general real and complex band matrix
        # (for use in dgbtrf, dgbtrs and their single precision versions)
        LDAB = 2*self.KL + self.KU + 1
        self.bandmat_real = zeros((LDAB, N), dtype=float)
        self.bandmat_real[2*self.KL,:] = diag(self.real_mat)  # diagonal
        for i in xrange(self.KL):
            # superdiagonals
            self.bandmat_real[2*self.KL-1-i,i+1:N] = diag(self.real_mat, i+1)
            # subdiagonals
            self.bandmat_real[2*self.KL+1+i,0:N-1-i] = diag(self.real_mat,-i-1)

        self.bandmat_comp = zeros((LDAB, N), dtype=complex)
        self.bandmat_comp[2*self.KL,:] = diag(self.comp_mat)  # diagonal
        for i in xrange(self.KL):
            # superdiagonals
            self.bandmat_comp[2*self.KL-1-i,i+1:N] = diag(self.comp_mat, i+1)
            # subdiagonals
            self.bandmat_comp[2*self.KL+1+i,0:N-1-i] = diag(self.comp_mat,-i-1)

        # absolute value for linear equation system A*x = b
        self.b = 1.0*arange(N)
        self.bc = self.b * (1 + 1j)

    #####################################################################

    def test_dsbev(self):
        """Compare dsbev eigenvalues and eigenvectors with
        the result of linalg.eig."""
        w, evec, info = dsbev(self.bandmat_sym, compute_v=1)
        # Reorder eigenvectors to match the ascending reference ordering.
        evec_ = evec[:,argsort(w)]
        assert_array_almost_equal(sort(w), self.w_sym_lin)
        assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin))

    def test_dsbevd(self):
        """Compare dsbevd eigenvalues and eigenvectors with
        the result of linalg.eig."""
        w, evec, info = dsbevd(self.bandmat_sym, compute_v=1)
        evec_ = evec[:,argsort(w)]
        assert_array_almost_equal(sort(w), self.w_sym_lin)
        assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin))

    def test_dsbevx(self):
        """Compare dsbevx eigenvalues and eigenvectors
        with the result of linalg.eig."""
        N,N = shape(self.sym_mat)
        ## Achtung: Argumente 0.0,0.0,range?
        w, evec, num, ifail, info = dsbevx(self.bandmat_sym, 0.0, 0.0, 1, N,
                                           compute_v=1, range=2)
        evec_ = evec[:,argsort(w)]
        assert_array_almost_equal(sort(w), self.w_sym_lin)
        assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin))

    def test_zhbevd(self):
        """Compare zhbevd eigenvalues and eigenvectors
        with the result of linalg.eig."""
        w, evec, info = zhbevd(self.bandmat_herm, compute_v=1)
        evec_ = evec[:,argsort(w)]
        assert_array_almost_equal(sort(w), self.w_herm_lin)
        assert_array_almost_equal(abs(evec_), abs(self.evec_herm_lin))

    def test_zhbevx(self):
        """Compare zhbevx eigenvalues and eigenvectors
        with the result of linalg.eig."""
        N,N = shape(self.herm_mat)
        ## Achtung: Argumente 0.0,0.0,range?
        w, evec, num, ifail, info = zhbevx(self.bandmat_herm, 0.0, 0.0, 1, N,
                                           compute_v=1, range=2)
        evec_ = evec[:,argsort(w)]
        assert_array_almost_equal(sort(w), self.w_herm_lin)
        assert_array_almost_equal(abs(evec_), abs(self.evec_herm_lin))

    def test_eigvals_banded(self):
        """Compare eigenvalues of eigvals_banded with those of linalg.eig."""
        w_sym = eigvals_banded(self.bandmat_sym)
        w_sym = w_sym.real
        assert_array_almost_equal(sort(w_sym), self.w_sym_lin)

        w_herm = eigvals_banded(self.bandmat_herm)
        w_herm = w_herm.real
        assert_array_almost_equal(sort(w_herm), self.w_herm_lin)

        # extracting eigenvalues with respect to an index range
        ind1 = 2
        ind2 = 6
        w_sym_ind = eigvals_banded(self.bandmat_sym,
                                   select='i', select_range=(ind1, ind2))
        assert_array_almost_equal(sort(w_sym_ind),
                                  self.w_sym_lin[ind1:ind2+1])
        w_herm_ind = eigvals_banded(self.bandmat_herm,
                                    select='i', select_range=(ind1, ind2))
        assert_array_almost_equal(sort(w_herm_ind),
                                  self.w_herm_lin[ind1:ind2+1])

        # extracting eigenvalues with respect to a value range
        v_lower = self.w_sym_lin[ind1] - 1.0e-5
        v_upper = self.w_sym_lin[ind2] + 1.0e-5
        w_sym_val = eigvals_banded(self.bandmat_sym,
                                   select='v', select_range=(v_lower, v_upper))
        assert_array_almost_equal(sort(w_sym_val),
                                  self.w_sym_lin[ind1:ind2+1])

        v_lower = self.w_herm_lin[ind1] - 1.0e-5
        v_upper = self.w_herm_lin[ind2] + 1.0e-5
        w_herm_val = eigvals_banded(self.bandmat_herm,
                                    select='v', select_range=(v_lower, v_upper))
        assert_array_almost_equal(sort(w_herm_val),
                                  self.w_herm_lin[ind1:ind2+1])

        # check_finite=False must not change the result
        w_sym = eigvals_banded(self.bandmat_sym, check_finite=False)
        w_sym = w_sym.real
        assert_array_almost_equal(sort(w_sym), self.w_sym_lin)

    def test_eig_banded(self):
        """Compare eigenvalues and eigenvectors of eig_banded
        with those of linalg.eig. """
        w_sym, evec_sym = eig_banded(self.bandmat_sym)
        evec_sym_ = evec_sym[:,argsort(w_sym.real)]
        assert_array_almost_equal(sort(w_sym), self.w_sym_lin)
        assert_array_almost_equal(abs(evec_sym_), abs(self.evec_sym_lin))

        w_herm, evec_herm = eig_banded(self.bandmat_herm)
        evec_herm_ = evec_herm[:,argsort(w_herm.real)]
        assert_array_almost_equal(sort(w_herm), self.w_herm_lin)
        assert_array_almost_equal(abs(evec_herm_), abs(self.evec_herm_lin))

        # extracting eigenvalues with respect to an index range
        ind1 = 2
        ind2 = 6
        w_sym_ind, evec_sym_ind = eig_banded(self.bandmat_sym,
                                             select='i', select_range=(ind1, ind2))
        assert_array_almost_equal(sort(w_sym_ind),
                                  self.w_sym_lin[ind1:ind2+1])
        assert_array_almost_equal(abs(evec_sym_ind),
                                  abs(self.evec_sym_lin[:,ind1:ind2+1]))

        w_herm_ind, evec_herm_ind = eig_banded(self.bandmat_herm,
                                               select='i', select_range=(ind1, ind2))
        assert_array_almost_equal(sort(w_herm_ind),
                                  self.w_herm_lin[ind1:ind2+1])
        assert_array_almost_equal(abs(evec_herm_ind),
                                  abs(self.evec_herm_lin[:,ind1:ind2+1]))

        # extracting eigenvalues with respect to a value range
        v_lower = self.w_sym_lin[ind1] - 1.0e-5
        v_upper = self.w_sym_lin[ind2] + 1.0e-5
        w_sym_val, evec_sym_val = eig_banded(self.bandmat_sym,
                                             select='v', select_range=(v_lower, v_upper))
        assert_array_almost_equal(sort(w_sym_val),
                                  self.w_sym_lin[ind1:ind2+1])
        assert_array_almost_equal(abs(evec_sym_val),
                                  abs(self.evec_sym_lin[:,ind1:ind2+1]))

        v_lower = self.w_herm_lin[ind1] - 1.0e-5
        v_upper = self.w_herm_lin[ind2] + 1.0e-5
        w_herm_val, evec_herm_val = eig_banded(self.bandmat_herm,
                                               select='v', select_range=(v_lower, v_upper))
        assert_array_almost_equal(sort(w_herm_val),
                                  self.w_herm_lin[ind1:ind2+1])
        assert_array_almost_equal(abs(evec_herm_val),
                                  abs(self.evec_herm_lin[:,ind1:ind2+1]))

        w_sym, evec_sym = eig_banded(self.bandmat_sym, check_finite=False)
        evec_sym_ = evec_sym[:,argsort(w_sym.real)]
        assert_array_almost_equal(sort(w_sym), self.w_sym_lin)
        assert_array_almost_equal(abs(evec_sym_), abs(self.evec_sym_lin))

    def test_dgbtrf(self):
        """Compare dgbtrf LU factorisation with the LU factorisation result
        of linalg.lu."""
        M,N = shape(self.real_mat)
        lu_symm_band, ipiv, info = dgbtrf(self.bandmat_real, self.KL, self.KU)

        # extract matrix u from lu_symm_band
        u = diag(lu_symm_band[2*self.KL,:])
        for i in xrange(self.KL + self.KU):
            u += diag(lu_symm_band[2*self.KL-1-i,i+1:N], i+1)

        p_lin, l_lin, u_lin = lu(self.real_mat, permute_l=0)
        assert_array_almost_equal(u, u_lin)

    def test_zgbtrf(self):
        """Compare zgbtrf LU factorisation with the LU factorisation result
        of linalg.lu."""
        M,N = shape(self.comp_mat)
        lu_symm_band, ipiv, info = zgbtrf(self.bandmat_comp, self.KL, self.KU)

        # extract matrix u from lu_symm_band
        u = diag(lu_symm_band[2*self.KL,:])
        for i in xrange(self.KL + self.KU):
            u += diag(lu_symm_band[2*self.KL-1-i,i+1:N], i+1)

        p_lin, l_lin, u_lin = lu(self.comp_mat, permute_l=0)
        assert_array_almost_equal(u, u_lin)

    def test_dgbtrs(self):
        """Compare dgbtrs solutions for linear equation system A*x = b
        with solutions of linalg.solve."""
        lu_symm_band, ipiv, info = dgbtrf(self.bandmat_real, self.KL, self.KU)
        y, info = dgbtrs(lu_symm_band, self.KL, self.KU, self.b, ipiv)

        y_lin = linalg.solve(self.real_mat, self.b)
        assert_array_almost_equal(y, y_lin)

    def test_zgbtrs(self):
        """Compare zgbtrs solutions for linear equation system A*x = b
        with solutions of linalg.solve."""
        lu_symm_band, ipiv, info = zgbtrf(self.bandmat_comp, self.KL, self.KU)
        y, info = zgbtrs(lu_symm_band, self.KL, self.KU, self.bc, ipiv)

        y_lin = linalg.solve(self.comp_mat, self.bc)
        assert_array_almost_equal(y, y_lin)
def test_eigh():
    # Nose-style test generator: yields one (check_function, label, *params)
    # tuple per combination of problem parameters, covering standard and
    # generalized Hermitian eigenproblems in all four LAPACK dtypes.
    DIM = 6
    v = {'dim': (DIM,),
         'dtype': ('f','d','F','D'),
         'overwrite': (True, False),
         'lower': (True, False),
         'turbo': (True, False),
         'eigvals': (None, (2, DIM-2))}

    for dim in v['dim']:
        for typ in v['dtype']:
            for overwrite in v['overwrite']:
                for turbo in v['turbo']:
                    for eigenvals in v['eigvals']:
                        for lower in v['lower']:
                            yield (eigenhproblem_standard,
                                   'ordinary',
                                   dim, typ, overwrite, lower,
                                   turbo, eigenvals)
                            yield (eigenhproblem_general,
                                   'general ',
                                   dim, typ, overwrite, lower,
                                   turbo, eigenvals)
def _complex_symrand(dim, dtype):
    """Random Hermitian matrix: symmetric real part + antisymmetric imag part."""
    real_part = symrand(dim)
    skew_source = symrand(dim)
    # add antisymmetric matrix as imag part
    hermitian = real_part + 1j*(triu(skew_source) - tril(skew_source))
    return hermitian.astype(dtype)
def eigenhproblem_standard(desc, dim, dtype,
                           overwrite, lower, turbo,
                           eigvals):
    """Solve a standard eigenvalue problem."""
    # Build a random Hermitian (complex dtypes) or symmetric (real) matrix.
    if iscomplex(empty(1, dtype=dtype)):
        mat = _complex_symrand(dim, dtype)
    else:
        mat = symrand(dim).astype(dtype)

    # When eigh may overwrite its input, keep a pristine copy for the check.
    ref = mat.copy() if overwrite else mat

    vals, vecs = eigh(mat, overwrite_a=overwrite, lower=lower, eigvals=eigvals)
    assert_dtype_equal(vecs.dtype, dtype)
    vals = vals.astype(dtype)

    # The eigenvectors must diagonalize the original matrix to the eigenvalues.
    projected = diag(dot(vecs.T.conj(), dot(ref, vecs))).real
    assert_array_almost_equal(projected, vals, DIGITS[dtype])
def eigenhproblem_general(desc, dim, dtype,
                          overwrite, lower, turbo,
                          eigvals):
    """Solve a generalized eigenvalue problem."""
    # Random Hermitian/symmetric pair; shift b's diagonal by 2.1 to make it
    # positive definite, as required by the generalized problem.
    if iscomplex(empty(1, dtype=dtype)):
        mat_a = _complex_symrand(dim, dtype)
        mat_b = _complex_symrand(dim, dtype)+diag([2.1]*dim).astype(dtype)
    else:
        mat_a = symrand(dim).astype(dtype)
        mat_b = symrand(dim).astype(dtype)+diag([2.1]*dim).astype(dtype)

    # Keep pristine copies when eigh may overwrite its inputs.
    if overwrite:
        ref_a, ref_b = mat_a.copy(), mat_b.copy()
    else:
        ref_a, ref_b = mat_a, mat_b
    vals, vecs = eigh(mat_a, mat_b, overwrite_a=overwrite, lower=lower,
                      overwrite_b=overwrite, turbo=turbo, eigvals=eigvals)
    assert_dtype_equal(vecs.dtype, dtype)
    vals = vals.astype(dtype)

    # Eigenvectors diagonalize a to the eigenvalues ...
    proj_a = diag(dot(vecs.T.conj(), dot(ref_a, vecs))).real
    assert_array_almost_equal(proj_a, vals, DIGITS[dtype])
    # ... and are b-orthonormal.
    proj_b = diag(dot(vecs.T.conj(), dot(ref_b, vecs))).real
    assert_array_almost_equal(proj_b, ones(proj_b.shape[0]), DIGITS[dtype])
def test_eigh_integer():
    # Integer input arrays must be accepted (and internally upcast) by eigh,
    # for both the standard and the generalized problem.
    mat = array([[1,2],[2,7]])
    metric = array([[3,1],[1,5]])
    vals, vecs = eigh(mat)
    vals, vecs = eigh(mat, metric)
class TestLU(TestCase):
    """lu()/lu_factor() reconstruction tests on a spread of matrix shapes."""

    def __init__(self, *args, **kw):
        TestCase.__init__(self, *args, **kw)

        # Small square fixtures (real and complex).
        self.a = array([[1,2,3],[1,2,3],[2,5,6]])
        self.ca = array([[1,2,3],[1,2,3],[2,5j,6]])
        # Those matrices are more robust to detect problems in permutation
        # matrices than the ones above
        self.b = array([[1,2,3],[4,5,6],[7,8,9]])
        self.cb = array([[1j,2j,3j],[4j,5j,6j],[7j,8j,9j]])

        # Rectangular fixtures, wide ("h") and tall ("v").
        self.hrect = array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 12, 12]])
        self.chrect = 1.j * array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 12, 12]])

        self.vrect = array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 12, 12]])
        self.cvrect = 1.j * array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 12, 12]])

        # Medium sized random fixtures.
        self.med = rand(30, 40)
        self.cmed = rand(30, 40) + 1.j * rand(30, 40)

    def _test_common(self, data):
        # Full form: p @ l @ u must reproduce the input ...
        p, l, u = lu(data)
        assert_array_almost_equal(dot(dot(p, l), u), data)
        # ... and so must the pre-multiplied form (p @ l) @ u.
        pl, u = lu(data, permute_l=1)
        assert_array_almost_equal(dot(pl, u), data)

    # Simple tests
    def test_simple(self):
        self._test_common(self.a)

    def test_simple_complex(self):
        self._test_common(self.ca)

    def test_simple2(self):
        self._test_common(self.b)

    def test_simple2_complex(self):
        self._test_common(self.cb)

    # rectangular matrices tests
    def test_hrectangular(self):
        self._test_common(self.hrect)

    def test_vrectangular(self):
        self._test_common(self.vrect)

    def test_hrectangular_complex(self):
        self._test_common(self.chrect)

    def test_vrectangular_complex(self):
        self._test_common(self.cvrect)

    # Bigger matrices
    def test_medium1(self):
        """Check lu decomposition on medium size, rectangular matrix."""
        self._test_common(self.med)

    def test_medium1_complex(self):
        """Check lu decomposition on medium size, rectangular matrix."""
        self._test_common(self.cmed)

    def test_check_finite(self):
        p, l, u = lu(self.a, check_finite=False)
        assert_array_almost_equal(dot(dot(p, l), u), self.a)

    def test_simple_known(self):
        # Ticket #1458
        for order in ['C', 'F']:
            A = np.array([[2, 1], [0, 1.]], order=order)
            LU, P = lu_factor(A)
            assert_array_almost_equal(LU, np.array([[2, 1], [0, 1]]))
            assert_array_equal(P, np.array([0, 1]))
class TestLUSingle(TestLU):
    """LU testers for single precision, real and double"""
    def __init__(self, *args, **kw):
        TestLU.__init__(self, *args, **kw)

        # Downcast the double-precision fixtures built by TestLU so the whole
        # TestLU suite reruns in float32/complex64.
        self.a = self.a.astype(float32)
        self.ca = self.ca.astype(complex64)
        self.b = self.b.astype(float32)
        self.cb = self.cb.astype(complex64)

        # BUG FIX: these previously cast the *wrong* source attribute:
        # self.chrect was rebuilt from self.hrect (dropping the imaginary
        # part), self.cvrect from self.vrect, and self.med/self.cmed from
        # self.vrect -- so the "medium size" cases never ran on the 30x40
        # fixtures.  Cast each attribute from itself instead.
        self.hrect = self.hrect.astype(float32)
        self.chrect = self.chrect.astype(complex64)

        self.vrect = self.vrect.astype(float32)
        self.cvrect = self.cvrect.astype(complex64)

        self.med = self.med.astype(float32)
        self.cmed = self.cmed.astype(complex64)
class TestLUSolve(TestCase):
    """lu_solve(lu_factor(a), b) must match the direct solver."""

    def setUp(self):
        # Deterministic random fixtures.
        seed(1234)

    def test_lu(self):
        base = random((10,10))
        rhs = random((10,))

        # Both memory layouts must give the same answer as solve().
        for order in ['C', 'F']:
            mat = np.array(base, order=order)
            direct = solve(mat, rhs)
            factored = lu_factor(mat)
            viaLU = lu_solve(factored, rhs)
            assert_array_almost_equal(direct, viaLU)

    def test_check_finite(self):
        mat = random((10,10))
        rhs = random((10,))
        direct = solve(mat, rhs)
        factored = lu_factor(mat, check_finite=False)
        viaLU = lu_solve(factored, rhs, check_finite=False)
        assert_array_almost_equal(direct, viaLU)
class TestSVD(TestCase):
    # SVD reconstruction tests: U and Vh must be orthogonal (unitary for
    # complex inputs) and U*Sigma*Vh must reproduce the input matrix.

    def setUp(self):
        seed(1234)

    def test_simple(self):
        a = [[1,2,3],[1,20,3],[2,5,6]]
        for full_matrices in (True, False):
            u,s,vh = svd(a, full_matrices=full_matrices)
            assert_array_almost_equal(dot(transpose(u),u),identity(3))
            assert_array_almost_equal(dot(transpose(vh),vh),identity(3))
            # Rebuild the full sigma matrix from the singular values.
            sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char)
            for i in range(len(s)):
                sigma[i,i] = s[i]
            assert_array_almost_equal(dot(dot(u,sigma),vh),a)

    def test_simple_singular(self):
        # Rank-deficient input (rows 0 and 1 are identical).
        a = [[1,2,3],[1,2,3],[2,5,6]]
        for full_matrices in (True, False):
            u,s,vh = svd(a, full_matrices=full_matrices)
            assert_array_almost_equal(dot(transpose(u),u),identity(3))
            assert_array_almost_equal(dot(transpose(vh),vh),identity(3))
            sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char)
            for i in range(len(s)):
                sigma[i,i] = s[i]
            assert_array_almost_equal(dot(dot(u,sigma),vh),a)

    def test_simple_underdet(self):
        # More columns than rows.
        a = [[1,2,3],[4,5,6]]
        for full_matrices in (True, False):
            u,s,vh = svd(a, full_matrices=full_matrices)
            assert_array_almost_equal(dot(transpose(u),u),identity(u.shape[0]))
            sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char)
            for i in range(len(s)):
                sigma[i,i] = s[i]
            assert_array_almost_equal(dot(dot(u,sigma),vh),a)

    def test_simple_overdet(self):
        # More rows than columns.
        a = [[1,2],[4,5],[3,4]]
        for full_matrices in (True, False):
            u,s,vh = svd(a, full_matrices=full_matrices)
            assert_array_almost_equal(dot(transpose(u),u), identity(u.shape[1]))
            assert_array_almost_equal(dot(transpose(vh),vh),identity(2))
            sigma = zeros((u.shape[1],vh.shape[0]),s.dtype.char)
            for i in range(len(s)):
                sigma[i,i] = s[i]
            assert_array_almost_equal(dot(dot(u,sigma),vh),a)

    def test_random(self):
        n = 20
        m = 15
        for i in range(3):
            for a in [random([n,m]),random([m,n])]:
                for full_matrices in (True, False):
                    u,s,vh = svd(a, full_matrices=full_matrices)
                    assert_array_almost_equal(dot(transpose(u),u),identity(u.shape[1]))
                    assert_array_almost_equal(dot(vh, transpose(vh)),identity(vh.shape[0]))
                    sigma = zeros((u.shape[1],vh.shape[0]),s.dtype.char)
                    for i in range(len(s)):
                        sigma[i,i] = s[i]
                    assert_array_almost_equal(dot(dot(u,sigma),vh),a)

    def test_simple_complex(self):
        a = [[1,2,3],[1,2j,3],[2,5,6]]
        for full_matrices in (True, False):
            u,s,vh = svd(a, full_matrices=full_matrices)
            # Complex case: unitarity uses the conjugate transpose.
            assert_array_almost_equal(dot(conj(transpose(u)),u),identity(u.shape[1]))
            assert_array_almost_equal(dot(conj(transpose(vh)),vh),identity(vh.shape[0]))
            sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char)
            for i in range(len(s)):
                sigma[i,i] = s[i]
            assert_array_almost_equal(dot(dot(u,sigma),vh),a)

    def test_random_complex(self):
        n = 20
        m = 15
        for i in range(3):
            for full_matrices in (True, False):
                for a in [random([n,m]),random([m,n])]:
                    a = a + 1j*random(list(a.shape))
                    u,s,vh = svd(a, full_matrices=full_matrices)
                    assert_array_almost_equal(dot(conj(transpose(u)),u),identity(u.shape[1]))
                    # This fails when [m,n]
                    # assert_array_almost_equal(dot(conj(transpose(vh)),vh),identity(len(vh),dtype=vh.dtype.char))
                    sigma = zeros((u.shape[1],vh.shape[0]),s.dtype.char)
                    for i in range(len(s)):
                        sigma[i,i] = s[i]
                    assert_array_almost_equal(dot(dot(u,sigma),vh),a)

    def test_crash_1580(self):
        sizes = [(13, 23), (30, 50), (60, 100)]
        np.random.seed(1234)
        for sz in sizes:
            for dt in [np.float32, np.float64, np.complex64, np.complex128]:
                a = np.random.rand(*sz).astype(dt)
                # should not crash
                svd(a)

    def test_check_finite(self):
        a = [[1,2,3],[1,20,3],[2,5,6]]
        u,s,vh = svd(a, check_finite=False)
        assert_array_almost_equal(dot(transpose(u),u),identity(3))
        assert_array_almost_equal(dot(transpose(vh),vh),identity(3))
        sigma = zeros((u.shape[0],vh.shape[0]),s.dtype.char)
        for i in range(len(s)):
            sigma[i,i] = s[i]
        assert_array_almost_equal(dot(dot(u,sigma),vh),a)
class TestSVDVals(TestCase):
    # svdvals() must return the right number of singular values, sorted in
    # decreasing order, for square/rectangular and real/complex inputs.

    def test_simple(self):
        a = [[1,2,3],[1,2,3],[2,5,6]]
        s = svdvals(a)
        assert_(len(s) == 3)
        assert_(s[0] >= s[1] >= s[2])

    def test_simple_underdet(self):
        a = [[1,2,3],[4,5,6]]
        s = svdvals(a)
        assert_(len(s) == 2)
        assert_(s[0] >= s[1])

    def test_simple_overdet(self):
        a = [[1,2],[4,5],[3,4]]
        s = svdvals(a)
        assert_(len(s) == 2)
        assert_(s[0] >= s[1])

    def test_simple_complex(self):
        a = [[1,2,3],[1,20,3j],[2,5,6]]
        s = svdvals(a)
        assert_(len(s) == 3)
        assert_(s[0] >= s[1] >= s[2])

    def test_simple_underdet_complex(self):
        a = [[1,2,3],[4,5j,6]]
        s = svdvals(a)
        assert_(len(s) == 2)
        assert_(s[0] >= s[1])

    def test_simple_overdet_complex(self):
        a = [[1,2],[4,5],[3j,4]]
        s = svdvals(a)
        assert_(len(s) == 2)
        assert_(s[0] >= s[1])

    def test_check_finite(self):
        a = [[1,2,3],[1,2,3],[2,5,6]]
        s = svdvals(a, check_finite=False)
        assert_(len(s) == 3)
        assert_(s[0] >= s[1] >= s[2])

    @dec.slow
    def test_crash_2609(self):
        np.random.seed(1234)
        a = np.random.rand(1500, 2800)
        # Shouldn't crash:
        svdvals(a)
class TestDiagSVD(TestCase):
    """Test diagsvd(): rebuild the rectangular sigma matrix from a value list."""

    def test_simple(self):
        expected = [[1,0,0],[0,0,0],[0,0,0]]
        assert_array_almost_equal(diagsvd([1,0,0], 3, 3), expected)
class TestQR(TestCase):
    """Tests for qr() and qr_multiply(): square/tall/fat, real/complex,
    full/economic modes, column pivoting, and left/right multiplication.

    Note: two stale duplicate definitions of test_simple_left and
    test_simple_right (which used an obsolete ``qr_multiply(a, mode=...,
    c=...)`` keyword API and were silently shadowed by the later
    definitions, so they never ran) have been removed.
    """
    def setUp(self):
        seed(1234)

    def test_simple(self):
        a = [[8,2,3],[2,9,3],[5,3,6]]
        q,r = qr(a)
        assert_array_almost_equal(dot(transpose(q),q),identity(3))
        assert_array_almost_equal(dot(q,r),a)

    def test_simple_left(self):
        a = [[8,2,3],[2,9,3],[5,3,6]]
        q,r = qr(a)
        c = [1, 2, 3]
        qc,r2 = qr_multiply(a, c, "left")
        assert_array_almost_equal(dot(q, c), qc)
        assert_array_almost_equal(r, r2)
        qc,r2 = qr_multiply(a, identity(3), "left")
        assert_array_almost_equal(q, qc)

    def test_simple_right(self):
        a = [[8,2,3],[2,9,3],[5,3,6]]
        q,r = qr(a)
        c = [1, 2, 3]
        qc,r2 = qr_multiply(a, c)
        assert_array_almost_equal(dot(c, q), qc)
        assert_array_almost_equal(r, r2)
        qc,r = qr_multiply(a, identity(3))
        assert_array_almost_equal(q, qc)

    def test_simple_pivoting(self):
        a = np.asarray([[8,2,3],[2,9,3],[5,3,6]])
        q,r,p = qr(a, pivoting=True)
        d = abs(diag(r))
        assert_(all(d[1:] <= d[:-1]))
        assert_array_almost_equal(dot(transpose(q),q),identity(3))
        assert_array_almost_equal(dot(q,r),a[:,p])
        q2,r2 = qr(a[:,p])
        assert_array_almost_equal(q,q2)
        assert_array_almost_equal(r,r2)

    def test_simple_left_pivoting(self):
        a = [[8,2,3],[2,9,3],[5,3,6]]
        q,r,jpvt = qr(a, pivoting=True)
        c = [1, 2, 3]
        qc,r,jpvt = qr_multiply(a, c, "left", True)
        assert_array_almost_equal(dot(q, c), qc)

    def test_simple_right_pivoting(self):
        a = [[8,2,3],[2,9,3],[5,3,6]]
        q,r,jpvt = qr(a, pivoting=True)
        c = [1, 2, 3]
        qc,r,jpvt = qr_multiply(a, c, pivoting=True)
        assert_array_almost_equal(dot(c, q), qc)

    def test_simple_trap(self):
        a = [[8,2,3],[2,9,3]]
        q,r = qr(a)
        assert_array_almost_equal(dot(transpose(q),q),identity(2))
        assert_array_almost_equal(dot(q,r),a)

    def test_simple_trap_pivoting(self):
        a = np.asarray([[8,2,3],[2,9,3]])
        q,r,p = qr(a, pivoting=True)
        d = abs(diag(r))
        assert_(all(d[1:] <= d[:-1]))
        assert_array_almost_equal(dot(transpose(q),q),identity(2))
        assert_array_almost_equal(dot(q,r),a[:,p])
        q2,r2 = qr(a[:,p])
        assert_array_almost_equal(q,q2)
        assert_array_almost_equal(r,r2)

    def test_simple_tall(self):
        # full version
        a = [[8,2],[2,9],[5,3]]
        q,r = qr(a)
        assert_array_almost_equal(dot(transpose(q),q),identity(3))
        assert_array_almost_equal(dot(q,r),a)

    def test_simple_tall_pivoting(self):
        # full version pivoting
        a = np.asarray([[8,2],[2,9],[5,3]])
        q,r,p = qr(a, pivoting=True)
        d = abs(diag(r))
        assert_(all(d[1:] <= d[:-1]))
        assert_array_almost_equal(dot(transpose(q),q),identity(3))
        assert_array_almost_equal(dot(q,r),a[:,p])
        q2,r2 = qr(a[:,p])
        assert_array_almost_equal(q,q2)
        assert_array_almost_equal(r,r2)

    def test_simple_tall_e(self):
        # economy version
        a = [[8,2],[2,9],[5,3]]
        q,r = qr(a, mode='economic')
        assert_array_almost_equal(dot(transpose(q),q),identity(2))
        assert_array_almost_equal(dot(q,r),a)
        assert_equal(q.shape, (3,2))
        assert_equal(r.shape, (2,2))

    def test_simple_tall_e_pivoting(self):
        # economy version pivoting
        a = np.asarray([[8,2],[2,9],[5,3]])
        q,r,p = qr(a, pivoting=True, mode='economic')
        d = abs(diag(r))
        assert_(all(d[1:] <= d[:-1]))
        assert_array_almost_equal(dot(transpose(q),q),identity(2))
        assert_array_almost_equal(dot(q,r),a[:,p])
        q2,r2 = qr(a[:,p], mode='economic')
        assert_array_almost_equal(q,q2)
        assert_array_almost_equal(r,r2)

    def test_simple_tall_left(self):
        a = [[8,2],[2,9],[5,3]]
        q,r = qr(a, mode="economic")
        c = [1, 2]
        qc,r2 = qr_multiply(a, c, "left")
        assert_array_almost_equal(dot(q, c), qc)
        assert_array_almost_equal(r, r2)
        c = array([1,2,0])
        qc,r2 = qr_multiply(a, c, "left", overwrite_c=True)
        assert_array_almost_equal(dot(q, c[:2]), qc)
        qc,r = qr_multiply(a, identity(2), "left")
        assert_array_almost_equal(qc, q)

    def test_simple_tall_left_pivoting(self):
        a = [[8,2],[2,9],[5,3]]
        q,r,jpvt = qr(a, mode="economic", pivoting=True)
        c = [1, 2]
        qc,r,kpvt = qr_multiply(a, c, "left", True)
        assert_array_equal(jpvt, kpvt)
        assert_array_almost_equal(dot(q, c), qc)
        qc,r,jpvt = qr_multiply(a, identity(2), "left", True)
        assert_array_almost_equal(qc, q)

    def test_simple_tall_right(self):
        a = [[8,2],[2,9],[5,3]]
        q,r = qr(a, mode="economic")
        c = [1, 2, 3]
        cq,r2 = qr_multiply(a, c)
        assert_array_almost_equal(dot(c, q), cq)
        assert_array_almost_equal(r, r2)
        cq,r = qr_multiply(a, identity(3))
        assert_array_almost_equal(cq, q)

    def test_simple_tall_right_pivoting(self):
        a = [[8,2],[2,9],[5,3]]
        q,r,jpvt = qr(a, pivoting=True, mode="economic")
        c = [1, 2, 3]
        cq,r,jpvt = qr_multiply(a, c, pivoting=True)
        assert_array_almost_equal(dot(c, q), cq)
        cq,r,jpvt = qr_multiply(a, identity(3), pivoting=True)
        assert_array_almost_equal(cq, q)

    def test_simple_fat(self):
        # full version
        a = [[8,2,5],[2,9,3]]
        q,r = qr(a)
        assert_array_almost_equal(dot(transpose(q),q),identity(2))
        assert_array_almost_equal(dot(q,r),a)
        assert_equal(q.shape, (2,2))
        assert_equal(r.shape, (2,3))

    def test_simple_fat_pivoting(self):
        # full version pivoting
        a = np.asarray([[8,2,5],[2,9,3]])
        q,r,p = qr(a, pivoting=True)
        d = abs(diag(r))
        assert_(all(d[1:] <= d[:-1]))
        assert_array_almost_equal(dot(transpose(q),q),identity(2))
        assert_array_almost_equal(dot(q,r),a[:,p])
        assert_equal(q.shape, (2,2))
        assert_equal(r.shape, (2,3))
        q2,r2 = qr(a[:,p])
        assert_array_almost_equal(q,q2)
        assert_array_almost_equal(r,r2)

    def test_simple_fat_e(self):
        # economy version
        a = [[8,2,3],[2,9,5]]
        q,r = qr(a, mode='economic')
        assert_array_almost_equal(dot(transpose(q),q),identity(2))
        assert_array_almost_equal(dot(q,r),a)
        assert_equal(q.shape, (2,2))
        assert_equal(r.shape, (2,3))

    def test_simple_fat_e_pivoting(self):
        # economy version pivoting
        a = np.asarray([[8,2,3],[2,9,5]])
        q,r,p = qr(a, pivoting=True, mode='economic')
        d = abs(diag(r))
        assert_(all(d[1:] <= d[:-1]))
        assert_array_almost_equal(dot(transpose(q),q),identity(2))
        assert_array_almost_equal(dot(q,r),a[:,p])
        assert_equal(q.shape, (2,2))
        assert_equal(r.shape, (2,3))
        q2,r2 = qr(a[:,p], mode='economic')
        assert_array_almost_equal(q,q2)
        assert_array_almost_equal(r,r2)

    def test_simple_fat_left(self):
        a = [[8,2,3],[2,9,5]]
        q,r = qr(a, mode="economic")
        c = [1, 2]
        qc,r2 = qr_multiply(a, c, "left")
        assert_array_almost_equal(dot(q, c), qc)
        assert_array_almost_equal(r, r2)
        qc,r = qr_multiply(a, identity(2), "left")
        assert_array_almost_equal(qc, q)

    def test_simple_fat_left_pivoting(self):
        a = [[8,2,3],[2,9,5]]
        q,r,jpvt = qr(a, mode="economic", pivoting=True)
        c = [1, 2]
        qc,r,jpvt = qr_multiply(a, c, "left", True)
        assert_array_almost_equal(dot(q, c), qc)
        qc,r,jpvt = qr_multiply(a, identity(2), "left", True)
        assert_array_almost_equal(qc, q)

    def test_simple_fat_right(self):
        a = [[8,2,3],[2,9,5]]
        q,r = qr(a, mode="economic")
        c = [1, 2]
        cq,r2 = qr_multiply(a, c)
        assert_array_almost_equal(dot(c, q), cq)
        assert_array_almost_equal(r, r2)
        cq,r = qr_multiply(a, identity(2))
        assert_array_almost_equal(cq, q)

    def test_simple_fat_right_pivoting(self):
        a = [[8,2,3],[2,9,5]]
        q,r,jpvt = qr(a, pivoting=True, mode="economic")
        c = [1, 2]
        cq,r,jpvt = qr_multiply(a, c, pivoting=True)
        assert_array_almost_equal(dot(c, q), cq)
        cq,r,jpvt = qr_multiply(a, identity(2), pivoting=True)
        assert_array_almost_equal(cq, q)

    def test_simple_complex(self):
        a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]
        q,r = qr(a)
        assert_array_almost_equal(dot(conj(transpose(q)),q),identity(3))
        assert_array_almost_equal(dot(q,r),a)

    def test_simple_complex_left(self):
        a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]
        q,r = qr(a)
        c = [1, 2, 3+4j]
        qc,r = qr_multiply(a, c, "left")
        assert_array_almost_equal(dot(q, c), qc)
        qc,r = qr_multiply(a, identity(3), "left")
        assert_array_almost_equal(q, qc)

    def test_simple_complex_right(self):
        a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]
        q,r = qr(a)
        c = [1, 2, 3+4j]
        qc,r = qr_multiply(a, c)
        assert_array_almost_equal(dot(c, q), qc)
        qc,r = qr_multiply(a, identity(3))
        assert_array_almost_equal(q, qc)

    def test_simple_tall_complex_left(self):
        a = [[8,2+3j],[2,9],[5+7j,3]]
        q,r = qr(a, mode="economic")
        c = [1, 2+2j]
        qc,r2 = qr_multiply(a, c, "left")
        assert_array_almost_equal(dot(q, c), qc)
        assert_array_almost_equal(r, r2)
        c = array([1,2,0])
        qc,r2 = qr_multiply(a, c, "left", overwrite_c=True)
        assert_array_almost_equal(dot(q, c[:2]), qc)
        qc,r = qr_multiply(a, identity(2), "left")
        assert_array_almost_equal(qc, q)

    def test_simple_complex_left_conjugate(self):
        a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]
        q,r = qr(a)
        c = [1, 2, 3+4j]
        qc,r = qr_multiply(a, c, "left", conjugate=True)
        assert_array_almost_equal(dot(q.conjugate(), c), qc)

    def test_simple_complex_tall_left_conjugate(self):
        a = [[3,3+4j],[5,2+2j],[3,2]]
        q,r = qr(a, mode='economic')
        c = [1, 3+4j]
        qc,r = qr_multiply(a, c, "left", conjugate=True)
        assert_array_almost_equal(dot(q.conjugate(), c), qc)

    def test_simple_complex_right_conjugate(self):
        a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]
        q,r = qr(a)
        c = [1, 2, 3+4j]
        qc,r = qr_multiply(a, c, conjugate=True)
        assert_array_almost_equal(dot(c, q.conjugate()), qc)

    def test_simple_complex_pivoting(self):
        a = np.asarray([[3,3+4j,5],[5,2,2+7j],[3,2,7]])
        q,r,p = qr(a, pivoting=True)
        d = abs(diag(r))
        assert_(all(d[1:] <= d[:-1]))
        assert_array_almost_equal(dot(conj(transpose(q)),q),identity(3))
        assert_array_almost_equal(dot(q,r),a[:,p])
        q2,r2 = qr(a[:,p])
        assert_array_almost_equal(q,q2)
        assert_array_almost_equal(r,r2)

    def test_simple_complex_left_pivoting(self):
        a = np.asarray([[3,3+4j,5],[5,2,2+7j],[3,2,7]])
        q,r,jpvt = qr(a, pivoting=True)
        c = [1, 2, 3+4j]
        qc,r,jpvt = qr_multiply(a, c, "left", True)
        assert_array_almost_equal(dot(q, c), qc)

    def test_simple_complex_right_pivoting(self):
        a = np.asarray([[3,3+4j,5],[5,2,2+7j],[3,2,7]])
        q,r,jpvt = qr(a, pivoting=True)
        c = [1, 2, 3+4j]
        qc,r,jpvt = qr_multiply(a, c, pivoting=True)
        assert_array_almost_equal(dot(c, q), qc)

    def test_random(self):
        n = 20
        for k in range(2):
            a = random([n,n])
            q,r = qr(a)
            assert_array_almost_equal(dot(transpose(q),q),identity(n))
            assert_array_almost_equal(dot(q,r),a)

    def test_random_left(self):
        n = 20
        for k in range(2):
            a = random([n,n])
            q,r = qr(a)
            c = random([n])
            qc,r = qr_multiply(a, c, "left")
            assert_array_almost_equal(dot(q, c), qc)
            qc,r = qr_multiply(a, identity(n), "left")
            assert_array_almost_equal(q, qc)

    def test_random_right(self):
        n = 20
        for k in range(2):
            a = random([n,n])
            q,r = qr(a)
            c = random([n])
            cq,r = qr_multiply(a, c)
            assert_array_almost_equal(dot(c, q), cq)
            cq,r = qr_multiply(a, identity(n))
            assert_array_almost_equal(q, cq)

    def test_random_pivoting(self):
        n = 20
        for k in range(2):
            a = random([n,n])
            q,r,p = qr(a, pivoting=True)
            d = abs(diag(r))
            assert_(all(d[1:] <= d[:-1]))
            assert_array_almost_equal(dot(transpose(q),q),identity(n))
            assert_array_almost_equal(dot(q,r),a[:,p])
            q2,r2 = qr(a[:,p])
            assert_array_almost_equal(q,q2)
            assert_array_almost_equal(r,r2)

    def test_random_tall(self):
        # full version
        m = 200
        n = 100
        for k in range(2):
            a = random([m,n])
            q,r = qr(a)
            assert_array_almost_equal(dot(transpose(q),q),identity(m))
            assert_array_almost_equal(dot(q,r),a)

    def test_random_tall_left(self):
        # full version
        m = 200
        n = 100
        for k in range(2):
            a = random([m,n])
            q,r = qr(a, mode="economic")
            c = random([n])
            qc,r = qr_multiply(a, c, "left")
            assert_array_almost_equal(dot(q, c), qc)
            qc,r = qr_multiply(a, identity(n), "left")
            assert_array_almost_equal(qc, q)

    def test_random_tall_right(self):
        # full version
        m = 200
        n = 100
        for k in range(2):
            a = random([m,n])
            q,r = qr(a, mode="economic")
            c = random([m])
            cq,r = qr_multiply(a, c)
            assert_array_almost_equal(dot(c, q), cq)
            cq,r = qr_multiply(a, identity(m))
            assert_array_almost_equal(cq, q)

    def test_random_tall_pivoting(self):
        # full version pivoting
        m = 200
        n = 100
        for k in range(2):
            a = random([m,n])
            q,r,p = qr(a, pivoting=True)
            d = abs(diag(r))
            assert_(all(d[1:] <= d[:-1]))
            assert_array_almost_equal(dot(transpose(q),q),identity(m))
            assert_array_almost_equal(dot(q,r),a[:,p])
            q2,r2 = qr(a[:,p])
            assert_array_almost_equal(q,q2)
            assert_array_almost_equal(r,r2)

    def test_random_tall_e(self):
        # economy version
        m = 200
        n = 100
        for k in range(2):
            a = random([m,n])
            q,r = qr(a, mode='economic')
            assert_array_almost_equal(dot(transpose(q),q),identity(n))
            assert_array_almost_equal(dot(q,r),a)
            assert_equal(q.shape, (m,n))
            assert_equal(r.shape, (n,n))

    def test_random_tall_e_pivoting(self):
        # economy version pivoting
        m = 200
        n = 100
        for k in range(2):
            a = random([m,n])
            q,r,p = qr(a, pivoting=True, mode='economic')
            d = abs(diag(r))
            assert_(all(d[1:] <= d[:-1]))
            assert_array_almost_equal(dot(transpose(q),q),identity(n))
            assert_array_almost_equal(dot(q,r),a[:,p])
            assert_equal(q.shape, (m,n))
            assert_equal(r.shape, (n,n))
            q2,r2 = qr(a[:,p], mode='economic')
            assert_array_almost_equal(q,q2)
            assert_array_almost_equal(r,r2)

    def test_random_trap(self):
        m = 100
        n = 200
        for k in range(2):
            a = random([m,n])
            q,r = qr(a)
            assert_array_almost_equal(dot(transpose(q),q),identity(m))
            assert_array_almost_equal(dot(q,r),a)

    def test_random_trap_pivoting(self):
        m = 100
        n = 200
        for k in range(2):
            a = random([m,n])
            q,r,p = qr(a, pivoting=True)
            d = abs(diag(r))
            assert_(all(d[1:] <= d[:-1]))
            assert_array_almost_equal(dot(transpose(q),q),identity(m))
            assert_array_almost_equal(dot(q,r),a[:,p])
            q2,r2 = qr(a[:,p])
            assert_array_almost_equal(q,q2)
            assert_array_almost_equal(r,r2)

    def test_random_complex(self):
        n = 20
        for k in range(2):
            a = random([n,n])+1j*random([n,n])
            q,r = qr(a)
            assert_array_almost_equal(dot(conj(transpose(q)),q),identity(n))
            assert_array_almost_equal(dot(q,r),a)

    def test_random_complex_left(self):
        n = 20
        for k in range(2):
            a = random([n,n])+1j*random([n,n])
            q,r = qr(a)
            c = random([n])+1j*random([n])
            qc,r = qr_multiply(a, c, "left")
            assert_array_almost_equal(dot(q, c), qc)
            qc,r = qr_multiply(a, identity(n), "left")
            assert_array_almost_equal(q, qc)

    def test_random_complex_right(self):
        n = 20
        for k in range(2):
            a = random([n,n])+1j*random([n,n])
            q,r = qr(a)
            c = random([n])+1j*random([n])
            cq,r = qr_multiply(a, c)
            assert_array_almost_equal(dot(c, q), cq)
            cq,r = qr_multiply(a, identity(n))
            assert_array_almost_equal(q, cq)

    def test_random_complex_pivoting(self):
        n = 20
        for k in range(2):
            a = random([n,n])+1j*random([n,n])
            q,r,p = qr(a, pivoting=True)
            d = abs(diag(r))
            assert_(all(d[1:] <= d[:-1]))
            assert_array_almost_equal(dot(conj(transpose(q)),q),identity(n))
            assert_array_almost_equal(dot(q,r),a[:,p])
            q2,r2 = qr(a[:,p])
            assert_array_almost_equal(q,q2)
            assert_array_almost_equal(r,r2)

    def test_check_finite(self):
        a = [[8,2,3],[2,9,3],[5,3,6]]
        q,r = qr(a, check_finite=False)
        assert_array_almost_equal(dot(transpose(q),q),identity(3))
        assert_array_almost_equal(dot(q,r),a)
class TestRQ(TestCase):
    """Tests for rq(): the A = R*Q factorization, where Q has orthonormal
    rows; covers square, tall, fat, complex and economic cases."""
    def setUp(self):
        seed(1234)
    def test_simple(self):
        a = [[8,2,3],[2,9,3],[5,3,6]]
        r,q = rq(a)
        # Q is orthogonal and R*Q reproduces the input.
        assert_array_almost_equal(dot(q, transpose(q)),identity(3))
        assert_array_almost_equal(dot(r,q),a)
    def test_r(self):
        # mode='r' returns only the R factor; must match the full result.
        a = [[8,2,3],[2,9,3],[5,3,6]]
        r,q = rq(a)
        r2 = rq(a, mode='r')
        assert_array_almost_equal(r, r2)
    def test_random(self):
        n = 20
        for k in range(2):
            a = random([n,n])
            r,q = rq(a)
            assert_array_almost_equal(dot(q, transpose(q)),identity(n))
            assert_array_almost_equal(dot(r,q),a)
    def test_simple_trap(self):
        a = [[8,2,3],[2,9,3]]
        r,q = rq(a)
        assert_array_almost_equal(dot(transpose(q),q),identity(3))
        assert_array_almost_equal(dot(r,q),a)
    def test_simple_tall(self):
        a = [[8,2],[2,9],[5,3]]
        r,q = rq(a)
        assert_array_almost_equal(dot(transpose(q),q),identity(2))
        assert_array_almost_equal(dot(r,q),a)
    def test_simple_fat(self):
        a = [[8,2,5],[2,9,3]]
        r,q = rq(a)
        assert_array_almost_equal(dot(transpose(q),q),identity(3))
        assert_array_almost_equal(dot(r,q),a)
    def test_simple_complex(self):
        a = [[3,3+4j,5],[5,2,2+7j],[3,2,7]]
        r,q = rq(a)
        # Complex case: Q is unitary, so use the conjugate transpose.
        assert_array_almost_equal(dot(q, conj(transpose(q))),identity(3))
        assert_array_almost_equal(dot(r,q),a)
    def test_random_tall(self):
        m = 200
        n = 100
        for k in range(2):
            a = random([m,n])
            r,q = rq(a)
            assert_array_almost_equal(dot(q, transpose(q)),identity(n))
            assert_array_almost_equal(dot(r,q),a)
    def test_random_trap(self):
        m = 100
        n = 200
        for k in range(2):
            a = random([m,n])
            r,q = rq(a)
            assert_array_almost_equal(dot(q, transpose(q)),identity(n))
            assert_array_almost_equal(dot(r,q),a)
    def test_random_trap_economic(self):
        # Economic mode: q is m-by-n with orthonormal rows, r is m-by-m.
        m = 100
        n = 200
        for k in range(2):
            a = random([m,n])
            r,q = rq(a, mode='economic')
            assert_array_almost_equal(dot(q,transpose(q)),identity(m))
            assert_array_almost_equal(dot(r,q),a)
            assert_equal(q.shape, (m, n))
            assert_equal(r.shape, (m, m))
    def test_random_complex(self):
        n = 20
        for k in range(2):
            a = random([n,n])+1j*random([n,n])
            r,q = rq(a)
            assert_array_almost_equal(dot(q, conj(transpose(q))),identity(n))
            assert_array_almost_equal(dot(r,q),a)
    def test_random_complex_economic(self):
        m = 100
        n = 200
        for k in range(2):
            a = random([m,n])+1j*random([m,n])
            r,q = rq(a, mode='economic')
            assert_array_almost_equal(dot(q,conj(transpose(q))),identity(m))
            assert_array_almost_equal(dot(r,q),a)
            assert_equal(q.shape, (m, n))
            assert_equal(r.shape, (m, m))
    def test_check_finite(self):
        # check_finite=False skips input validation but not the computation.
        a = [[8,2,3],[2,9,3],[5,3,6]]
        r,q = rq(a, check_finite=False)
        assert_array_almost_equal(dot(q, transpose(q)),identity(3))
        assert_array_almost_equal(dot(r,q),a)
# Short aliases used by the Schur/Hessenberg tests below.
transp = transpose
# NOTE(review): this deliberately shadows the builtin `any` with numpy's
# element-wise sometrue for the tests below; easy to misread when skimming.
any = sometrue
class TestSchur(TestCase):
    """Tests for schur() and rsf2csf(): real/complex Schur forms, eigenvalue
    sorting ('lhp'/'rhp'/'iuc'/'ouc'/callable) and sort-argument validation."""
    def test_simple(self):
        a = [[8,12,3],[2,9,3],[10,3,6]]
        t,z = schur(a)
        # z * t * z^H must reproduce the input.
        assert_array_almost_equal(dot(dot(z,t),transp(conj(z))),a)
        tc,zc = schur(a,'complex')
        # The complex form must actually be complex for this matrix.
        assert_(any(ravel(iscomplex(zc))) and any(ravel(iscomplex(tc))))
        assert_array_almost_equal(dot(dot(zc,tc),transp(conj(zc))),a)
        tc2,zc2 = rsf2csf(tc,zc)
        assert_array_almost_equal(dot(dot(zc2,tc2),transp(conj(zc2))),a)
    def test_sort(self):
        # Expected factors below are hard-coded LAPACK results compared to
        # 3 decimals; sdim counts the eigenvalues selected by the sort.
        a = [[4.,3.,1.,-1.],[-4.5,-3.5,-1.,1.],[9.,6.,-4.,4.5],[6.,4.,-3.,3.5]]
        s,u,sdim = schur(a,sort='lhp')
        assert_array_almost_equal([[0.1134,0.5436,0.8316,0.],
                                   [-0.1134,-0.8245,0.5544,0.],
                                   [-0.8213,0.1308,0.0265,-0.5547],
                                   [-0.5475,0.0872,0.0177,0.8321]],
                                  u,3)
        assert_array_almost_equal([[-1.4142,0.1456,-11.5816,-7.7174],
                                   [0.,-0.5000,9.4472,-0.7184],
                                   [0.,0.,1.4142,-0.1456],
                                   [0.,0.,0.,0.5]],
                                  s,3)
        assert_equal(2,sdim)
        s,u,sdim = schur(a,sort='rhp')
        assert_array_almost_equal([[0.4862,-0.4930,0.1434,-0.7071],
                                   [-0.4862,0.4930,-0.1434,-0.7071],
                                   [0.6042,0.3944,-0.6924,0.],
                                   [0.4028,0.5986,0.6924,0.]],
                                  u,3)
        assert_array_almost_equal([[1.4142,-0.9270,4.5368,-14.4130],
                                   [0.,0.5,6.5809,-3.1870],
                                   [0.,0.,-1.4142,0.9270],
                                   [0.,0.,0.,-0.5]],
                                  s,3)
        assert_equal(2,sdim)
        s,u,sdim = schur(a,sort='iuc')
        assert_array_almost_equal([[0.5547,0.,-0.5721,-0.6042],
                                   [-0.8321,0.,-0.3814,-0.4028],
                                   [0.,0.7071,-0.5134,0.4862],
                                   [0.,0.7071,0.5134,-0.4862]],
                                  u,3)
        assert_array_almost_equal([[-0.5000,0.0000,-6.5809,-4.0974],
                                   [0.,0.5000,-3.3191,-14.4130],
                                   [0.,0.,1.4142,2.1573],
                                   [0.,0.,0.,-1.4142]],
                                  s,3)
        assert_equal(2,sdim)
        s,u,sdim = schur(a,sort='ouc')
        assert_array_almost_equal([[0.4862,-0.5134,0.7071,0.],
                                   [-0.4862,0.5134,0.7071,0.],
                                   [0.6042,0.5721,0.,-0.5547],
                                   [0.4028,0.3814,0.,0.8321]],
                                  u,3)
        assert_array_almost_equal([[1.4142,-2.1573,14.4130,4.0974],
                                   [0.,-1.4142,3.3191,6.5809],
                                   [0.,0.,-0.5000,0.],
                                   [0.,0.,0.,0.5000]],
                                  s,3)
        assert_equal(2,sdim)
        # A user-supplied callable predicate must behave like 'rhp'.
        rhp_function = lambda x: x >= 0.0
        s,u,sdim = schur(a,sort=rhp_function)
        assert_array_almost_equal([[0.4862,-0.4930,0.1434,-0.7071],
                                   [-0.4862,0.4930,-0.1434,-0.7071],
                                   [0.6042,0.3944,-0.6924,0.],
                                   [0.4028,0.5986,0.6924,0.]],
                                  u,3)
        assert_array_almost_equal([[1.4142,-0.9270,4.5368,-14.4130],
                                   [0.,0.5,6.5809,-3.1870],
                                   [0.,0.,-1.4142,0.9270],
                                   [0.,0.,0.,-0.5]],
                                  s,3)
        assert_equal(2,sdim)
    def test_sort_errors(self):
        # Invalid sort specifiers must raise ValueError, not fail silently.
        a = [[4.,3.,1.,-1.],[-4.5,-3.5,-1.,1.],[9.,6.,-4.,4.5],[6.,4.,-3.,3.5]]
        assert_raises(ValueError, schur, a, sort='unsupported')
        assert_raises(ValueError, schur, a, sort=1)
    def test_check_finite(self):
        a = [[8,12,3],[2,9,3],[10,3,6]]
        t,z = schur(a, check_finite=False)
        assert_array_almost_equal(dot(dot(z,t),transp(conj(z))),a)
class TestHessenberg(TestCase):
    """Tests for hessenberg(): H = Q^T A Q (Q^H for complex input), with
    one hard-coded expected H and several random/structured matrices."""
    def test_simple(self):
        a = [[-149, -50,-154],
             [537, 180, 546],
             [-27, -9, -25]]
        # Expected Hessenberg form, compared to 4 decimals.
        h1 = [[-149.0000,42.2037,-156.3165],
              [-537.6783,152.5511,-554.9272],
              [0,0.0728, 2.4489]]
        h,q = hessenberg(a,calc_q=1)
        assert_array_almost_equal(dot(transp(q),dot(a,q)),h)
        assert_array_almost_equal(h,h1,decimal=4)
    def test_simple_complex(self):
        a = [[-149, -50,-154],
             [537, 180j, 546],
             [-27j, -9, -25]]
        h,q = hessenberg(a,calc_q=1)
        # For complex input the similarity uses the conjugate transpose.
        h1 = dot(transp(conj(q)),dot(a,q))
        assert_array_almost_equal(h1,h)
    def test_simple2(self):
        a = [[1,2,3,4,5,6,7],
             [0,2,3,4,6,7,2],
             [0,2,2,3,0,3,2],
             [0,0,2,8,0,0,2],
             [0,3,1,2,0,1,2],
             [0,1,2,3,0,1,0],
             [0,0,0,0,0,1,2]]
        h,q = hessenberg(a,calc_q=1)
        assert_array_almost_equal(dot(transp(q),dot(a,q)),h)
    def test_simple3(self):
        # Nearly-triangular input with a single subdiagonal entry.
        a = np.eye(3)
        a[-1, 0] = 2
        h, q = hessenberg(a, calc_q=1)
        assert_array_almost_equal(dot(transp(q), dot(a, q)), h)
    def test_random(self):
        n = 20
        for k in range(2):
            a = random([n,n])
            h,q = hessenberg(a,calc_q=1)
            assert_array_almost_equal(dot(transp(q),dot(a,q)),h)
    def test_random_complex(self):
        n = 20
        for k in range(2):
            a = random([n,n])+1j*random([n,n])
            h,q = hessenberg(a,calc_q=1)
            h1 = dot(transp(conj(q)),dot(a,q))
            assert_array_almost_equal(h1,h)
    def test_check_finite(self):
        # check_finite=False must not change the result.
        a = [[-149, -50,-154],
             [537, 180, 546],
             [-27, -9, -25]]
        h1 = [[-149.0000,42.2037,-156.3165],
              [-537.6783,152.5511,-554.9272],
              [0,0.0728, 2.4489]]
        h,q = hessenberg(a,calc_q=1, check_finite=False)
        assert_array_almost_equal(dot(transp(q),dot(a,q)),h)
        assert_array_almost_equal(h,h1,decimal=4)
class TestQZ(TestCase):
    """Tests for qz(): generalized Schur decomposition A = Q*AA*Z^H,
    B = Q*BB*Z^H, over single/double real and complex input."""
    def setUp(self):
        seed(12345)
    def test_qz_single(self):
        n = 5
        A = random([n,n]).astype(float32)
        B = random([n,n]).astype(float32)
        AA,BB,Q,Z = qz(A,B)
        # Reconstruction and orthogonality of both transformation matrices.
        assert_array_almost_equal(dot(dot(Q,AA),Z.T), A)
        assert_array_almost_equal(dot(dot(Q,BB),Z.T), B)
        assert_array_almost_equal(dot(Q,Q.T), eye(n))
        assert_array_almost_equal(dot(Z,Z.T), eye(n))
        # BB's diagonal is normalized to be non-negative.
        assert_(all(diag(BB) >= 0))
    def test_qz_double(self):
        n = 5
        A = random([n,n])
        B = random([n,n])
        AA,BB,Q,Z = qz(A,B)
        assert_array_almost_equal(dot(dot(Q,AA),Z.T), A)
        assert_array_almost_equal(dot(dot(Q,BB),Z.T), B)
        assert_array_almost_equal(dot(Q,Q.T), eye(n))
        assert_array_almost_equal(dot(Z,Z.T), eye(n))
        assert_(all(diag(BB) >= 0))
    def test_qz_complex(self):
        n = 5
        A = random([n,n]) + 1j*random([n,n])
        B = random([n,n]) + 1j*random([n,n])
        AA,BB,Q,Z = qz(A,B)
        assert_array_almost_equal(dot(dot(Q,AA),Z.conjugate().T), A)
        assert_array_almost_equal(dot(dot(Q,BB),Z.conjugate().T), B)
        assert_array_almost_equal(dot(Q,Q.conjugate().T), eye(n))
        assert_array_almost_equal(dot(Z,Z.conjugate().T), eye(n))
        assert_(all(diag(BB) >= 0))
        # For complex input the diagonal of BB is additionally real.
        assert_(all(diag(BB).imag == 0))
    def test_qz_complex64(self):
        n = 5
        A = (random([n,n]) + 1j*random([n,n])).astype(complex64)
        B = (random([n,n]) + 1j*random([n,n])).astype(complex64)
        AA,BB,Q,Z = qz(A,B)
        # Looser tolerance (5 decimals) for single-precision complex.
        assert_array_almost_equal(dot(dot(Q,AA),Z.conjugate().T), A, decimal=5)
        assert_array_almost_equal(dot(dot(Q,BB),Z.conjugate().T), B, decimal=5)
        assert_array_almost_equal(dot(Q,Q.conjugate().T), eye(n), decimal=5)
        assert_array_almost_equal(dot(Z,Z.conjugate().T), eye(n), decimal=5)
        assert_(all(diag(BB) >= 0))
        assert_(all(diag(BB).imag == 0))
    def test_qz_double_complex(self):
        # Real input with output='complex': imaginary parts must vanish.
        n = 5
        A = random([n,n])
        B = random([n,n])
        AA,BB,Q,Z = qz(A,B, output='complex')
        aa = dot(dot(Q,AA),Z.conjugate().T)
        assert_array_almost_equal(aa.real, A)
        assert_array_almost_equal(aa.imag, 0)
        bb = dot(dot(Q,BB),Z.conjugate().T)
        assert_array_almost_equal(bb.real, B)
        assert_array_almost_equal(bb.imag, 0)
        assert_array_almost_equal(dot(Q,Q.conjugate().T), eye(n))
        assert_array_almost_equal(dot(Z,Z.conjugate().T), eye(n))
        assert_(all(diag(BB) >= 0))
    def test_qz_double_sort(self):
        # from http://www.nag.com/lapack-ex/node119.html
        # NOTE: These matrices may be ill-conditioned and lead to a
        # seg fault on certain python versions when compiled with
        # sse2 or sse3 older ATLAS/LAPACK binaries for windows
        # A = np.array([[3.9, 12.5, -34.5, -0.5],
        # [ 4.3, 21.5, -47.5, 7.5],
        # [ 4.3, 21.5, -43.5, 3.5],
        # [ 4.4, 26.0, -46.0, 6.0 ]])
        # B = np.array([[ 1.0, 2.0, -3.0, 1.0],
        # [1.0, 3.0, -5.0, 4.0],
        # [1.0, 3.0, -4.0, 3.0],
        # [1.0, 3.0, -4.0, 4.0]])
        A = np.array([[3.9, 12.5, -34.5, 2.5],
                      [4.3, 21.5, -47.5, 7.5],
                      [4.3, 1.5, -43.5, 3.5],
                      [4.4, 6.0, -46.0, 6.0]])
        B = np.array([[1.0, 1.0, -3.0, 1.0],
                      [1.0, 3.0, -5.0, 4.4],
                      [1.0, 2.0, -4.0, 1.0],
                      [1.2, 3.0, -4.0, 4.0]])
        # Currently the sort keyword is rejected outright; the block below
        # is intentionally disabled until sorted QZ is supported again.
        sort = lambda ar,ai,beta: ai == 0
        assert_raises(ValueError, qz, A, B, sort=sort)
        if False:
            AA,BB,Q,Z,sdim = qz(A,B,sort=sort)
            # assert_(sdim == 2)
            assert_(sdim == 4)
            assert_array_almost_equal(dot(dot(Q,AA),Z.T), A)
            assert_array_almost_equal(dot(dot(Q,BB),Z.T), B)
            # test absolute values bc the sign is ambiguous and might be platform
            # dependent
            assert_array_almost_equal(np.abs(AA), np.abs(np.array(
                [[35.7864, -80.9061, -12.0629, -9.498],
                 [0., 2.7638, -2.3505, 7.3256],
                 [0., 0., 0.6258, -0.0398],
                 [0., 0., 0., -12.8217]])), 4)
            assert_array_almost_equal(np.abs(BB), np.abs(np.array(
                [[4.5324, -8.7878, 3.2357, -3.5526],
                 [0., 1.4314, -2.1894, 0.9709],
                 [0., 0., 1.3126, -0.3468],
                 [0., 0., 0., 0.559]])), 4)
            assert_array_almost_equal(np.abs(Q), np.abs(np.array(
                [[-0.4193, -0.605, -0.1894, -0.6498],
                 [-0.5495, 0.6987, 0.2654, -0.3734],
                 [-0.4973, -0.3682, 0.6194, 0.4832],
                 [-0.5243, 0.1008, -0.7142, 0.4526]])), 4)
            assert_array_almost_equal(np.abs(Z), np.abs(np.array(
                [[-0.9471, -0.2971, -0.1217, 0.0055],
                 [-0.0367, 0.1209, 0.0358, 0.9913],
                 [0.3171, -0.9041, -0.2547, 0.1312],
                 [0.0346, 0.2824, -0.9587, 0.0014]])), 4)
        # test absolute values bc the sign is ambiguous and might be platform
        # dependent
        # assert_array_almost_equal(abs(AA), abs(np.array([
        # [3.8009, -69.4505, 50.3135, -43.2884],
        # [0.0000, 9.2033, -0.2001, 5.9881],
        # [0.0000, 0.0000, 1.4279, 4.4453],
        # [0.0000, 0.0000, 0.9019, -1.1962]])), 4)
        # assert_array_almost_equal(abs(BB), abs(np.array([
        # [1.9005, -10.2285, 0.8658, -5.2134],
        # [0.0000, 2.3008, 0.7915, 0.4262],
        # [0.0000, 0.0000, 0.8101, 0.0000],
        # [0.0000, 0.0000, 0.0000, -0.2823]])), 4)
        # assert_array_almost_equal(abs(Q), abs(np.array([
        # [0.4642, 0.7886, 0.2915, -0.2786],
        # [0.5002, -0.5986, 0.5638, -0.2713],
        # [0.5002, 0.0154, -0.0107, 0.8657],
        # [0.5331, -0.1395, -0.7727, -0.3151]])), 4)
        # assert_array_almost_equal(dot(Q,Q.T), eye(4))
        # assert_array_almost_equal(abs(Z), abs(np.array([
        # [0.9961, -0.0014, 0.0887, -0.0026],
        # [0.0057, -0.0404, -0.0938, -0.9948],
        # [0.0626, 0.7194, -0.6908, 0.0363],
        # [0.0626, -0.6934, -0.7114, 0.0956]])), 4)
        # assert_array_almost_equal(dot(Z,Z.T), eye(4))
    # def test_qz_complex_sort(self):
    # cA = np.array([
    # [-21.10+22.50*1j, 53.50+-50.50*1j, -34.50+127.50*1j, 7.50+ 0.50*1j],
    # [-0.46+ -7.78*1j, -3.50+-37.50*1j, -15.50+ 58.50*1j,-10.50+ -1.50*1j],
    # [ 4.30+ -5.50*1j, 39.70+-17.10*1j, -68.50+ 12.50*1j, -7.50+ -3.50*1j],
    # [ 5.50+ 4.40*1j, 14.40+ 43.30*1j, -32.50+-46.00*1j,-19.00+-32.50*1j]])
    # cB = np.array([
    # [1.00+ -5.00*1j, 1.60+ 1.20*1j,-3.00+ 0.00*1j, 0.00+ -1.00*1j],
    # [0.80+ -0.60*1j, 3.00+ -5.00*1j,-4.00+ 3.00*1j,-2.40+ -3.20*1j],
    # [1.00+ 0.00*1j, 2.40+ 1.80*1j,-4.00+ -5.00*1j, 0.00+ -3.00*1j],
    # [0.00+ 1.00*1j,-1.80+ 2.40*1j, 0.00+ -4.00*1j, 4.00+ -5.00*1j]])
    # AAS,BBS,QS,ZS,sdim = qz(cA,cB,sort='lhp')
    # eigenvalues = diag(AAS)/diag(BBS)
    # assert_(all(np.real(eigenvalues[:sdim] < 0)))
    # assert_(all(np.real(eigenvalues[sdim:] > 0)))
    def test_check_finite(self):
        n = 5
        A = random([n,n])
        B = random([n,n])
        AA,BB,Q,Z = qz(A,B,check_finite=False)
        assert_array_almost_equal(dot(dot(Q,AA),Z.T), A)
        assert_array_almost_equal(dot(dot(Q,BB),Z.T), B)
        assert_array_almost_equal(dot(Q,Q.T), eye(n))
        assert_array_almost_equal(dot(Z,Z.T), eye(n))
        assert_(all(diag(BB) >= 0))
class TestDatacopied(TestCase):
    """Test the private _datacopied helper used to decide overwrite safety."""

    def test_datacopied(self):
        from scipy.linalg.decomp import _datacopied

        M = matrix([[0,1],[2,3]])
        A = asarray(M)
        L = M.tolist()
        M2 = M.copy()

        class Fake1:
            def __array__(self):
                return A

        class Fake2:
            __array_interface__ = A.__array_interface__

        # asarray copies only when it cannot reuse the original buffer:
        # plain lists copy; arrays, matrices and array-interface objects
        # share their data, so _datacopied must report False for them.
        cases = [(M, False), (A, False), (L, True),
                 (M2, False), (Fake1(), False), (Fake2(), False)]
        for item, expected in cases:
            assert_equal(_datacopied(asarray(item), item), expected,
                         err_msg=repr(item))
def test_aligned_mem_float():
    """Check linalg works with non-aligned memory"""
    # Allocate 402 bytes of memory (allocated on boundary)
    a = arange(402, dtype=np.uint8)
    # View the buffer at byte offset 2: since float32 has a 4-byte element
    # size, the resulting array's data pointer is deliberately misaligned.
    z = np.frombuffer(a.data, offset=2, count=100, dtype=float32)
    z.shape = 10, 10
    # eig must cope with the misaligned buffer, including in overwrite mode.
    eig(z, overwrite_a=True)
    eig(z.T, overwrite_a=True)
def test_aligned_mem():
    """Check linalg works with non-aligned memory"""
    # 804 bytes of backing store; viewing it at byte offset 4 yields a
    # float64 array whose data pointer is not 8-byte aligned.
    backing = arange(804, dtype=np.uint8)
    z = np.frombuffer(backing.data, offset=4, count=100, dtype=float)
    z.shape = (10, 10)
    # Neither the array nor its transpose may trip alignment assumptions.
    eig(z, overwrite_a=True)
    eig(z.T, overwrite_a=True)
def test_aligned_mem_complex():
    """Check that complex objects don't need to be completely aligned"""
    # 1608 bytes of backing store; viewing it at byte offset 8 yields a
    # complex128 array misaligned relative to its 16-byte element size.
    backing = zeros(1608, dtype=np.uint8)
    z = np.frombuffer(backing.data, offset=8, count=100, dtype=complex)
    z.shape = (10, 10)
    eig(z, overwrite_a=True)
    # This does not need special handling
    eig(z.T, overwrite_a=True)
def check_lapack_misaligned(func, args, kwargs):
    """Call *func* once per ndarray argument, each time with that argument
    copied onto a deliberately misaligned buffer (and again transposed,
    when the argument is multi-dimensional)."""
    args = list(args)
    for pos in range(len(args)):
        trial = args[:]
        if isinstance(trial[pos], np.ndarray):
            src = trial[pos]
            # Back the array with a uint8 buffer viewed at byte offset 4,
            # breaking the natural alignment of the element type.
            raw = np.zeros(src.size * src.dtype.itemsize + 8, dtype=np.uint8)
            mis = np.frombuffer(raw.data, offset=4, count=src.size,
                                dtype=src.dtype)
            mis.shape = src.shape
            mis[...] = src
            trial[pos] = mis
            func(*trial, **kwargs)
            if len(trial[pos].shape) > 1:
                trial[pos] = trial[pos].T
                func(*trial, **kwargs)
@dec.knownfailureif(True, "Ticket #1152, triggers a segfault in rare cases.")
def test_lapack_misaligned():
    """Generator test: run several LAPACK-backed routines on misaligned input.

    Marked knownfailureif because some of these cases segfault (ticket #1152).
    """
    M = np.eye(10, dtype=float)
    R = np.arange(100)
    R.shape = 10, 10
    S = np.arange(20000, dtype=np.uint8)
    # `np.float` was merely an alias of the builtin and has been removed from
    # numpy; use the builtin `float` (float64) directly.
    S = np.frombuffer(S.data, offset=4, count=100, dtype=float)
    S.shape = 10, 10
    b = np.ones(10)
    LU, piv = lu_factor(S)
    for (func, args, kwargs) in [
            (eig, (S,), dict(overwrite_a=True)),  # crash
            (eigvals, (S,), dict(overwrite_a=True)),  # no crash
            (lu, (S,), dict(overwrite_a=True)),  # no crash
            (lu_factor, (S,), dict(overwrite_a=True)),  # no crash
            (lu_solve, ((LU, piv), b), dict(overwrite_b=True)),
            (solve, (S, b), dict(overwrite_a=True, overwrite_b=True)),
            (svd, (M,), dict(overwrite_a=True)),  # no crash
            (svd, (R,), dict(overwrite_a=True)),  # no crash
            (svd, (S,), dict(overwrite_a=True)),  # crash
            (svdvals, (S,), dict()),  # no crash
            (svdvals, (S,), dict(overwrite_a=True)),  # crash
            (cholesky, (M,), dict(overwrite_a=True)),  # no crash
            (qr, (S,), dict(overwrite_a=True)),  # crash
            (rq, (S,), dict(overwrite_a=True)),  # crash
            (hessenberg, (S,), dict(overwrite_a=True)),  # crash
            (schur, (S,), dict(overwrite_a=True)),  # crash
            ]:
        yield check_lapack_misaligned, func, args, kwargs
# not properly tested
# cholesky, rsf2csf, lu_solve, solve, eig_banded, eigvals_banded, eigh, diagsvd
class TestOverwrite(object):
    """Check that every decomposition leaves its input untouched unless an
    overwrite_* flag is given.  Relies on the shared assert_no_overwrite
    helper (defined elsewhere in this file), which builds inputs of the
    given shapes and verifies they are unchanged after the call."""
    def test_eig(self):
        assert_no_overwrite(eig, [(3,3)])
        assert_no_overwrite(eig, [(3,3), (3,3)])
    def test_eigh(self):
        assert_no_overwrite(eigh, [(3,3)])
        assert_no_overwrite(eigh, [(3,3), (3,3)])
    def test_eig_banded(self):
        assert_no_overwrite(eig_banded, [(3,2)])
    def test_eigvals(self):
        assert_no_overwrite(eigvals, [(3,3)])
    def test_eigvalsh(self):
        assert_no_overwrite(eigvalsh, [(3,3)])
    def test_eigvals_banded(self):
        assert_no_overwrite(eigvals_banded, [(3,2)])
    def test_hessenberg(self):
        assert_no_overwrite(hessenberg, [(3,3)])
    def test_lu_factor(self):
        assert_no_overwrite(lu_factor, [(3,3)])
    def test_lu_solve(self):
        # lu_solve needs a prefactored matrix; wrap it so only b is probed.
        x = np.array([[1,2,3], [4,5,6], [7,8,8]])
        xlu = lu_factor(x)
        assert_no_overwrite(lambda b: lu_solve(xlu, b), [(3,)])
    def test_lu(self):
        assert_no_overwrite(lu, [(3,3)])
    def test_qr(self):
        assert_no_overwrite(qr, [(3,3)])
    def test_rq(self):
        assert_no_overwrite(rq, [(3,3)])
    def test_schur(self):
        assert_no_overwrite(schur, [(3,3)])
    def test_schur_complex(self):
        # Complex output from real input; restrict dtypes accordingly.
        assert_no_overwrite(lambda a: schur(a, 'complex'), [(3,3)],
                            dtypes=[np.float32, np.float64])
    def test_svd(self):
        assert_no_overwrite(svd, [(3,3)])
    def test_svdvals(self):
        assert_no_overwrite(svdvals, [(3,3)])
def _check_orth(n):
X = np.ones((n, 2), dtype=float)
Y = orth(X)
assert_equal(Y.shape, (n, 1))
assert_allclose(Y, Y.mean(), atol=1e-10)
Y = orth(X.T)
assert_equal(Y.shape, (2, 1))
assert_allclose(Y, Y.mean())
@dec.slow
def test_orth_memory_efficiency():
    """Regression test: orth() on a tall-skinny matrix must not allocate n*n.

    Pick n so that 16*n bytes is reasonable but 8*n*n bytes is unreasonable.
    Keep in mind that @dec.slow tests are likely to be running
    under configurations that support 4Gb+ memory for tests related to
    32 bit overflow.
    """
    n = 10*1000*1000
    try:
        _check_orth(n)
    except MemoryError as e:
        # Chain the original MemoryError so the real failure is diagnosable.
        raise AssertionError('memory error perhaps caused by orth regression') from e
def test_orth():
    """Exercise _check_orth() over a range of row counts."""
    for rows in (1, 2, 3, 10, 100):
        _check_orth(rows)
if __name__ == "__main__":
run_module_suite()
| mit |
ThirdProject/android_external_chromium_org | build/android/gyp/apk_install.py | 28 | 3351 | #!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Installs an APK.
"""
import optparse
import os
import re
import subprocess
import sys
from util import build_device
from util import build_utils
from util import md5_check
BUILD_ANDROID_DIR = os.path.join(os.path.dirname(__file__), '..')
sys.path.append(BUILD_ANDROID_DIR)
from pylib import constants
from pylib.utils import apk_helper
def GetNewMetadata(device, apk_package):
  """Gets the metadata on the device for the apk_package apk.

  The 'ls -l' output line (size + mtime + name) serves as a fingerprint of
  the installed APK.  Returns the first matching line, or None if the
  package is not present in /data/app/.
  """
  output = device.RunShellCommand('ls -l /data/app/')
  # Matches lines like:
  # -rw-r--r-- system system 7376582 2013-04-19 16:34 org.chromium.chrome.testshell.apk
  # -rw-r--r-- system system 7376582 2013-04-19 16:34 org.chromium.chrome.testshell-1.apk
  # Escape the package name and the literal '.apk' dot so regex
  # metacharacters cannot cause false matches.
  pattern = re.compile(r'.*%s(-[0-9]*)?\.apk$' % re.escape(apk_package))
  # Build a real list: on Python 3, filter() returns an iterator, which is
  # not indexable, so the original matches[0] would raise TypeError.
  matches = [line for line in output if pattern.match(line)]
  return matches[0] if matches else None
def HasInstallMetadataChanged(device, apk_package, metadata_path):
  """Checks if the metadata on the device for apk_package has changed."""
  # No recorded metadata means we cannot prove the device is up to date.
  if not os.path.exists(metadata_path):
    return True
  with open(metadata_path, 'r') as expected_file:
    recorded = expected_file.read()
  return recorded != device.GetInstallMetadata(apk_package)
def RecordInstallMetadata(device, apk_package, metadata_path):
  """Records the metadata from the device for apk_package."""
  # A missing entry in /data/app/ means the install did not take effect.
  new_metadata = GetNewMetadata(device, apk_package)
  if not new_metadata:
    raise Exception('APK install failed unexpectedly.')
  with open(metadata_path, 'w') as record:
    record.write(new_metadata)
def main(argv):
  """Installs the APK only when the APK file or its on-device install
  state has changed since the last recorded install."""
  parser = optparse.OptionParser()
  parser.add_option('--apk-path',
                    help='Path to .apk to install.')
  parser.add_option('--install-record',
                    help='Path to install record (touched only when APK is installed).')
  parser.add_option('--build-device-configuration',
                    help='Path to build device configuration.')
  parser.add_option('--stamp',
                    help='Path to touch on success.')
  parser.add_option('--configuration-name',
                    help='The build CONFIGURATION_NAME')
  options, _ = parser.parse_args()

  device = build_device.GetBuildDeviceFromPath(
      options.build_device_configuration)
  if not device:
    # No device configured: silently skip the install step.
    return

  constants.SetBuildType(options.configuration_name)

  serial_number = device.GetSerialNumber()
  apk_package = apk_helper.GetPackageName(options.apk_path)

  # Per-device stamp file: switching devices forces a reinstall.
  metadata_path = '%s.%s.device.time.stamp' % (options.apk_path, serial_number)

  # If the APK on the device does not match the one that was last installed by
  # the build, then the APK has to be installed (regardless of the md5 record).
  force_install = HasInstallMetadataChanged(device, apk_package, metadata_path)

  def Install():
    # reinstall=True keeps existing app data; record the device metadata
    # only after a successful push.
    device.Install(options.apk_path, reinstall=True)
    RecordInstallMetadata(device, apk_package, metadata_path)
    build_utils.Touch(options.install_record)

  # Skip the install when the APK's md5 is unchanged, unless force_install
  # says the device state no longer matches the record.
  record_path = '%s.%s.md5.stamp' % (options.apk_path, serial_number)
  md5_check.CallAndRecordIfStale(
      Install,
      record_path=record_path,
      input_paths=[options.apk_path],
      force=force_install)

  if options.stamp:
    build_utils.Touch(options.stamp)
if __name__ == '__main__':
  # Propagate main()'s return value as the process exit code.
  sys.exit(main(sys.argv))
| bsd-3-clause |
xlcteam/pgu | pgu/gui/layout.py | 32 | 4901 | """Document layout engine."""
class Layout:
    """The document layout engine.

    Elements are queued with add() and positioned in a single pass by
    resize(), which flows them left-to-right into lines within self.rect,
    handling whitespace, line breaks, aligned blocks, and floating
    (left/right aligned) elements.
    """
    def __init__(self,rect=None):
        """initialize the object with the size of the box."""
        self._widgets = []
        self.rect = rect
    def add(self,e):
        """Add a document element to the layout.

        The document element may be

        * a tuple (w,h) if it is a whitespace element
        * a tuple (0,h) if it is a linebreak element
        * an integer -1,0,1 if it is a command to start a new block of
          elements that are aligned either left,center, or right.
        * an object with a .rect (for size) -- such as a word element
        * an object with a .rect (for size) and .align -- such as an image
          element
        """
        self._widgets.append(e)
    def resize(self):
        """Resize the layout.

        This method recalculates the position of all document elements after
        they have been added to the document.  .rect.x,y will be updated for
        all objects.
        """
        self.init()
        self.widgets = []
        for e in self._widgets:
            # Dispatch on element kind -- see add() for the protocol.
            if type(e) is tuple and e[0] != 0:
                self.do_space(e)
            elif type(e) is tuple and e[0] == 0:
                self.do_br(e[1])
            elif type(e) is int:
                self.do_block(align=e)
            elif hasattr(e,'align'):
                self.do_align(e)
            else:
                self.do_item(e)
        self.line()
        # Final height is the bottom-most of the normal flow and both floats.
        self.rect.h = max(self.y,self.left_bottom,self.right_bottom)
    def init(self):
        """Reset the layout cursor and float state for a fresh resize() pass."""
        self.x,self.y = self.rect.x,self.rect.y
        self.left = self.rect.left
        self.right = self.rect.right
        # Bottom y of the current left/right floats; the flow is narrowed
        # until the cursor passes them (see getleft/getright).
        self.left_bottom = 0
        self.right_bottom = 0
        self.y = self.rect.y
        self.x = self.rect.x
        self.h = 0            # height of the current (pending) line
        self.items = []       # elements collected for the current line
        self.align = -1       # block alignment: -1 left, 0 center, 1 right
    def getleft(self):
        # Once the flow has moved below the left float, restore the full
        # left edge.
        if self.y > self.left_bottom:
            self.left = self.rect.left
        return self.left
    def getright(self):
        # Once the flow has moved below the right float, restore the full
        # right edge.
        if self.y > self.right_bottom:
            self.right = self.rect.right
        return self.right
    def do_br(self,h):
        # Line break: flush the pending line, then seed the next line's
        # minimum height with h.
        self.line()
        self.h = h
    def do_block(self,align=-1):
        # Start a new block: flush the pending line and switch alignment.
        self.line()
        self.align = align
    def do_align(self,e):
        # Place an element that carries its own .align: centered elements
        # occupy a full line of their own; -1/1 become left/right floats
        # that narrow the flow until the cursor passes them.
        align = e.align
        ox,oy,oh = self.x,self.y,self.h
        w,h = e.rect.w,e.rect.h
        if align == 0:
            self.line()
            self.x = self.rect.left + (self.rect.width-w)/2
            self.fit = 0   # NOTE(review): .fit is written but never read here
        elif align == -1:
            self.line()
            self.y = max(self.left_bottom,self.y + self.h)
            self.h = 0
            self.x = self.rect.left
        elif align == 1:
            self.line()
            self.y = max(self.right_bottom,self.y + self.h)
            self.h = 0
            self.x = self.rect.left + (self.rect.width-w)
        e.rect.x,e.rect.y = self.x,self.y
        self.x = self.x + w
        self.y = self.y   # no-op, kept from the original flow
        if align == 0:
            # A centered element finishes its own line.
            self.h = max(self.h,h)
            self.y = self.y + self.h
            self.x = self.getleft()
            self.h = 0
        elif align == -1:
            # Left float: flow resumes to its right until left_bottom passes.
            self.left = self.x
            self.left_bottom = self.y + h
            self.x,self.y,self.h = ox + w,oy,oh
        elif align == 1:
            # Right float: cursor is restored, the right edge narrows.
            self.right = self.x - w
            self.right_bottom = self.y + h
            self.x,self.y,self.h = ox,oy,oh
        self.widgets.append(e)
    def do_space(self,e):
        # Whitespace element: wraps to a new line rather than trailing at
        # the right edge (and is then discarded by line()).
        w,h = e
        if self.x+w >= self.getright():
            self.line()
        else:
            self.items.append(e)
            self.h = max(self.h,h)
            self.x += w
    def do_item(self,e):
        # Word/box element: wrap first if it would overflow the right edge,
        # then queue it on the current line.
        w,h = e.rect.w,e.rect.h
        if self.x+w >= self.getright():
            self.line()
        self.items.append(e)
        self.h = max(self.h,h)
        self.x += w
    def line(self):
        """Flush the pending items as one line, assigning their x,y."""
        x1 = self.getleft()
        x2 = self.getright()
        align = self.align
        y = self.y
        # Drop a trailing whitespace element so it doesn't skew alignment.
        if len(self.items) != 0 and type(self.items[-1]) == tuple:
            del self.items[-1]
        # Total width of the line, counting whitespace tuples by e[0].
        w = 0
        for e in self.items:
            if type(e) == tuple: w += e[0]
            else: w += e.rect.w
        if align == -1: x = x1
        elif align == 0:
            x = x1 + ((x2-x1)-w)/2
            self.fit = 0   # NOTE(review): .fit is written but never read here
        elif align == 1: x = x2 - w
        # Lay the items out left to right from the computed start x.
        for e in self.items:
            if type(e) == tuple: x += e[0]
            else:
                e.rect.x,e.rect.y = x,y
                self.widgets.append(e)
                x += e.rect.w
        self.items = []
        self.y = self.y + self.h
        self.x = self.getleft()
        self.h = 0
| lgpl-2.1 |
AnishShah/tensorflow | tensorflow/python/kernel_tests/boosted_trees/prediction_ops_test.py | 1 | 31808 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests boosted_trees prediction kernels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from google.protobuf import text_format
from tensorflow.core.kernels.boosted_trees import boosted_trees_pb2
from tensorflow.python.framework import test_util
from tensorflow.python.ops import boosted_trees_ops
from tensorflow.python.ops import resources
from tensorflow.python.platform import googletest
class TrainingPredictionOpsTest(test_util.TensorFlowTestCase):
"""Tests prediction ops for training."""
def testCachedPredictionOnEmptyEnsemble(self):
"""Tests that prediction on a dummy ensemble does not fail."""
with self.cached_session() as session:
# Create a dummy ensemble.
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto='')
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# No previous cached values.
cached_tree_ids = [0, 0]
cached_node_ids = [0, 0]
# We have two features: 0 and 1. Values don't matter here on a dummy
# ensemble.
feature_0_values = [67, 5]
feature_1_values = [9, 17]
# Grow tree ensemble.
predict_op = boosted_trees_ops.training_predict(
tree_ensemble_handle,
cached_tree_ids=cached_tree_ids,
cached_node_ids=cached_node_ids,
bucketized_features=[feature_0_values, feature_1_values],
logits_dimension=1)
logits_updates, new_tree_ids, new_node_ids = session.run(predict_op)
# Nothing changed.
self.assertAllClose(cached_tree_ids, new_tree_ids)
self.assertAllClose(cached_node_ids, new_node_ids)
self.assertAllClose([[0], [0]], logits_updates)
def testNoCachedPredictionButTreeExists(self):
"""Tests that predictions are updated once trees are added."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge("""
trees {
nodes {
bucketized_split {
feature_id: 0
threshold: 15
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
scalar: 1.14
}
}
nodes {
leaf {
scalar: 8.79
}
}
}
tree_weights: 0.1
tree_metadata {
is_finalized: true
num_layers_grown: 1
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
}
""", tree_ensemble_config)
# Create existing ensemble with one root split
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Two examples, none were cached before.
cached_tree_ids = [0, 0]
cached_node_ids = [0, 0]
feature_0_values = [67, 5]
# Grow tree ensemble.
predict_op = boosted_trees_ops.training_predict(
tree_ensemble_handle,
cached_tree_ids=cached_tree_ids,
cached_node_ids=cached_node_ids,
bucketized_features=[feature_0_values],
logits_dimension=1)
logits_updates, new_tree_ids, new_node_ids = session.run(predict_op)
# We are in the first tree.
self.assertAllClose([0, 0], new_tree_ids)
self.assertAllClose([2, 1], new_node_ids)
self.assertAllClose([[0.1 * 8.79], [0.1 * 1.14]], logits_updates)
def testCachedPredictionIsCurrent(self):
"""Tests that prediction based on previous node in the tree works."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge("""
trees {
nodes {
bucketized_split {
feature_id: 1
threshold: 15
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
original_leaf {
scalar: -2
}
}
}
nodes {
leaf {
scalar: 1.14
}
}
nodes {
leaf {
scalar: 8.79
}
}
}
tree_weights: 0.1
tree_metadata {
is_finalized: true
num_layers_grown: 2
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
}
""", tree_ensemble_config)
# Create existing ensemble with one root split
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Two examples, one was cached in node 1 first, another in node 0.
cached_tree_ids = [0, 0]
cached_node_ids = [1, 2]
# We have two features: 0 and 1. Values don't matter because trees didn't
# change.
feature_0_values = [67, 5]
feature_1_values = [9, 17]
# Grow tree ensemble.
predict_op = boosted_trees_ops.training_predict(
tree_ensemble_handle,
cached_tree_ids=cached_tree_ids,
cached_node_ids=cached_node_ids,
bucketized_features=[feature_0_values, feature_1_values],
logits_dimension=1)
logits_updates, new_tree_ids, new_node_ids = session.run(predict_op)
# Nothing changed.
self.assertAllClose(cached_tree_ids, new_tree_ids)
self.assertAllClose(cached_node_ids, new_node_ids)
self.assertAllClose([[0], [0]], logits_updates)
def testCachedPredictionFromTheSameTree(self):
"""Tests that prediction based on previous node in the tree works."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge("""
trees {
nodes {
bucketized_split {
feature_id: 1
threshold: 15
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
original_leaf {
scalar: -2
}
}
}
nodes {
bucketized_split {
feature_id: 1
threshold: 7
left_id: 3
right_id: 4
}
metadata {
gain: 1.4
original_leaf {
scalar: 7.14
}
}
}
nodes {
bucketized_split {
feature_id: 0
threshold: 7
left_id: 5
right_id: 6
}
metadata {
gain: 2.7
original_leaf {
scalar: -4.375
}
}
}
nodes {
leaf {
scalar: 1.14
}
}
nodes {
leaf {
scalar: 8.79
}
}
nodes {
leaf {
scalar: -5.875
}
}
nodes {
leaf {
scalar: -2.075
}
}
}
tree_weights: 0.1
tree_metadata {
is_finalized: true
num_layers_grown: 2
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 2
}
""", tree_ensemble_config)
# Create existing ensemble with one root split
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Two examples, one was cached in node 1 first, another in node 0.
cached_tree_ids = [0, 0]
cached_node_ids = [1, 0]
# We have two features: 0 and 1.
feature_0_values = [67, 5]
feature_1_values = [9, 17]
# Grow tree ensemble.
predict_op = boosted_trees_ops.training_predict(
tree_ensemble_handle,
cached_tree_ids=cached_tree_ids,
cached_node_ids=cached_node_ids,
bucketized_features=[feature_0_values, feature_1_values],
logits_dimension=1)
logits_updates, new_tree_ids, new_node_ids = session.run(predict_op)
# We are still in the same tree.
self.assertAllClose([0, 0], new_tree_ids)
# When using the full tree, the first example will end up in node 4,
# the second in node 5.
self.assertAllClose([4, 5], new_node_ids)
# Full predictions for each instance would be 8.79 and -5.875,
# so an update from the previous cached values lr*(7.14 and -2) would be
# 1.65 and -3.875, and then multiply them by 0.1 (lr)
self.assertAllClose([[0.1 * 1.65], [0.1 * -3.875]], logits_updates)
def testCachedPredictionFromPreviousTree(self):
"""Tests the predictions work when we have cache from previous trees."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge("""
trees {
nodes {
bucketized_split {
feature_id: 1
threshold: 28
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
}
}
nodes {
leaf {
scalar: 1.14
}
}
nodes {
leaf {
scalar: 8.79
}
}
}
trees {
nodes {
bucketized_split {
feature_id: 1
threshold: 26
left_id: 1
right_id: 2
}
}
nodes {
bucketized_split {
feature_id: 0
threshold: 50
left_id: 3
right_id: 4
}
}
nodes {
leaf {
scalar: 7
}
}
nodes {
leaf {
scalar: 5
}
}
nodes {
leaf {
scalar: 6
}
}
}
trees {
nodes {
bucketized_split {
feature_id: 0
threshold: 34
left_id: 1
right_id: 2
}
}
nodes {
leaf {
scalar: -7.0
}
}
nodes {
leaf {
scalar: 5.0
}
}
}
tree_metadata {
is_finalized: true
}
tree_metadata {
is_finalized: true
}
tree_metadata {
is_finalized: false
}
tree_weights: 0.1
tree_weights: 0.1
tree_weights: 0.1
""", tree_ensemble_config)
# Create existing ensemble with one root split
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
# Two examples, one was cached in node 1 first, another in node 2.
cached_tree_ids = [0, 0]
cached_node_ids = [1, 0]
# We have two features: 0 and 1.
feature_0_values = [36, 32]
feature_1_values = [11, 27]
# Grow tree ensemble.
predict_op = boosted_trees_ops.training_predict(
tree_ensemble_handle,
cached_tree_ids=cached_tree_ids,
cached_node_ids=cached_node_ids,
bucketized_features=[feature_0_values, feature_1_values],
logits_dimension=1)
logits_updates, new_tree_ids, new_node_ids = session.run(predict_op)
# Example 1 will get to node 3 in tree 1 and node 2 of tree 2
# Example 2 will get to node 2 in tree 1 and node 1 of tree 2
# We are in the last tree.
self.assertAllClose([2, 2], new_tree_ids)
# When using the full tree, the first example will end up in node 4,
# the second in node 5.
self.assertAllClose([2, 1], new_node_ids)
# Example 1: tree 0: 8.79, tree 1: 5.0, tree 2: 5.0 = >
# change = 0.1*(5.0+5.0)
# Example 2: tree 0: 1.14, tree 1: 7.0, tree 2: -7 = >
# change= 0.1(1.14+7.0-7.0)
self.assertAllClose([[1], [0.114]], logits_updates)
def testCachedPredictionFromTheSameTreeWithPostPrunedNodes(self):
"""Tests that prediction based on previous node in the tree works."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge("""
trees {
nodes {
bucketized_split {
feature_id:0
threshold: 33
left_id: 1
right_id: 2
}
metadata {
gain: -0.2
}
}
nodes {
leaf {
scalar: 0.01
}
}
nodes {
bucketized_split {
feature_id: 1
threshold: 5
left_id: 3
right_id: 4
}
metadata {
gain: 0.5
original_leaf {
scalar: 0.0143
}
}
}
nodes {
leaf {
scalar: 0.0553
}
}
nodes {
leaf {
scalar: 0.0783
}
}
}
tree_weights: 1.0
tree_metadata {
num_layers_grown: 3
is_finalized: true
post_pruned_nodes_meta {
new_node_id: 0
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 2
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.07
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.083
}
post_pruned_nodes_meta {
new_node_id: 3
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 4
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.22
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.57
}
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 3
}
""", tree_ensemble_config)
# Create existing ensemble.
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
cached_tree_ids = [0, 0, 0, 0, 0, 0]
# Leaves 3,4, 7 and 8 got deleted during post-pruning, leaves 5 and 6
# changed the ids to 3 and 4 respectively.
cached_node_ids = [3, 4, 5, 6, 7, 8]
# We have two features: 0 and 1.
feature_0_values = [12, 17, 35, 36, 23, 11]
feature_1_values = [12, 12, 17, 18, 123, 24]
# Grow tree ensemble.
predict_op = boosted_trees_ops.training_predict(
tree_ensemble_handle,
cached_tree_ids=cached_tree_ids,
cached_node_ids=cached_node_ids,
bucketized_features=[feature_0_values, feature_1_values],
logits_dimension=1)
logits_updates, new_tree_ids, new_node_ids = session.run(predict_op)
# We are still in the same tree.
self.assertAllClose([0, 0, 0, 0, 0, 0], new_tree_ids)
# Examples from leaves 3,4,7,8 should be in leaf 1, examples from leaf 5
# and 6 in leaf 3 and 4.
self.assertAllClose([1, 1, 3, 4, 1, 1], new_node_ids)
cached_values = [[0.08], [0.093], [0.0553], [0.0783], [0.15 + 0.08],
[0.5 + 0.08]]
self.assertAllClose([[0.01], [0.01], [0.0553], [0.0783], [0.01], [0.01]],
logits_updates + cached_values)
def testCachedPredictionFromThePreviousTreeWithPostPrunedNodes(self):
"""Tests that prediction based on previous node in the tree works."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge("""
trees {
nodes {
bucketized_split {
feature_id:0
threshold: 33
left_id: 1
right_id: 2
}
metadata {
gain: -0.2
}
}
nodes {
leaf {
scalar: 0.01
}
}
nodes {
bucketized_split {
feature_id: 1
threshold: 5
left_id: 3
right_id: 4
}
metadata {
gain: 0.5
original_leaf {
scalar: 0.0143
}
}
}
nodes {
leaf {
scalar: 0.0553
}
}
nodes {
leaf {
scalar: 0.0783
}
}
}
trees {
nodes {
leaf {
scalar: 0.55
}
}
}
tree_weights: 1.0
tree_weights: 1.0
tree_metadata {
num_layers_grown: 3
is_finalized: true
post_pruned_nodes_meta {
new_node_id: 0
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 2
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.07
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.083
}
post_pruned_nodes_meta {
new_node_id: 3
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 4
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.22
}
post_pruned_nodes_meta {
new_node_id: 1
logit_change: -0.57
}
}
tree_metadata {
num_layers_grown: 1
is_finalized: false
}
growing_metadata {
num_trees_attempted: 2
num_layers_attempted: 4
}
""", tree_ensemble_config)
# Create existing ensemble.
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
cached_tree_ids = [0, 0, 0, 0, 0, 0]
# Leaves 3,4, 7 and 8 got deleted during post-pruning, leaves 5 and 6
# changed the ids to 3 and 4 respectively.
cached_node_ids = [3, 4, 5, 6, 7, 8]
# We have two features: 0 and 1.
feature_0_values = [12, 17, 35, 36, 23, 11]
feature_1_values = [12, 12, 17, 18, 123, 24]
# Grow tree ensemble.
predict_op = boosted_trees_ops.training_predict(
tree_ensemble_handle,
cached_tree_ids=cached_tree_ids,
cached_node_ids=cached_node_ids,
bucketized_features=[feature_0_values, feature_1_values],
logits_dimension=1)
logits_updates, new_tree_ids, new_node_ids = session.run(predict_op)
# We are in the last tree.
self.assertAllClose([1, 1, 1, 1, 1, 1], new_tree_ids)
# Examples from leaves 3,4,7,8 should be in leaf 1, examples from leaf 5
# and 6 in leaf 3 and 4 in tree 0. For tree 1, all of the examples are in
# the root node.
self.assertAllClose([0, 0, 0, 0, 0, 0], new_node_ids)
cached_values = [[0.08], [0.093], [0.0553], [0.0783], [0.15 + 0.08],
[0.5 + 0.08]]
root = 0.55
self.assertAllClose([[root + 0.01], [root + 0.01], [root + 0.0553],
[root + 0.0783], [root + 0.01], [root + 0.01]],
logits_updates + cached_values)
def testCachedPredictionTheWholeTreeWasPruned(self):
"""Tests that prediction based on previous node in the tree works."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge("""
trees {
nodes {
leaf {
scalar: 0.00
}
}
}
tree_weights: 1.0
tree_metadata {
num_layers_grown: 1
is_finalized: true
post_pruned_nodes_meta {
new_node_id: 0
logit_change: 0.0
}
post_pruned_nodes_meta {
new_node_id: 0
logit_change: -6.0
}
post_pruned_nodes_meta {
new_node_id: 0
logit_change: 5.0
}
}
growing_metadata {
num_trees_attempted: 1
num_layers_attempted: 1
}
""", tree_ensemble_config)
# Create existing ensemble.
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
cached_tree_ids = [
0,
0,
]
# The predictions were cached in 1 and 2, both were pruned to the root.
cached_node_ids = [1, 2]
# We have two features: 0 and 1.These are not going to be used anywhere.
feature_0_values = [12, 17]
feature_1_values = [12, 12]
# Grow tree ensemble.
predict_op = boosted_trees_ops.training_predict(
tree_ensemble_handle,
cached_tree_ids=cached_tree_ids,
cached_node_ids=cached_node_ids,
bucketized_features=[feature_0_values, feature_1_values],
logits_dimension=1)
logits_updates, new_tree_ids, new_node_ids = session.run(predict_op)
# We are in the last tree.
self.assertAllClose([0, 0], new_tree_ids)
self.assertAllClose([0, 0], new_node_ids)
self.assertAllClose([[-6.0], [5.0]], logits_updates)
class PredictionOpsTest(test_util.TensorFlowTestCase):
  """Tests prediction ops for inference."""

  def testPredictionOnEmptyEnsemble(self):
    """Tests that prediction on a empty ensemble does not fail."""
    with self.cached_session() as session:
      # Create an empty ensemble.
      tree_ensemble = boosted_trees_ops.TreeEnsemble(
          'ensemble', serialized_proto='')
      tree_ensemble_handle = tree_ensemble.resource_handle
      resources.initialize_resources(resources.shared_resources()).run()

      feature_0_values = [36, 32]
      feature_1_values = [11, 27]

      # With no trees in the ensemble, every example gets a zero logit.
      expected_logits = [[0.0], [0.0]]

      # Prediction should work fine.
      predict_op = boosted_trees_ops.predict(
          tree_ensemble_handle,
          bucketized_features=[feature_0_values, feature_1_values],
          logits_dimension=1)

      logits = session.run(predict_op)
      self.assertAllClose(expected_logits, logits)

  def testPredictionMultipleTree(self):
    """Tests the predictions work when we have multiple trees."""
    with self.cached_session() as session:
      tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
      # Three trees: tree 0 has one split, tree 1 has two levels, tree 2
      # has one split; weighted 0.1, 0.2 and 1.0 respectively.
      text_format.Merge("""
        trees {
          nodes {
            bucketized_split {
              feature_id: 1
              threshold: 28
              left_id: 1
              right_id: 2
            }
            metadata {
              gain: 7.62
            }
          }
          nodes {
            leaf {
              scalar: 1.14
            }
          }
          nodes {
            leaf {
              scalar: 8.79
            }
          }
        }
        trees {
          nodes {
            bucketized_split {
              feature_id: 1
              threshold: 26
              left_id: 1
              right_id: 2
            }
          }
          nodes {
            bucketized_split {
              feature_id: 0
              threshold: 50
              left_id: 3
              right_id: 4
            }
          }
          nodes {
            leaf {
              scalar: 7.0
            }
          }
          nodes {
            leaf {
              scalar: 5.0
            }
          }
          nodes {
            leaf {
              scalar: 6.0
            }
          }
        }
        trees {
          nodes {
            bucketized_split {
              feature_id: 0
              threshold: 34
              left_id: 1
              right_id: 2
            }
          }
          nodes {
            leaf {
              scalar: -7.0
            }
          }
          nodes {
            leaf {
              scalar: 5.0
            }
          }
        }
        tree_weights: 0.1
        tree_weights: 0.2
        tree_weights: 1.0
      """, tree_ensemble_config)

      # Create existing ensemble with one root split
      tree_ensemble = boosted_trees_ops.TreeEnsemble(
          'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
      tree_ensemble_handle = tree_ensemble.resource_handle
      resources.initialize_resources(resources.shared_resources()).run()

      feature_0_values = [36, 32]
      feature_1_values = [11, 27]

      # Expected logits are the tree-weight weighted sums of leaf values:
      # Example 1: tree 0: 1.14, tree 1: 5.0, tree 2: 5.0 = >
      #            logit = 0.1*1.14+0.2*5.0+1*5
      # Example 2: tree 0: 1.14, tree 1: 7.0, tree 2: -7 = >
      #            logit= 0.1*1.14+0.2*7.0-1*7.0
      expected_logits = [[6.114], [-5.486]]

      # Prediction should work fine.
      predict_op = boosted_trees_ops.predict(
          tree_ensemble_handle,
          bucketized_features=[feature_0_values, feature_1_values],
          logits_dimension=1)

      logits = session.run(predict_op)
      self.assertAllClose(expected_logits, logits)
class FeatureContribsOpsTest(test_util.TensorFlowTestCase):
"""Tests feature contribs ops for model understanding."""
def testContribsMultipleTree(self):
"""Tests that the contribs work when we have multiple trees."""
with self.cached_session() as session:
tree_ensemble_config = boosted_trees_pb2.TreeEnsemble()
text_format.Merge(
"""
trees {
nodes {
bucketized_split {
feature_id: 2
threshold: 28
left_id: 1
right_id: 2
}
metadata {
gain: 7.62
original_leaf: {scalar: 2.1}
}
}
nodes {
leaf {
scalar: 1.14
}
}
nodes {
leaf {
scalar: 8.79
}
}
}
trees {
nodes {
bucketized_split {
feature_id: 2
threshold: 26
left_id: 1
right_id: 2
}
}
nodes {
bucketized_split {
feature_id: 0
threshold: 50
left_id: 3
right_id: 4
}
metadata {
original_leaf: {scalar: 5.5}
}
}
nodes {
leaf {
scalar: 7.0
}
}
nodes {
leaf {
scalar: 5.0
}
}
nodes {
leaf {
scalar: 6.0
}
}
}
trees {
nodes {
bucketized_split {
feature_id: 0
threshold: 34
left_id: 1
right_id: 2
}
}
nodes {
leaf {
scalar: -7.0
}
}
nodes {
leaf {
scalar: 5.0
}
}
}
tree_weights: 0.1
tree_weights: 0.2
tree_weights: 1.0
tree_metadata: {
num_layers_grown: 1}
tree_metadata: {
num_layers_grown: 2}
tree_metadata: {
num_layers_grown: 1}
""", tree_ensemble_config)
tree_ensemble = boosted_trees_ops.TreeEnsemble(
'ensemble', serialized_proto=tree_ensemble_config.SerializeToString())
tree_ensemble_handle = tree_ensemble.resource_handle
resources.initialize_resources(resources.shared_resources()).run()
feature_0_values = [36, 32]
feature_1_values = [13, -29] # Unused. Feature is not in above ensemble.
feature_2_values = [11, 27]
# Expected logits are computed by traversing the logit path and
# subtracting child logits from parent logits.
bias = 2.1 * 0.1 # Root node of tree_0.
expected_feature_ids = ((2, 2, 0, 0), (2, 2, 0))
# example_0 : (bias, 0.1 * 1.14, 0.2 * 5.5 + .114, 0.2 * 5. + .114,
# 1.0 * 5.0 + 0.2 * 5. + .114)
# example_1 : (bias, 0.1 * 1.14, 0.2 * 7 + .114,
# 1.0 * -7. + 0.2 * 7 + .114)
expected_logits_paths = ((bias, 0.114, 1.214, 1.114, 6.114),
(bias, 0.114, 1.514, -5.486))
bucketized_features = [
feature_0_values, feature_1_values, feature_2_values
]
debug_op = boosted_trees_ops.example_debug_outputs(
tree_ensemble_handle,
bucketized_features=bucketized_features,
logits_dimension=1)
serialized_examples_debug_outputs = session.run(debug_op)
feature_ids = []
logits_paths = []
for example in serialized_examples_debug_outputs:
example_debug_outputs = boosted_trees_pb2.DebugOutput()
example_debug_outputs.ParseFromString(example)
feature_ids.append(example_debug_outputs.feature_ids)
logits_paths.append(example_debug_outputs.logits_path)
self.assertAllClose(feature_ids, expected_feature_ids)
self.assertAllClose(logits_paths, expected_logits_paths)
if __name__ == '__main__':
googletest.main()
| apache-2.0 |
he7d3r/revscoring | tests/languages/test_french.py | 2 | 3536 | import pickle
from revscoring.datasources import revision_oriented
from revscoring.dependencies import solve
from revscoring.languages import french
from .util import compare_extraction
BAD = [
"anus",
"baise", "baisé",
"baiz",
"batar", "batard",
"bite", "bites", "bitte",
"branler", "branlette", "branleur",
"caca", "cacas",
"caliss",
"chiant", "chiante", "chiasse",
"chie", "chié", "chienne", "chier", "chiote", "chiotte",
"con", "conar", "conard", "connar", "connard", "connards", "connasse",
"conne", "connerie", "conneries",
"couille", "couilles", "couillon",
"cul",
"debile", "débile",
"ducon",
"emmerde",
"encule", "enculer", "enculé", "enculés",
"enmerde",
"fesse", "fesses",
"fion",
"foutre",
"homosexuel",
"lesbien",
"marde", "merde", "merdes", "merdique",
"nike", "niker", "nique", "niquer",
"pd",
"pedophile", "pédophile", "pédé",
"petasse",
"pipi",
"pisse",
"poop",
"pouri", "pourri",
"prostitué", "prostituee",
"prout", "proute",
"pue", "pues",
"puta", "putain", "pute", "putes", "putin",
"pénis",
"pétasse",
"quequette",
"queu", "queue",
"salaud",
"salo", "salop", "salope", "salopes",
"sodomie", "sodomiser",
"stupide",
"suce", "sucer", "suceur", "suceuse",
"sucé",
"tapette",
"teub",
"vagin",
"zboub",
"zizi"
]
INFORMAL = [
"ahah",
"allez",
"allo",
"bisous",
"bla", "blabla", "blablabla",
"bonjour",
"coucou",
"etais",
"etes",
"haha",
"hahaha", "hahahaha", "hahahahaha",
"hihi", "hihihi",
"insérez",
"jadore",
"jai",
"kikoo",
"lol", "lool",
"mdr", "mdrr",
"moche",
"ouai",
"ouais",
"ptdr",
"truc",
"voila",
"voulez"
]
OTHER = [
"connection", "fitness", "le"
]
r_text = revision_oriented.revision.text
def test_badwords():
    """French badword matching plus a pickle round-trip of the feature."""
    matches = french.badwords.revision.datasources.matches
    compare_extraction(matches, BAD, OTHER)

    roundtripped = pickle.loads(pickle.dumps(french.badwords))
    assert french.badwords == roundtripped
def test_informals():
    """French informal-word matching plus a pickle round-trip of the feature."""
    matches = french.informals.revision.datasources.matches
    compare_extraction(matches, INFORMAL, OTHER)

    roundtripped = pickle.loads(pickle.dumps(french.informals))
    assert french.informals == roundtripped
def test_dictionary():
    """Splitting of French text into dictionary and non-dictionary words."""
    cache = {r_text: "Est un projet principe du worngly. <td>"}
    datasources = french.dictionary.revision.datasources

    in_dict = solve(datasources.dict_words, cache=cache)
    assert in_dict == ["Est", "un", "projet", "principe", "du"]

    not_in_dict = solve(datasources.non_dict_words, cache=cache)
    assert not_in_dict == ["worngly"]

    # The feature must survive a pickle round-trip unchanged.
    assert french.dictionary == pickle.loads(pickle.dumps(french.dictionary))
def test_stopwords():
    """Splitting of French text into stopwords and non-stopwords."""
    cache = {r_text: "Est un projet principe du worngly. <td>"}
    datasources = french.stopwords.revision.datasources

    assert solve(datasources.stopwords, cache=cache) == ["Est", "un", "du"]
    assert (solve(datasources.non_stopwords, cache=cache) ==
            ["projet", "principe", "worngly"])

    # The feature must survive a pickle round-trip unchanged.
    assert french.stopwords == pickle.loads(pickle.dumps(french.stopwords))
def test_stemmed():
    """Stemming of French revision text, plus a pickle round-trip."""
    cache = {r_text: "Est un projet principe du worngly. <td>"}
    stems = solve(french.stemmed.revision.datasources.stems, cache=cache)
    assert stems == ["est", "un", "projet", "princip", "du", "worngly"]

    assert french.stemmed == pickle.loads(pickle.dumps(french.stemmed))
| mit |
RuanAragao/peach | tutorial/fuzzy-logic/norms-conorms.py | 6 | 3359 | ################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: tutorial/norms-conorms.py
# How to use t-norms and s-norms (norms and conorms)
################################################################################
# We import numpy for arrays and peach for the library. Actually, peach also
# imports the numpy module, but we want numpy in a separate namespace:
import numpy
from peach.fuzzy import *
from peach.fuzzy.norms import *

# The standard operations with sets -- and thus fuzzy sets -- are intersection,
# union and complement.  Fuzzy sets, however, are an extension of classical
# sets, and there are infinitely many ways to extend those operations: hence
# the existence of norms (AND), conorms (OR) and negations (NOT).  This
# tutorial shows how to select them in Peach.

# A FuzzySet instance is returned when a membership function is applied over a
# domain.  It is, in fact, a standard array, but making it a dedicated class
# allows Peach to redefine the logical operators on it.
x = numpy.linspace(-5.0, 5.0, 500)
a = Triangle(-3.0, -1.0, 1.0)(x)
b = Triangle(-1.0, 1.0, 3.0)(x)

# set_norm, set_conorm and set_negation are *class* methods: changing the norm
# on one FuzzySet instance changes it for all of them, so we call them on the
# class itself.  First the Zadeh norms, already defined in Peach.  Note that
# the standard operators &, | and ~ are used for AND, OR and NOT:
FuzzySet.set_norm(ZadehAnd)
FuzzySet.set_conorm(ZadehOr)
aandb_zadeh = a & b             # A and B
aorb_zadeh = a | b              # A or B

# Probabilistic norms are based on the corresponding operations in
# probability.  Here we use them:
FuzzySet.set_norm(ProbabilisticAnd)
FuzzySet.set_conorm(ProbabilisticOr)
aandb_prob = a & b
aorb_prob = a | b

# Other norms are available -- please check the documentation for a complete
# list.  Some of them:
#   Norms:   ZadehAnd, ProbabilisticAnd, DrasticProduct, EinsteinProduct
#   Conorms: ZadehOr, ProbabilisticOr, DrasticSum, EinsteinSum

# Plot the results with matplotlib if it is available, saving the figure as
# 'norms-conorms.png'.
try:
    from matplotlib import *
    from matplotlib.pylab import *

    figure(1).set_size_inches(8, 6)
    a1 = axes([ 0.125, 0.555, 0.775, 0.40 ])
    a2 = axes([ 0.125, 0.125, 0.775, 0.40 ])

    # NOTE: the old Axes.hold(True) calls were removed -- hold() was
    # deprecated in matplotlib 2.0 and deleted in 3.0, and holding is the
    # default behaviour anyway, so repeated plot() calls already overlay.
    a1.plot(x, a, 'k:')
    a1.plot(x, b, 'k:')
    a1.plot(x, aandb_zadeh, 'k')
    a1.plot(x, aandb_prob, 'k-.')
    a1.set_xlim([ -5, 5 ])
    a1.set_ylim([ -0.1, 1.1 ])
    a1.set_xticks([])
    a1.set_yticks([ 0.0, 1.0 ])
    a1.legend((r'$A$', r'$B$', 'Zadeh AND', 'Prob. AND'))

    a2.plot(x, a, 'k:')
    a2.plot(x, b, 'k:')
    a2.plot(x, aorb_zadeh, 'k')
    a2.plot(x, aorb_prob, 'k-.')
    a2.set_xlim([ -5, 5 ])
    a2.set_ylim([ -0.1, 1.1 ])
    a2.set_xticks([])
    a2.set_yticks([ 0.0, 1.0 ])
    a2.legend((r'$A$', r'$B$', 'Zadeh OR', 'Prob. OR'))

    savefig("norms-conorms.png")
except ImportError:
    pass
| lgpl-2.1 |
Johncdy/Candy | Candy/object/gui/CEGUI/ScriptModules/Python/bindings/generators/cegui_base.py | 6 | 39069 | #!/usr/bin/env python2
#/***********************************************************************
# filename: generateCEGUI.py
# created: 13/8/2010
# author: Martin Preisler (with many bits taken from python ogre)
#
# purpose: Generates CEGUI binding code
#*************************************************************************/
#/***************************************************************************
# * Copyright (C) 2004 - 2013 Paul D Turner & The CEGUI Development Team
# *
# * Thanks to Roman Yakovenko for advices and great work on Py++!
# * Thanks to Andy Miller for his python-ogre CEGUI bindings!
# *
# * License: generator is GPL3 (python ogre code generators are also GPL)
# * generated code is MIT as the rest of CEGUI
import os
from pyplusplus import function_transformers as ft
from pyplusplus import messages
from pyplusplus.module_builder import call_policies
from pygccxml import declarations
import common_utils
# Name of the generated Python extension package.
PACKAGE_NAME = "PyCEGUI"
# Version string shared by all generated CEGUI binding packages.
PACKAGE_VERSION = common_utils.GLOBAL_PACKAGE_VERSION
# The Boost.Python module is named after the package.
MODULE_NAME = PACKAGE_NAME
# Generated binding sources are written into the CEGUI subdirectory of the
# common output directory.
OUTPUT_DIR = os.path.join(common_utils.OUTPUT_DIR, "CEGUI")
def filterDeclarations(mb):
# by default we exclude everything and only include what we WANT in the module
mb.global_ns.exclude()
# because of std::pair<float, float> CEGUI::Thumb::getHorzRange()
mb.global_ns.namespace("std").class_("pair<float, float>").include()
mb.global_ns.namespace("std").class_("pair<CEGUI::String, CEGUI::String>").include()
mb.global_ns.namespace("std").class_("pair<CEGUI::Image*, CEGUI::ImageFactory*>").include()
CEGUI_ns = mb.global_ns.namespace("CEGUI")
### CORE ###
# Affector.h
affector = CEGUI_ns.class_("Affector")
affector.include()
# Animation.h
animation = CEGUI_ns.class_("Animation")
animation.include()
# Animation_xmlHandler.h
# not interesting for python users
# AnimationInstance.h
animationInstance = CEGUI_ns.class_("AnimationInstance")
animationInstance.include()
# AnimationManager.h
animationManager = CEGUI_ns.class_("AnimationManager")
animationManager.include()
# Base.h
# nothing interesting for python users
# todo: maybe pixel align?
# BasicImage.h
basicImage = CEGUI_ns.class_("BasicImage")
basicImage.include()
# BasicInterpolators.h
# nothing interesting for python users
# BasicRenderedStringParser.h
# todo: is this interesting for python?
basicRenderedStringParser = CEGUI_ns.class_("BasicRenderedStringParser")
basicRenderedStringParser.include()
# BiDiVisualMapping.h
# Not exposed since this might be disabled at configure time
#CEGUI_ns.enum("BidiCharType").include()
#bidiVisualMapping = CEGUI_ns.class_("BidiVisualMapping")
#bidiVisualMapping.include()
# BoundSlot.h
boundSlot = CEGUI_ns.class_("BoundSlot")
boundSlot.include()
# also include ref counted variant
CEGUI_ns.class_("RefCounted<CEGUI::BoundSlot>").include()
# CentredRenderedString.h
# todo: is this interesting for python?
centredRenderedString = CEGUI_ns.class_("CentredRenderedString")
centredRenderedString.include()
# ChainedXMLHandler.h
#chainedXMLHandler = CEGUI_ns.class_("ChainedXMLHandler")
#chainedXMLHandler.include()
# Clipboard.h
#nativeClipboardProvider = CEGUI_ns.class_("NativeClipboardProvider")
#nativeClipboardProvider.include()
clipboard = CEGUI_ns.class_("Clipboard")
clipboard.include()
# we don't allow native clipboard subclassing/setting from python (for now?)
clipboard.mem_fun("setNativeProvider").exclude()
clipboard.mem_fun("getNativeProvider").exclude()
# exclude methods that are hard to use from within python
clipboard.mem_fun("setData").exclude()
clipboard.mem_fun("getData").exclude()
# Colour.h
colour = CEGUI_ns.class_("Colour")
colour.include()
# ColourRect.h
colourRect = CEGUI_ns.class_("ColourRect")
colourRect.include()
# Config.h
# nothing interesting for python
# Config_xmlHandler.h
# nothing interesting for python
# CoordConverter.h
coordConverter = CEGUI_ns.class_("CoordConverter")
coordConverter.include()
# DataContainer.h
rawDataContainer = CEGUI_ns.class_("RawDataContainer")
rawDataContainer.include()
rawDataContainer.mem_funs().exclude()
# DefaultLogger.h
defaultLogger = CEGUI_ns.class_("DefaultLogger")
defaultLogger.include()
# DefaultRenderedStringParser.h
# not interesting for python
# DefaultResourceProvider.h
defaultResourceProvider = CEGUI_ns.class_("DefaultResourceProvider")
defaultResourceProvider.include()
# DynamicModule.h
# not doable in python
# /Element.h
element = CEGUI_ns.class_("Element")
element.include()
element.class_("CachedRectf").constructors().exclude()
# Event.h
event = CEGUI_ns.class_("Event")
event.include()
# EventArgs.h
# handled separately, all classes ending with "EventArgs" are included
# EventSet.h
eventSet = CEGUI_ns.class_("EventSet")
eventSet.include()
# this is done via custom code
eventSet.mem_funs("subscribeEvent").exclude()
eventSet.noncopyable = True
# we use depth first to make sure classes "deep in the hierarchy" gets their casts tried first
def collectEventArgsDerived(node):
    """Collect *node* and every class transitively derived from it.

    The traversal is depth first and post-order: each subtree of derived
    classes is listed before the class itself, so the most derived classes
    come first in the result (their casts get tried first in the generated
    dispatch code).
    """
    subtrees = [collectEventArgsDerived(link.related_class)
                for link in node.derived]
    return [cls for subtree in subtrees for cls in subtree] + [node]
eventArgsCastCode = ""
for derived in collectEventArgsDerived(CEGUI_ns.class_("EventArgs")):
eventArgsCastCode += "if (dynamic_cast<const CEGUI::%s*>(&args))\n" % (derived.name)
eventArgsCastCode += "{\n"
eventArgsCastCode += " return boost::python::call<bool>(d_callable, static_cast<const CEGUI::%s&>(args));\n" % (derived.name)
eventArgsCastCode += "}\n\n"
eventSet.add_declaration_code(
"""
class PythonEventSubscription
{
public:
PythonEventSubscription(PyObject* callable):
d_callable(boost::python::incref(callable))
{}
PythonEventSubscription(const PythonEventSubscription& other):
d_callable(boost::python::incref(other.d_callable))
{}
~PythonEventSubscription()
{
boost::python::decref(d_callable);
}
bool operator() (const CEGUI::EventArgs& args) const
{
// FIXME: We have to cast, otherwise only base class gets to python!
// I don't understand why this is happening, I think boost::python should use typeid(args).name() and deduce that it's a
// derived class, not CEGUI::EventArgs base class
// However this is not happening so I have to go through all EventArgs classes and try casting one after another
""" + eventArgsCastCode + """
}
PyObject* d_callable;
};
class PythonEventConnection
{
public:
PythonEventConnection():
d_connection()
{}
PythonEventConnection(const CEGUI::Event::Connection& connection):
d_connection(connection)
{}
PythonEventConnection(const PythonEventConnection& v):
d_connection(v.d_connection)
{}
bool connected()
{
return d_connection.isValid() ? d_connection->connected() : false;
}
void disconnect()
{
// TODO: Throw on invalid disconnects?
if (d_connection.isValid())
{
d_connection->disconnect();
}
}
private:
CEGUI::Event::Connection d_connection;
};
PythonEventConnection EventSet_subscribeEvent(CEGUI::EventSet* self, const CEGUI::String& name, PyObject* callable)
{
return PythonEventConnection(self->subscribeEvent(name, PythonEventSubscription(callable)));
}
"""
)
eventSet.add_registration_code(
"""
def( "subscribeEvent", &EventSet_subscribeEvent);
{ // PythonEventConnection
typedef bp::class_< PythonEventConnection > PythonEventConnection_exposer_t;
PythonEventConnection_exposer_t PythonEventConnection_exposer = PythonEventConnection_exposer_t( "PythonEventConnection" );
bp::scope PythonEventConnection_scope( PythonEventConnection_exposer );
PythonEventConnection_exposer.def( bp::init<>() );
{
typedef bool ( PythonEventConnection::*connected_function_type )( ) ;
PythonEventConnection_exposer.def(
"connected"
, connected_function_type( &PythonEventConnection::connected ) );
}
{
typedef void ( PythonEventConnection::*disconnect_function_type )( ) ;
PythonEventConnection_exposer.def(
"disconnect"
, disconnect_function_type( &PythonEventConnection::disconnect ) );
}
}
"""
)
# Exceptions.h
# handled separately
# FactoryModule.h
# not doable in python
# Font.h
font = CEGUI_ns.class_("Font", recursive = False)
font.include()
# Font_xmlHandler.h
# not interesting for python
# FontGlyph.h
fontGlyph = CEGUI_ns.class_("FontGlyph")
fontGlyph.include()
# FontManager.h
fontManager = CEGUI_ns.class_("FontManager")
fontManager.include()
# FormattedRenderedString.
# todo: is this interesting for python?
# ForwardRefs.h
# hell no
# FreeFunctionSlot.h
# handled differently elsewhere, see EventSet
# FreeTypeFont.h
# not interesting for python
# FribidiVisualMapping.h
# not interesting for python
# FunctorCopySlot.h
# handled differently elsewhere, see EventSet
# FunctorPointerSlot.h
# handled differently elsewhere, see EventSet
# FunctorReferenceBinder.h
# handled differently elsewhere, see EventSet
# FunctorReferenceSlot.h
# handled differently elsewhere, see EventSet
# GeometryBuffer.h
geometryBuffer = CEGUI_ns.class_("GeometryBuffer")
geometryBuffer.include()
# GlobalEventSet.h
globalEventSet = CEGUI_ns.class_("GlobalEventSet")
globalEventSet.include()
# GUIContext.h
guiContext = CEGUI_ns.class_("GUIContext")
guiContext.include()
for decl in guiContext.mem_funs("getMouseCursor"):
if decl.has_const:
decl.exclude()
# GUILayout_xmlHandler.h
# not needed in python
# Image.h
autoScaledModeEnum = CEGUI_ns.enum("AutoScaledMode")
autoScaledModeEnum.include()
image = CEGUI_ns.class_("Image")
image.include()
# ImageCodec.h
imageCodec = CEGUI_ns.class_("ImageCodec")
imageCodec.include()
# ImageManager.h
imageManager = CEGUI_ns.class_("ImageManager")
imageManager.include()
# InputEvent.h
key = CEGUI_ns.class_("Key")
key.include()
mouseButtonEnum = CEGUI_ns.enum("MouseButton")
mouseButtonEnum.include()
# Interpolator.h
interpolator = CEGUI_ns.class_("Interpolator")
interpolator.include()
# IteratorBase.h
# all iterators are sorted later
# JustifiedRenderedString.h
# not needed in python
# KeyFrame.h
keyFrame = CEGUI_ns.class_("KeyFrame")
keyFrame.include()
# LeftAlignedRenderedString.h
# not needed in python
# Logger.h
logger = CEGUI_ns.class_("Logger")
logger.include()
loggingLevelEnum = CEGUI_ns.enum("LoggingLevel")
loggingLevelEnum.include()
# MemberFunctionSlot.h
# sorted elsewhere, see EventSet
# MinibidiVisualMapping.h
# not needed for python
# MinizipResourceProvider.h
# not needed for python
# MouseCursor.h
mouseCursor = CEGUI_ns.class_("MouseCursor")
mouseCursor.include()
mouseCursor.noncopyable = True
# NamedElement.h
namedElement = CEGUI_ns.class_("NamedElement")
namedElement.include()
# NamedXMLResourceManager.h
xmlResourceExistsActionEnum = CEGUI_ns.enum("XMLResourceExistsAction")
xmlResourceExistsActionEnum.include()
resourceEventSet = CEGUI_ns.class_("ResourceEventSet")
resourceEventSet.include()
# PCRERegexMatcher.h
# not needed in python
# PixmapFont.h
# not needed in python
# Property.h
propertyReceiver = CEGUI_ns.class_("PropertyReceiver")
propertyReceiver.include()
property = CEGUI_ns.class_("Property")
property.include()
# PropertyHelper.h
propertyHelper = CEGUI_ns.class_("PropertyHelper_wrapper")
propertyHelper.include()
propertyHelper.rename("PropertyHelper")
# PropertySet.h
propertySet = CEGUI_ns.class_("PropertySet")
propertySet.include()
# Quaternion.h
quaternion = CEGUI_ns.class_("Quaternion")
quaternion.include()
# Rect.h
rectf = CEGUI_ns.class_("Rect<float>")
rectf.rename("Rectf")
rectf.include()
urect = CEGUI_ns.class_("Rect<CEGUI::UDim>")
urect.rename("URect")
urect.include()
# UDim doesn't have the necessary operators for this
urect.member_function("getIntersection").exclude()
urect.member_function("isPointInRect").exclude()
urect.member_function("constrainSizeMin").exclude()
urect.member_function("constrainSizeMax").exclude()
urect.member_function("constrainSize").exclude()
# RefCounted.h
# handled elsewhere
# RefexMatcher.h
regexMatcher = CEGUI_ns.class_("RegexMatcher")
regexMatcher.include()
# RenderedString.h
renderedString = CEGUI_ns.class_("RenderedString")
renderedString.include()
# RenderedStringComponent.h
renderedStringComponent = CEGUI_ns.class_("RenderedStringComponent")
renderedStringComponent.include()
# RenderedStringImageComponent.h
renderedStringImageComponent = CEGUI_ns.class_("RenderedStringImageComponent")
renderedStringImageComponent.include()
# RenderedStringTextComponent.h
renderedStringTextComponent = CEGUI_ns.class_("RenderedStringTextComponent")
renderedStringTextComponent.include()
# RenderedStringParser.h
renderedStringParser = CEGUI_ns.class_("RenderedStringParser")
renderedStringParser.include()
# RenderedStringWidgetComponent.h
renderedStringWidgetComponent = CEGUI_ns.class_("RenderedStringWidgetComponent")
renderedStringWidgetComponent.include()
# RenderEffect.h
renderEffect = CEGUI_ns.class_("RenderEffect")
renderEffect.include()
# RenderEffectFactory.h
renderEffectFactory = CEGUI_ns.class_("RenderEffectFactory")
renderEffectFactory.include()
# RenderEffectManager.h
renderEffectManager = CEGUI_ns.class_("RenderEffectManager")
renderEffectManager.include()
# Renderer.h
quadSplitModeEnum = CEGUI_ns.enum("QuadSplitMode")
quadSplitModeEnum.include()
blendModeEnum = CEGUI_ns.enum("BlendMode")
blendModeEnum.include()
renderer = CEGUI_ns.class_("Renderer")
renderer.include()
# RenderingContext.h
renderingContext = CEGUI_ns.class_("RenderingContext")
renderingContext.include()
# RenderingSurface.h
CEGUI_ns.enum("RenderQueueID").include()
renderingSurface = CEGUI_ns.class_("RenderingSurface")
renderingSurface.include()
# RenderingWindow.h
renderingWindow = CEGUI_ns.class_("RenderingWindow")
renderingWindow.include()
# RenderQueue.h
renderQueue = CEGUI_ns.class_("RenderQueue")
renderQueue.include()
# RenderTarget.h
renderTarget = CEGUI_ns.class_("RenderTarget")
renderTarget.include()
# ResourceProvider.h
resourceProvider = CEGUI_ns.class_("ResourceProvider")
resourceProvider.include()
# RightAlignedRenderedString.h
# not needed in python
# Scheme.h
scheme = CEGUI_ns.class_("Scheme")
scheme.include()
# SchemeManager.h
schemeManager = CEGUI_ns.class_("SchemeManager")
schemeManager.include()
# ScriptModule.h
scriptModule = CEGUI_ns.class_("ScriptModule")
scriptModule.include()
# SimpleTimer.h
simpleTimer = CEGUI_ns.class_("SimpleTimer")
simpleTimer.include()
# Singleton.h
# handled separately
# Size.h
CEGUI_ns.enum("AspectMode").include()
size = CEGUI_ns.class_("Size<float>")
size.rename("Sizef")
size.include()
usize = CEGUI_ns.class_("Size<CEGUI::UDim>")
usize.rename("USize")
usize.include()
# UDim doesn't have the necessary operators for this
usize.member_function("clamp").exclude()
usize.member_function("scaleToAspect").exclude()
# SlotFunctorBase.h
# not needed in python
# String.h
string = CEGUI_ns.class_("String")
#string.include()
# String.h
stringTranscoder = CEGUI_ns.class_("StringTranscoder")
# not useful from Python and very hard to wrap
#stringTranscoder.include()
# SubscriberSlot.h
# todo: probably not needed with current setup, should we exclude this?
subscriberSlot = CEGUI_ns.class_("SubscriberSlot")
subscriberSlot.include()
# exclude the constructor that takes function pointer as argument
for c in subscriberSlot.constructors():
if len(c.arguments) > 0:
c.exclude()
# System.h
system = CEGUI_ns.class_("System")
system.include()
common_utils.excludeAllPrivate(system)
system.mem_fun("getStringTranscoder").exclude()
# SystemKeys.h
systemKeys = CEGUI_ns.class_("SystemKeys")
systemKeys.include()
# Texture.h
texture = CEGUI_ns.class_("Texture")
texture.include()
# TextureTarget.h
textureTarget = CEGUI_ns.class_("TextureTarget")
textureTarget.include()
# TextUtils.h
textUtils = CEGUI_ns.class_("TextUtils")
textUtils.include()
# TplWindowFactory.h
# TplWindowRendererFactory.h
# TplWRFactoryRegisterer.h
# python doesn't like templates :-)
# UDim.h
udim = CEGUI_ns.class_("UDim")
udim.include()
ubox = CEGUI_ns.class_("UBox")
ubox.include()
# Vector.h
vector2f = CEGUI_ns.class_("Vector2<float>")
vector2f.rename("Vector2f")
vector2f.include()
vector3f = CEGUI_ns.class_("Vector3<float>")
vector3f.rename("Vector3f")
vector3f.include()
uvector2 = CEGUI_ns.class_("Vector2<CEGUI::UDim>")
uvector2.rename("UVector2")
uvector2.include()
# Version.h
# nothing usable for python
# Vertex.h
vertex = CEGUI_ns.class_("Vertex")
vertex.include()
# WidgetModule.h
# not needed in python
# Window.h
CEGUI_ns.enum("VerticalAlignment").include()
CEGUI_ns.enum("HorizontalAlignment").include()
CEGUI_ns.enum("WindowUpdateMode").include()
window = CEGUI_ns.class_("Window")
window.include()
# BidiVisualMapping is excluded from python
window.mem_fun("getBidiVisualMapping").exclude()
# python doesn't like void*
window.mem_fun("setUserData").exclude()
window.mem_fun("getUserData").exclude()
# todo: check that getUserData is really a python object
window.add_declaration_code(
"""
void
Window_setUserData ( ::CEGUI::Window & me, PyObject * data ) {
me.setUserData ( data );
}
PyObject *
Window_getUserData ( ::CEGUI::Window & me) {
void * data = me.getUserData ( );
Py_INCREF( (PyObject *) data ); // I'm passing a reference to this object so better inc the ref :)
return (PyObject *) data;
}
typedef bool ( ::CEGUI::Window::*isChild_string_function_type )( const ::CEGUI::String& ) const;
typedef bool ( ::CEGUI::Window::*isChild_ptr_function_type )( const ::CEGUI::Element* ) const;
typedef bool ( ::CEGUI::Window::*isAncestor_string_function_type )( const ::CEGUI::String& ) const;
typedef bool ( ::CEGUI::Window::*isAncestor_ptr_function_type )( const ::CEGUI::Element* ) const;
typedef void ( ::CEGUI::Window::*removeChild_string_function_type )( const ::CEGUI::String& );
typedef void ( ::CEGUI::Window::*removeChild_ptr_function_type )( ::CEGUI::Element* );
"""
)
window.add_registration_code("""def ("setUserData", &::Window_setUserData);""")
window.add_registration_code("""def ("getUserData", &::Window_getUserData);""")
window.add_registration_code("""def ("isChild", isChild_string_function_type(&::CEGUI::Window::isChild));""")
window.add_registration_code("""def ("isChild", isChild_ptr_function_type(&::CEGUI::Window::isChild));""")
window.add_registration_code("""def ("isAncestor", isAncestor_string_function_type(&::CEGUI::Window::isAncestor));""")
window.add_registration_code("""def ("isAncestor", isAncestor_ptr_function_type(&::CEGUI::Window::isAncestor));""")
window.add_registration_code("""def ("removeChild", removeChild_string_function_type(&::CEGUI::Window::removeChild));""")
window.add_registration_code("""def ("removeChild", removeChild_ptr_function_type(&::CEGUI::Window::removeChild));""")
# WindowFactory.h
windowFactory = CEGUI_ns.class_("WindowFactory")
windowFactory.include()
# WindowFactoryManager.h
windowFactoryManager = CEGUI_ns.class_("WindowFactoryManager")
windowFactoryManager.include()
# WindowManager.h
windowManager = CEGUI_ns.class_("WindowManager")
windowManager.include()
# we do all the layout loading in custom code because we can't use PropertyCallback in python
windowManager.mem_fun("loadLayoutFromContainer").exclude()
windowManager.add_declaration_code(
"""
CEGUI::Window*
WindowManager_loadLayoutFromContainer(::CEGUI::WindowManager & me,
const CEGUI::RawDataContainer& container)
{
return me.loadLayoutFromContainer(container);
}
"""
)
windowManager.add_registration_code(
"""
def ("loadLayoutFromContainer", &::WindowManager_loadLayoutFromContainer,\
(bp::arg("source")), \
bp::return_value_policy<bp::reference_existing_object, bp::default_call_policies>());
"""
)
windowManager.mem_fun("loadLayoutFromFile").exclude()
windowManager.add_declaration_code(
"""
CEGUI::Window*
WindowManager_loadLayoutFromFile(::CEGUI::WindowManager & me,
const CEGUI::String& filename, const CEGUI::String& resourceGroup = "")
{
return me.loadLayoutFromFile(filename, resourceGroup);
}
"""
)
windowManager.add_registration_code(
"""
def ("loadLayoutFromFile", &::WindowManager_loadLayoutFromFile,\
(bp::arg("filename"), bp::arg("resourceGroup") = ""), \
bp::return_value_policy<bp::reference_existing_object, bp::default_call_policies>());
"""
)
windowManager.mem_fun("loadLayoutFromString").exclude()
windowManager.add_declaration_code(
"""
CEGUI::Window*
WindowManager_loadLayoutFromString(::CEGUI::WindowManager & me,
const CEGUI::String& source)
{
return me.loadLayoutFromString(source);
}
"""
)
windowManager.add_registration_code(
"""
def ("loadLayoutFromString", &::WindowManager_loadLayoutFromString,\
(bp::arg("source")), \
bp::return_value_policy<bp::reference_existing_object, bp::default_call_policies>());
"""
)
# WindowProperties.h
# not needed in python
# WindowRenderer.h
windowRenderer = CEGUI_ns.class_("WindowRenderer", recursive = False)
windowRenderer.include()
windowRendererFactory = CEGUI_ns.class_("WindowRendererFactory")
windowRendererFactory.include()
# WindowRendererManager.h
windowRendererManager = CEGUI_ns.class_("WindowRendererManager")
windowRendererManager.include()
# WindowRendererModule.h
# not needed in python
# WRFactoryRegisterer.h
# not needed in python
# XMLAttributes.h
xmlAttributes = CEGUI_ns.class_("XMLAttributes")
xmlAttributes.include()
# XMLHandler.h
xmlHandler = CEGUI_ns.class_("XMLHandler")
xmlHandler.include()
# XMLParser.h
xmlParser = CEGUI_ns.class_("XMLParser")
xmlParser.include()
# XMLSerializer.h
xmlSerializer = CEGUI_ns.class_("XMLSerializer")
xmlSerializer.include()
# all EventArgs
for cls in CEGUI_ns.classes():
if cls.name.endswith("EventArgs"):
cls.include()
# all Iterator classes
for cls in CEGUI_ns.classes():
if not declarations.templates.is_instantiation(cls.name):
continue
tplName = declarations.templates.name(cls.name)
if not tplName.endswith("Iterator"):
continue
# now cls is sure to be "Iterator" class
cls.include()
# we only allow default parameter-less constructors
# se lets exclude every constructor that has parameters
cls.constructors(lambda decl: bool(decl.arguments), allow_empty = True, recursive = False).exclude()
if tplName.endswith("BaseIterator"):
import hashlib
# note: I don't like what I am doing here but it's a lazy solution to
# an annoying problem of windows not being able to handle long paths :-/
#
# These are base classes, never instantiated anyways so I think
# it doesn't do much except look nasty :-D
cls.rename("ConstBaseIterator_" + hashlib.md5(cls.name).hexdigest())
else:
# we have to make aliases for operator++ and operator--, these are not supported by python
# instead of operator++, we use next(self) and instead of operator--, we use previous(self)
cls.add_declaration_code(
"""
void Iterator_next(::CEGUI::%s& t)
{
t++;
}
void Iterator_previous(::CEGUI::%s& t)
{
t--;
}
""" % (cls.name, cls.name))
cls.add_registration_code('def("next", &::Iterator_next)')
cls.add_registration_code('def("previous", &::Iterator_previous)')
### ELEMENTS ###
# we always skip the Properties files since they are of no use to python users
# elements/ButtonBase.h
buttonBase = CEGUI_ns.class_("ButtonBase")
buttonBase.include()
# elements/Combobox.h
combobox = CEGUI_ns.class_("Combobox")
combobox.include()
# elements/ComboDropList.h
comboDropList = CEGUI_ns.class_("ComboDropList")
comboDropList.include()
# elements/DefaultWindow.h
defaultWindow = CEGUI_ns.class_("DefaultWindow")
defaultWindow.include()
# elements/DragContainer.h
dragContainer = CEGUI_ns.class_("DragContainer")
dragContainer.include()
# elements/Editbox.h
editbox = CEGUI_ns.class_("Editbox")
editbox.include()
# elements/FrameWindow.h
frameWindow = CEGUI_ns.class_("FrameWindow")
frameWindow.include()
# elements/GridLayoutContainer.h
gridLayoutContainer = CEGUI_ns.class_("GridLayoutContainer")
gridLayoutContainer.include()
# FIXME: Temporary, this should work with transformations
gridLayoutContainer.mem_fun("mapFromIdxToGrid").exclude()
# elements/GroupBox.h
groupBox = CEGUI_ns.class_("GroupBox")
groupBox.include()
# elements/HorizontalLayoutContainer.h
horizontalLayoutContainer = CEGUI_ns.class_("HorizontalLayoutContainer")
horizontalLayoutContainer.include()
# elements/ItemEntry.h
itemEntry = CEGUI_ns.class_("ItemEntry")
itemEntry.include()
# elements/ItemListBase.h
itemListBase = CEGUI_ns.class_("ItemListBase")
itemListBase.include()
# needs function pointer as argument
itemListBase.mem_fun("setSortCallback").exclude()
# elements/ItemListbox.h
itemListbox = CEGUI_ns.class_("ItemListbox")
itemListbox.include()
# elements/LayoutContainer.h
layoutContainer = CEGUI_ns.class_("LayoutContainer")
layoutContainer.include()
# elements/Listbox.h
listbox = CEGUI_ns.class_("Listbox")
listbox.include()
# we have to handle this separately because CEGUI is nasty and will deallocate this for us unless
# we remove it from it, also we have to prevent python from destroying the object
listbox.mem_fun("addItem").exclude()
listbox.mem_fun("removeItem").exclude()
listbox.add_declaration_code(
"""
void
Listbox_addItem(CEGUI::Listbox& self, PyObject* item)
{
CEGUI::ListboxItem* nativeItem = boost::python::extract<CEGUI::ListboxItem*>(boost::python::incref(item));
// passed from python so don't delete it!
nativeItem->setAutoDeleted(false);
self.addItem(nativeItem);
}
void
Listbox_removeItem(CEGUI::Listbox& self, PyObject* item)
{
CEGUI::ListboxItem* nativeItem = boost::python::extract<CEGUI::ListboxItem*>(item);
// don't delete it, python will take care of it
nativeItem->setAutoDeleted(false);
self.removeItem(nativeItem);
boost::python::decref(item);
}
"""
)
listbox.add_registration_code('def ("addItem", &::Listbox_addItem, (bp::arg("item")));')
listbox.add_registration_code('def ("removeItem", &::Listbox_removeItem, (bp::arg("item")));')
# elements/ListboxItem.h
listboxItem = CEGUI_ns.class_("ListboxItem")
listboxItem.include()
# elements/ListboxTextItem.h
listboxTextItem = CEGUI_ns.class_("ListboxTextItem")
listboxTextItem.include()
# elements/ListHeader.h
listHeader = CEGUI_ns.class_("ListHeader")
listHeader.include()
# elements/ListHeaderSegment.h
listHeaderSegment = CEGUI_ns.class_("ListHeaderSegment")
listHeaderSegment.include()
# elements/Menubar.h
menubar = CEGUI_ns.class_("Menubar")
menubar.include()
# elements/MenuBase.h
menuBase = CEGUI_ns.class_("MenuBase")
menuBase.include()
# elements/MenuItem.h
menuItem = CEGUI_ns.class_("MenuItem")
menuItem.include()
# elements/MultiColumnList.h
CEGUI_ns.class_("MCLGridRef").include()
multiColumnList = CEGUI_ns.class_("MultiColumnList")
multiColumnList.include()
# internal private class
multiColumnList.class_("ListRow").exclude()
# internal
multiColumnList.mem_fun("pred_descend").exclude()
# elements/MultiLineEditbox.h
multiLineEditbox = CEGUI_ns.class_("MultiLineEditbox")
multiLineEditbox.include()
# elements/PopupMenu.h
popupMenu = CEGUI_ns.class_("PopupMenu")
popupMenu.include()
# elements/ProgressBar.h
progressBar = CEGUI_ns.class_("ProgressBar")
progressBar.include()
# elements/PushButton.h
pushButton = CEGUI_ns.class_("PushButton")
pushButton.include()
# elements/RadioButton.h
radioButton = CEGUI_ns.class_("RadioButton")
radioButton.include()
# elements/ScrollablePane.h
scrollablePane = CEGUI_ns.class_("ScrollablePane")
scrollablePane.include()
# elements/Scrollbar.h
scrollbar = CEGUI_ns.class_("Scrollbar")
scrollbar.include()
# elements/ScrolledContainer.h
scrolledContainer = CEGUI_ns.class_("ScrolledContainer")
scrolledContainer.include()
# elements/ScrolledItemListBase.h
scrolledItemListBase = CEGUI_ns.class_("ScrolledItemListBase")
scrolledItemListBase.include()
# elements/SequentialLayoutContainer.h
sequentialLayoutContainer = CEGUI_ns.class_("SequentialLayoutContainer")
sequentialLayoutContainer.include()
# elements/Slider.h
slider = CEGUI_ns.class_("Slider")
slider.include()
# elements/Spinner.h
spinner = CEGUI_ns.class_("Spinner")
spinner.include()
# elements/TabButton.h
tabButton = CEGUI_ns.class_("TabButton")
tabButton.include()
# elements/TabButton.h
tabControl = CEGUI_ns.class_("TabControl")
tabControl.include()
# elements/Thumb.h
thumb = CEGUI_ns.class_("Thumb")
thumb.include()
# elements/Titlebar.h
titlebar = CEGUI_ns.class_("Titlebar")
titlebar.include()
# elements/ToggleButton.h
toggleButton = CEGUI_ns.class_("ToggleButton")
toggleButton.include()
# elements/Tooltip.h
tooltip = CEGUI_ns.class_("Tooltip", recursive = False)
tooltip.include()
# elements/Tree.h
tree = CEGUI_ns.class_("Tree")
tree.include()
# todo: this doesn't work, needs function transformation
tree.mem_fun("getNextSelectedItemFromList").exclude()
# elements/TreeItem.h
treeItem = CEGUI_ns.class_("TreeItem")
treeItem.include()
# elements/VerticalLayoutContainer.h
verticalLayoutContainer = CEGUI_ns.class_("VerticalLayoutContainer")
verticalLayoutContainer.include()
### FALAGARD ###
# falagard/FalComponentBase.h
falagardComponentBase = CEGUI_ns.class_("FalagardComponentBase")
falagardComponentBase.include()
# falagard/FalDimensions.h
CEGUI_ns.class_("BaseDim").include()
CEGUI_ns.class_("OperatorDim").include()
CEGUI_ns.class_("AbsoluteDim").include()
CEGUI_ns.class_("ImageDim").include()
CEGUI_ns.class_("ImagePropertyDim").include()
CEGUI_ns.class_("WidgetDim").include()
CEGUI_ns.class_("UnifiedDim").include()
CEGUI_ns.class_("FontDim").include()
CEGUI_ns.class_("PropertyDim").include()
CEGUI_ns.class_("Dimension").include()
CEGUI_ns.class_("ComponentArea").include()
# falagard/FalEnums.h
CEGUI_ns.enum("DimensionType").include()
CEGUI_ns.enum("VerticalFormatting").include()
CEGUI_ns.enum("HorizontalFormatting").include()
CEGUI_ns.enum("VerticalTextFormatting").include()
CEGUI_ns.enum("HorizontalTextFormatting").include()
CEGUI_ns.enum("FontMetricType").include()
CEGUI_ns.enum("DimensionOperator").include()
CEGUI_ns.enum("FrameImageComponent").include()
CEGUI_ns.enum("ChildEventAction").include()
# falagard/EventAction.h
eventAction = CEGUI_ns.class_("EventAction")
eventAction.include()
# falagard/FalEventLinkDefinition.h
eventLinkDefinition = CEGUI_ns.class_("EventLinkDefinition")
eventLinkDefinition.include()
# falagard/FalFrameComponent.h
frameComponent = CEGUI_ns.class_("FrameComponent")
frameComponent.include()
# falagard/FalImageryComponent.h
imageryComponent = CEGUI_ns.class_("ImageryComponent")
imageryComponent.include()
# falagard/FalImagerySection.h
imagerySection = CEGUI_ns.class_("ImagerySection")
imagerySection.include()
# falagard/FalLayerSpecification.h
layerSpecification = CEGUI_ns.class_("LayerSpecification")
layerSpecification.include()
# falagard/FalNamedArea.h
namedArea = CEGUI_ns.class_("NamedArea")
namedArea.include()
# falagard/FalPropertyDefinition.h
# FIXME: This is now a template and CE did not know what to do!
#propertyDefinition = CEGUI_ns.class_("PropertyDefinition")
#propertyDefinition.include()
# falagard/FalPropertyDefinitionBase.h
propertyDefinitionBase = CEGUI_ns.class_("PropertyDefinitionBase")
propertyDefinitionBase.include()
# falagard/FalPropertyInitialiser.h
propertyInitialiser = CEGUI_ns.class_("PropertyInitialiser")
propertyInitialiser.include()
# falagard/FalPropertyLinkDefinition.h
# FIXME: This is now a template and CE did not know what to do!
#propertyLinkDefinition = CEGUI_ns.class_("PropertyLinkDefinition")
#propertyLinkDefinition.include()
# falagard/FalSectionSpecification.h
sectionSpecification = CEGUI_ns.class_("SectionSpecification")
sectionSpecification.include()
# falagard/FalStateImagery.h
stateImagery = CEGUI_ns.class_("StateImagery")
stateImagery.include()
# falagard/FalTextComponent.h
textComponent = CEGUI_ns.class_("TextComponent")
textComponent.include()
# falagard/FalWidgetComponent.h
widgetComponent = CEGUI_ns.class_("WidgetComponent")
widgetComponent.include()
# falagard/FalWidgetLookFeel.h
widgetLookFeel = CEGUI_ns.class_("WidgetLookFeel")
widgetLookFeel.include()
# falagard/FalWidgetLookManager.h
widgetLookManager = CEGUI_ns.class_("WidgetLookManager")
widgetLookManager.include()
# falagard/FalXMLEnumHelper.h
falagardXMLHelper = CEGUI_ns.class_("FalagardXMLHelper_wrapper")
falagardXMLHelper.include()
falagardXMLHelper.rename("FalagardXMLHelper")
# todo: hack fixes
# taken from python ogre, causes AttributeError at import if not excluded
for cls in CEGUI_ns.classes():
try:
cls.variable("EventNamespace").exclude()
except:
pass
## turns out that in SOME classes this also fails registration (Combodroplist for example)
for cls in CEGUI_ns.classes():
try:
cls.variable("WidgetTypeName").exclude()
except:
pass
# taken from python ogre
## lets work around a bug in GCCXMl - http://language-binding.net/pygccxml/design.html#patchers
draws = mb.mem_funs("draw") # find all the draw functions
for draw in draws:
for arg in draw.arguments:
if arg.default_value == "0ffffffff":
arg.default_value = "0xffffffff"
for cls in CEGUI_ns.classes():
if cls.name.startswith("Singleton"):
cls.include()
if cls.name.startswith("NamedXMLResourceManager"):
cls.include()
# WORKAROUND: would not compile on Windows, pyplusplus substitutes the internal _Rb_tree_iterator
cls.mem_fun("destroyObject").exclude()
# no need for this function, just use getSingleton
mb.mem_funs("getSingletonPtr").exclude()
# at no point should you need any private methods
common_utils.excludeAllPrivate(CEGUI_ns)
def configureExceptions(mb):
    """Expose CEGUI::Exception minimally and translate it to RuntimeError."""
    cegui_exception = mb.namespace("CEGUI").class_("Exception")
    cegui_exception.include()
    # hide every member first, then re-expose only the stderr toggles
    cegui_exception.mem_funs().exclude()
    for fun_name in ("setStdErrEnabled", "isStdErrEnabled"):
        cegui_exception.mem_fun(fun_name).include()
    cegui_exception.translate_exception_to_string("PyExc_RuntimeError", "exc.what()")
def generate():
    """Drive the whole py++ binding generation for the CEGUI module.

    The order of the calls below matters: string converters must be
    registered before any declaration filtering, and nothing may be
    customized after build_code_creator() runs.
    """
    ### disable unnecessary warnings
    # can't pass function pointer
    messages.disable(messages.W1004)
    # operator not supported
    messages.disable(messages.W1014)
    # py++ will create a wrapper
    messages.disable(messages.W1023, messages.W1025, messages.W1026, messages.W1027, messages.W1031)
    # static pointer member can't be exported
    messages.disable(messages.W1035)
    # immutable pointer can't be exposed
    messages.disable(messages.W1036)
    # pointer to function can't be exposed
    messages.disable(messages.W1036, messages.W1037)
    # can't be overridden in python
    messages.disable(messages.W1049)
    mb = common_utils.createModuleBuilder("python_CEGUI.h", ["CEGUIBASE_EXPORTS"])
    CEGUI_ns = mb.global_ns.namespace("CEGUI")
    # first thing to do - converters!
    # !!! They have to be registered first, otherwise it will all fail horribly !!!
    common_utils.addStringConverters(mb)
    common_utils.addSupportForString(mb)
    filterDeclarations(mb)
    configureExceptions(mb)
    common_utils.setDefaultCallPolicies(CEGUI_ns)
    ## add additional version information to the module to help identify it correctly
    # todo: this should be done automatically
    common_utils.addVersionInfo(mb, PACKAGE_NAME, PACKAGE_VERSION)
    # Creating code creator. After this step you should not modify/customize declarations.
    mb.build_code_creator(module_name = MODULE_NAME, doc_extractor = common_utils.createDocumentationExtractor())
    common_utils.writeModule(mb, OUTPUT_DIR)
if __name__ == "__main__":
    # script entry point: regenerate the CEGUIBase Python bindings
    common_utils.verbose_generate("CEGUIBase", generate)
| gpl-3.0 |
fr34k8/atomic-reactor | tests/plugins/test_add_labels.py | 3 | 3006 | """
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import print_function, unicode_literals
try:
from collections import OrderedDict
except ImportError:
# Python 2.6
from ordereddict import OrderedDict
from dockerfile_parse import DockerfileParser
from atomic_reactor.core import DockerTasker
from atomic_reactor.inner import DockerBuildWorkflow
from atomic_reactor.plugin import PreBuildPluginsRunner
from atomic_reactor.plugins.pre_add_labels_in_df import AddLabelsPlugin
from atomic_reactor.util import ImageName
from tests.constants import MOCK_SOURCE
import json
import pytest
from flexmock import flexmock
class Y(object):
    """Bare stand-in object used as the mock builder's ``source`` attribute."""
    pass
class X(object):
    """Minimal mock of a build object: only the attributes the plugin reads."""
    image_id = "xxx"
    # source paths are irrelevant for this test; df_path is set per-test below
    source = Y()
    source.dockerfile_path = None
    source.path = None
    base_image = ImageName(repo="qwe", tag="asd")
DF_CONTENT = """\
FROM fedora
RUN yum install -y python-django
CMD blabla"""
LABELS_CONF_BASE = {"Config": {"Labels": {"label1": "base value"}}}
LABELS_CONF = OrderedDict({'label1': 'value 1', 'label2': 'long value'})
LABELS_CONF_WRONG = [('label1', 'value1'), ('label2', 'value2')]
LABELS_BLANK = {}
# Can't be sure of the order of the labels, expect either
EXPECTED_OUTPUT = [r"""FROM fedora
RUN yum install -y python-django
LABEL "label1"="value 1" "label2"="long value"
CMD blabla""", r"""FROM fedora
RUN yum install -y python-django
LABEL "label2"="long value" "label1"="value 1"
CMD blabla"""]
EXPECTED_OUTPUT2 = [r"""FROM fedora
RUN yum install -y python-django
LABEL "label2"="long value"
CMD blabla"""]
EXPECTED_OUTPUT3 = [DF_CONTENT]
@pytest.mark.parametrize('labels_conf_base, labels_conf, dont_overwrite, expected_output', [
    (LABELS_CONF_BASE, LABELS_CONF, [], EXPECTED_OUTPUT),
    (LABELS_CONF_BASE, json.dumps(LABELS_CONF), [], EXPECTED_OUTPUT),
    (LABELS_CONF_BASE, LABELS_CONF_WRONG, [], RuntimeError()),
    (LABELS_CONF_BASE, LABELS_CONF, ["label1", ], EXPECTED_OUTPUT2),
    (LABELS_CONF_BASE, LABELS_BLANK, ["label1", ], EXPECTED_OUTPUT3),
])
def test_add_labels_plugin(tmpdir, labels_conf_base, labels_conf, dont_overwrite, expected_output):
    """Run AddLabelsPlugin against a fixture Dockerfile and check the result.

    A RuntimeError instance as expected_output means the plugin is expected
    to fail for that labels configuration; otherwise the rewritten Dockerfile
    content must match one of the expected variants.
    """
    df = DockerfileParser(str(tmpdir))
    df.content = DF_CONTENT
    tasker = DockerTasker()
    workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image')
    # wire the mock builder in and fake the base-image inspection data
    setattr(workflow, 'builder', X)
    flexmock(workflow, base_image_inspect=labels_conf_base)
    setattr(workflow.builder, 'df_path', df.dockerfile_path)
    runner = PreBuildPluginsRunner(
        tasker,
        workflow,
        [{
            'name': AddLabelsPlugin.key,
            'args': {'labels': labels_conf, "dont_overwrite": dont_overwrite}
        }]
    )
    if isinstance(expected_output, RuntimeError):
        with pytest.raises(RuntimeError):
            runner.run()
    else:
        runner.run()
        assert AddLabelsPlugin.key is not None
        # expected_output is a list of acceptable Dockerfile renderings
        assert df.content in expected_output
| bsd-3-clause |
crosswalk-project/chromium-crosswalk-efl | tools/memory_inspector/memory_inspector/frontends/command_line.py | 83 | 5717 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Command line frontend for Memory Inspector"""
import json
import memory_inspector
import optparse
import os
import time
from memory_inspector import constants
from memory_inspector.classification import mmap_classifier
from memory_inspector.core import backends
from memory_inspector.data import serialization
def main():
  """Entry point: parse CLI options and dispatch to one of COMMANDS.

  Returns 0 on success and -1 on usage or lookup errors (Python 2 script).
  """
  COMMANDS = ['devices', 'ps', 'stats', 'mmaps', 'classified_mmaps']
  usage = ('%prog [options] ' + ' | '.join(COMMANDS))
  parser = optparse.OptionParser(usage=usage)
  parser.add_option('-b', '--backend', help='Backend name '
                    '(e.g., Android)', type='string', default='Android')
  parser.add_option('-s', '--device_id', help='Device '
                    'id (e.g., Android serial)', type='string')
  parser.add_option('-p', '--process_id', help='Target process id',
                    type='int')
  parser.add_option('-m', '--filter_process_name', help='Process '
                    'name to match', type='string')
  parser.add_option('-r', '--mmap_rule',
                    help='mmap rule', type='string',
                    default=os.path.join(constants.CLASSIFICATION_RULES_PATH,
                                         'default', 'mmap-android.py'))
  (options, args) = parser.parse_args()
  memory_inspector.RegisterAllBackends()
  if not args or args[0] not in COMMANDS:
    parser.print_help()
    return -1
  if args[0] == 'devices':
    # 'devices' needs no target device, so handle it before device resolution
    _ListDevices(options.backend)
    return 0
  number_of_devices = 0
  if options.device_id:
    device_id = options.device_id
    number_of_devices = 1
  else:
    # no explicit id: auto-select only when exactly one device matches backend
    for device in backends.ListDevices():
      if device.backend.name == options.backend:
        number_of_devices += 1
        device_id = device.id
  if number_of_devices == 0:
    print "No devices connected"
    return -1
  if number_of_devices > 1:
    print ('More than 1 device connected. You need to provide'
           ' --device_id')
    return -1
  device = backends.GetDevice(options.backend, device_id)
  if not device:
    print 'Device', device_id, 'does not exist'
    return -1
  device.Initialize()
  if args[0] == 'ps':
    if not options.filter_process_name:
      print 'Listing all processes'
    else:
      print ('Listing processes matching '
             + options.filter_process_name.lower())
    print ''
    print '%-10s : %-50s : %12s %12s %12s' % (
        'Process ID', 'Process Name', 'RUN_TIME', 'THREADS',
        'MEM_RSS_KB')
    print ''
    for process in device.ListProcesses():
      # case-insensitive substring match against the process name
      if (not options.filter_process_name or
          options.filter_process_name.lower() in process.name.lower()):
        stats = process.GetStats()
        run_time_min, run_time_sec = divmod(stats.run_time, 60)
        print '%10s : %-50s : %6s m %2s s %8s %12s' % (
            process.pid, _Truncate(process.name, 50), run_time_min,
            run_time_sec, stats.threads, stats.vm_rss)
    return 0
  # the remaining commands all operate on a single target process
  if not options.process_id:
    print 'You need to provide --process_id'
    return -1
  process = device.GetProcess(options.process_id)
  # NOTE(review): 'elif' chains the command dispatch onto the process check;
  # it works (commands run only when the process exists) but reads oddly.
  if not process:
    print 'Cannot find process [%d] on device %s' % (
        options.process_id, device.id)
    return -1
  elif args[0] == 'stats':
    _ListProcessStats(process)
    return 0
  elif args[0] == 'mmaps':
    _ListProcessMmaps(process)
    return 0
  elif args[0] == 'classified_mmaps':
    _ListProcessClassifiedMmaps(process, options.mmap_rule)
    return 0
def _ListDevices(backend_name):
print 'Device list:'
print ''
for device in backends.ListDevices():
if device.backend.name == backend_name:
print '%-16s : %s' % (device.id, device.name)
def _ListProcessStats(process):
  """Prints process stats periodically

  NOTE: this loops forever (one sample per second); the user is expected
  to interrupt with Ctrl-C.
  """
  print 'Stats for process: [%d] %s' % (process.pid, process.name)
  print '%-10s : %-50s : %12s %12s %13s %12s %14s' % (
      'Process ID', 'Process Name', 'RUN_TIME', 'THREADS',
      'CPU_USAGE', 'MEM_RSS_KB', 'PAGE_FAULTS')
  print ''
  while True:
    stats = process.GetStats()
    run_time_min, run_time_sec = divmod(stats.run_time, 60)
    print '%10s : %-50s : %6s m %2s s %8s %12s %13s %11s' % (
        process.pid, _Truncate(process.name, 50), run_time_min, run_time_sec,
        stats.threads, stats.cpu_usage, stats.vm_rss, stats.page_faults)
    time.sleep(1)
def _ListProcessMmaps(process):
  """Prints process memory maps

  Sizes are reported in KB (byte counts divided by 1024).
  """
  print 'Memory Maps for process: [%d] %s' % (process.pid, process.name)
  print '%-10s %-10s %6s %12s %12s %13s %13s %-40s' % (
      'START', 'END', 'FLAGS', 'PRIV.DIRTY', 'PRIV.CLEAN',
      'SHARED DIRTY', 'SHARED CLEAN', 'MAPPED_FILE')
  print '%38s %12s %12s %13s' % ('(kb)', '(kb)', '(kb)', '(kb)')
  print ''
  maps = process.DumpMemoryMaps()
  for entry in maps.entries:
    # start/end are printed as hex addresses (%x)
    print '%-10x %-10x %6s %12s %12s %13s %13s %-40s' % (
        entry.start, entry.end, entry.prot_flags,
        entry.priv_dirty_bytes / 1024, entry.priv_clean_bytes / 1024,
        entry.shared_dirty_bytes / 1024,
        entry.shared_clean_bytes / 1024, entry.mapped_file)
def _ListProcessClassifiedMmaps(process, mmap_rule):
  """Prints process classified memory maps

  Classifies the process mmaps with the rule file at *mmap_rule* and dumps
  the resulting tree as JSON to stdout.
  """
  maps = process.DumpMemoryMaps()
  if not os.path.exists(mmap_rule):
    print 'File', mmap_rule, 'not found'
    return
  with open(mmap_rule) as f:
    rules = mmap_classifier.LoadRules(f.read())
  classified_results_tree = mmap_classifier.Classify(maps, rules)
  # serialization.Encoder knows how to encode the classifier result nodes
  print json.dumps(classified_results_tree, cls=serialization.Encoder)
print json.dumps(classified_results_tree, cls=serialization.Encoder)
def _Truncate(name, max_length):
if len(name) <= max_length:
return name
return '%s...' % name[0:(max_length - 3)]
| bsd-3-clause |
sauloal/cnidaria | scripts/venv/local/lib/python2.7/encodings/unicode_internal.py | 827 | 1196 | """ Python 'unicode-internal' Codec
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless codec delegating directly to the C implementation."""
    # Note: Binding these as C functions will result in the class not
    # converting them to methods. This is intended.
    encode = codecs.unicode_internal_encode
    decode = codecs.unicode_internal_decode
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        # the C helper returns (output, length_consumed); only output is needed
        return codecs.unicode_internal_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        # the C helper returns (output, length_consumed); only output is needed
        return codecs.unicode_internal_decode(input, self.errors)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # stream behaviour comes entirely from the base classes
    pass
class StreamReader(Codec,codecs.StreamReader):
    # stream behaviour comes entirely from the base classes
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo entry consumed by the encodings-package registry."""
    return codecs.CodecInfo(
        name='unicode-internal',
        encode=Codec.encode,
        decode=Codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamwriter=StreamWriter,
        streamreader=StreamReader,
    )
| mit |
FundersClub/django-papertrail | test_site/test_site/settings.py | 1 | 3219 | """
Django settings for test_site project.
Generated by 'django-admin startproject' using Django 2.0.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '0%te(_0m&$gr#4#1_&2_%d_mc2+-t11s*&1w-dk_cg-zo1_zqp'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'test_app',
'papertrail',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'test_site.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'test_site.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
# NOTE: blank credentials and a hardcoded local PostgreSQL host -- this is
# the test-site configuration only, not suitable for production use.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'papertrail-test',
        'USER': '',
        'PASSWORD': '',
        'HOST': 'localhost',
        'PORT': '5432',
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
| apache-2.0 |
komatits/specfem3d | CUBIT_GEOCUBIT/geocubitlib/exportlib.py | 1 | 53812 | #!/usr/bin/env python
#############################################################################
# exportlib.py
# this file is part of GEOCUBIT #
# #
# Created by Emanuele Casarotti #
# Copyright (c) 2008 Istituto Nazionale di Geofisica e Vulcanologia #
# #
#############################################################################
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., #
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. #
# #
#############################################################################
#
try:
import start as start
cubit = start.start_cubit()
except:
try:
import cubit
except:
print "error importing cubit, check if cubit is installed"
pass
import glob
from utilities import get_cubit_version
print 'version 2.2'
class VersionException(Exception):
    """Raised when the detected cubit/trelis version lacks a capability."""
    pass
class MergingError(Exception):
    """Raised when merging mesh chunks fails."""
    pass
def invert_dict(d):
    """Invert a mapping: return {value: [keys that mapped to that value]}.

    Keys sharing the same value are grouped in encounter order.
    """
    inv = {}
    # items() works on both Python 2 and 3 (iteritems() is Python-2 only)
    for k, v in d.items():
        inv.setdefault(v, []).append(k)
    return inv
def importing_cubfiles(cubfiles):
    """Resolve a glob pattern of per-processor mesh files.

    Filenames are expected to follow the <stem>_<index>.<ext> convention;
    when they do, they are returned sorted by their numeric index.

    Returns a tuple (nfiles, sorted_indices, filenames, is_cub_format).
    """
    import re
    # raw strings so '\.' reaches the regex engine unmangled
    rule_st = re.compile(r"(.+)_[0-9]+\.")
    rule_ex = re.compile(r".+_[0-9]+\.(.+)")
    rule_int = re.compile(r".+_([0-9]+)\.")
    filenames = glob.glob(cubfiles)
    try:
        st = rule_st.findall(filenames[0])[0]
        ex = rule_ex.findall(filenames[0])[0]
        listflag = True
    except IndexError:
        # empty glob result, or a name without the <stem>_<n>.<ext> pattern
        ex = ''
        listflag = False
    cubflag = (ex == 'cub')
    list_int = []
    fs = []
    try:
        for f in filenames:
            list_int.append(int(rule_int.findall(f)[0]))
        list_int.sort()
        if listflag:
            # rebuild the names in ascending processor-index order
            fs = [st + '_' + str(ind) + '.' + ex for ind in list_int]
    except (IndexError, ValueError):
        # a filename without a numeric suffix: keep whatever was collected,
        # matching the original best-effort behaviour
        pass
    if listflag:
        filenames = fs
    return len(filenames), list_int, filenames, cubflag
def collect_new(cpuxmin=0, cpuxmax=1, cpuymin=0, cpuymax=1, cpux=1, cpuy=1,
                cubfiles=False, ckbound_method1=False, ckbound_method2=False,
                merge_tolerance=None, curverefining=False,
                outfilename='totalmesh_merged', qlog=False,
                export2SPECFEM3D=False, listblock=None, listflag=None,
                outdir='.', add_sea=False, decimate=False, cpml=False,
                cpml_size=False, top_absorbing=False, hex27=False,
                save_cubfile=False, check_merging=False,
                starting_tolerance=100.):
    """Merge the per-processor mesh chunks and export the final mesh.

    Dispatches to the cubit-12.2 or cubit/trelis-14 merging path based on
    the detected version, optionally refines curves / adds a sea layer
    (<= 12.2 only), writes the merged exodus mesh plus a blocks.dat summary
    into outdir, and can run a quality log and the SPECFEM3D export.
    Raises VersionException when a capability is not available for the
    detected cubit version.
    """
    # silence cubit chatter while merging
    cubit.cmd('set info off')
    cubit.cmd('set echo off')
    cubit.cmd('set journal off')
    # cubit.cmd('set error off')
    version_cubit = get_cubit_version()
    if decimate:
        if version_cubit >= 14.0:
            raise VersionException('check cubit version, decimate capability \
has been tested only with cubit <= 12.2')
    # version-dependent merging strategy; note the new path uses
    # inclusive-exclusive cpu bounds (hence the +1)
    if version_cubit <= 12.2:
        collecting_merging(cpuxmin, cpuxmax, cpuymin, cpuymax, cpux, cpuy,
                           cubfiles=cubfiles, ckbound_method1=ckbound_method1,
                           ckbound_method2=ckbound_method2,
                           merge_tolerance=merge_tolerance, decimate=decimate)
    elif version_cubit >= 14:
        collecting_merging_new(cpuxmin, cpuxmax + 1, cpuymin, cpuymax + 1,
                               cpux, cpuy, cubfiles=cubfiles,
                               check_merging=check_merging,
                               starting_tolerance=starting_tolerance)
    else:
        raise VersionException('check cubit version, parallel capability \
of geocubit is working with \
cubit/trelis 14 or later (or cubit 12.2)')
    cubit.cmd('set info on')
    cubit.cmd('set echo on')
    cubit.cmd('set journal on')
    # cubit.cmd('set error on')
    if curverefining:
        if version_cubit <= 12.2:
            block = 1001  # topography
            refine_closecurve(block, curverefining, acis=True)
        else:
            raise VersionException(
                'check cubit version, refine curve capability has been tested \
only with cubit <= 12.2')
    if add_sea:
        if version_cubit <= 12.2:
            block = 1001
            add_sea_layer(block=block)
        else:
            raise VersionException(
                'check cubit version, sea capability has been tested \
only with cubit <= 12.2')
    # split a possible directory component off outfilename and make sure
    # the output directory exists
    outdir2 = '/'.join(x for x in outfilename.split('/')[:-1])
    if outdir2 == '':
        outdir2 = outdir + '/'
    else:
        outdir2 = outdir + '/' + outdir2 + '/'
    import os
    try:
        os.makedirs(outdir2)
    except OSError:
        # directory already exists (or cannot be created); proceed anyway
        pass
    cubit.cmd('compress all')
    command = "export mesh '" + outdir2 + outfilename + \
        ".e' block all overwrite xml '" + outdir2 + outfilename + ".xml'"
    cubit.cmd(command)
    # write a human-readable summary of every block: id, name, attributes,
    # element type and element count
    f = open(outdir2 + 'blocks.dat', 'w')
    blocks = cubit.get_block_id_list()
    for block in blocks:
        name = cubit.get_exodus_entity_name('block', block)
        element_count = cubit.get_exodus_element_count(block, "block")
        nattrib = cubit.get_block_attribute_count(block)
        attr = [cubit.get_block_attribute_value(
            block, x) for x in range(0, nattrib)]
        ty = cubit.get_block_element_type(block)
        f.write(str(block) + '; ' + name + ' ; nattr ' + str(nattrib) + ' ; ' +
                ' '.join(str(x) for x in attr) + ' ; ' + ty + ' ' +
                str(element_count) + '\n')
    f.close()
    cubit.cmd('set info echo journ off')
    cmd = 'del group all'
    cubit.silent_cmd(cmd)
    cubit.cmd('set info echo journ on')
    print 'end meshing'
    if qlog:
        print '\n\nQUALITY CHECK.... ***************\n\n'
        import quality_log
        tq = open(outdir2 + outfilename + '.quality', 'w')
        # NOTE(review): return values are unused here; quality_log writes
        # its report to the open file handle
        max_skewness, min_length = quality_log.quality_log(tq)
    if export2SPECFEM3D:
        e2SEM(files=False, listblock=listblock,
              listflag=listflag, outdir=outdir,
              cpml=cpml, cpml_size=cpml_size,
              top_absorbing=top_absorbing, hex27=hex27)
    if save_cubfile:
        # only volume blocks (id <= 1000) get mesh geometry before saving
        vol_blocks = [x for x in blocks if x <= 1000]
        cubit.cmd("create mesh geometry block " +
                  ' '.join(str(x) for x in vol_blocks) +
                  " feature_angle 135.0")
        command = "save as '" + outdir2 + outfilename + ".cub' overwrite"
        print command
        cubit.cmd(command)
# backward-compatible aliases for the historical public names
collect = collect_new
define_blocks = collect_new
def e2SEM(files=False, listblock=None, listflag=None, outdir='.',
          cpml=False, cpml_size=False, top_absorbing=False, hex27=False):
    """Tag blocks with material flags and run the SPECFEM3D export.

    If *files* is a glob pattern, the matching .cub/.e meshes are loaded
    first. When listblock/listflag are not both given, every HEX block is
    collected and flags are assigned sequentially.
    """
    import glob
    if files:
        filenames = glob.glob(files)
        for f in filenames:
            print f
            # dispatch on extension: .cub opens natively, .e imports mesh-only
            extension = f.split('.')[-1]
            if extension == 'cub':
                cubit.cmd('open "' + f + '"')
            elif extension == 'e':
                cubit.cmd('import mesh "' + f + '" no_geom')
            else:
                print extension
    if listblock and listflag:
        pass
    else:
        listblock = []
        listflag = []
        block_list = list(cubit.get_block_id_list())
        for block in block_list:
            ty = cubit.get_block_element_type(block)
            if 'HEX' in ty:
                listblock.append(block)
                # listflag.append(block)
        # NOTE(review): flags are sized from block_list (all blocks), not
        # listblock (HEX blocks only) -- zip() truncates, so this works,
        # but verify the flag numbering is the intended one
        listflag = range(1, len(block_list) + 1)
    # store the flag as attribute 1 of each block
    for ib, iflag in zip(listblock, listflag):
        cubit.cmd("block " + str(ib) + " attribute count 1")
        cubit.cmd("block " + str(ib) + " attribute index 1 " + str(iflag))
    import cubit2specfem3d
    cubit2specfem3d.export2SPECFEM3D(outdir, cpml=cpml, cpml_size=cpml_size,
                                     top_absorbing=top_absorbing, hex27=hex27)
def collecting_block(store_group_name, ip=0, xmin=(0,), xmax=(0,), ymin=(0,),
                     ymax=(0,), index_block=0):
    """Fold the currently loaded blocks into named cubit groups.

    Hex blocks (id <= 1000) become volume groups (created on the first
    chunk, appended to afterwards); side blocks (id > 1000) are routed to
    boundary groups (xmin/xmax/ymin/ymax/topo/bot) when processor *ip*
    sits on the corresponding mesh border, and to 'lateral' otherwise.
    The xmin/xmax/ymin/ymax defaults are tuples rather than lists to avoid
    the shared-mutable-default pitfall; only membership tests are done on
    them, so any sequence works. Returns store_group_name, extended with
    any newly created volume group names.
    """
    block_list = list(cubit.get_block_id_list())
    block_list.sort()
    # volume blocks use ids <= 1000, boundary (side) blocks use ids > 1000
    block_hex = [x for x in block_list if x <= 1000]
    block_side = [x for x in block_list if x > 1000]
    for ib, block in enumerate(block_hex):
        if index_block == 0:
            # first chunk: create one group per volume block
            cubit.cmd("group 'vol" + str(block) +
                      "' add Hex in block " + str(block))
            store_group_name.append('vol' + str(block))
            cubit.cmd("del block " + str(block))
        else:
            # later chunks: append hexes to the already-created groups
            cubit.cmd("group '" + store_group_name[ib] +
                      "' add Hex in block " + str(block))
            cubit.cmd("del block " + str(block))
    for ib, side in enumerate(block_side):
        if side == 1004:
            if ip in ymin:
                cubit.cmd("group 'ymin' add face in block " + str(side))
            else:
                cubit.cmd("group 'lateral' add face in block " + str(side))
        elif side == 1003:
            if ip in xmin:
                cubit.cmd("group 'xmin' add face in block " + str(side))
            else:
                cubit.cmd("group 'lateral' add face in block " + str(side))
        elif side == 1006:
            if ip in ymax:
                cubit.cmd("group 'ymax' add face in block " + str(side))
            else:
                cubit.cmd("group 'lateral' add face in block " + str(side))
        elif side == 1005:
            if ip in xmax:
                cubit.cmd("group 'xmax' add face in block " + str(side))
            else:
                cubit.cmd("group 'lateral' add face in block " + str(side))
        elif side == 1001:
            cubit.cmd("group 'topo' add face in block " + str(side))
        elif side == 1002:
            cubit.cmd("group 'bot' add face in block " + str(side))
        cubit.cmd("del block " + str(side))
    return store_group_name
def check_lateral_nodes(name_group='lateral'):
cubit.cmd("group 'lateral_nodes' add Node in face in group " + name_group)
ilateral_nodes = cubit.get_id_from_name('lateral_nodes')
lateral_nodes = cubit.get_group_nodes(ilateral_nodes)
cubit.cmd('del group ' + str(ilateral_nodes))
print name_group, ' nodes ', len(lateral_nodes)
return lateral_nodes
def prepare_equivalence_new(name_group='lateral'):
    """Bin the edge lengths of *name_group* to drive node equivalencing.

    Returns (factor, minvalue, maxvalue, inv_length) where inv_length maps
    a bin index to the edges falling in that bin (cumulated so each bin
    also contains the next bin's edges).
    """
    length = {}
    cmd = "group 'tmpn' add edge in face in group " + name_group
    cubit.cmd(cmd)
    ge = cubit.get_id_from_name("tmpn")
    e1 = cubit.get_group_edges(ge)
    lengthmin = 1e9
    for e in e1:
        lengthmin = min(lengthmin, cubit.get_mesh_edge_length(e))
        # NOTE(review): this stores half the *running minimum*, not half of
        # edge e's own length -- earlier edges get larger values than later
        # ones with the same length; verify this is intended
        length[e] = lengthmin * .5
    cubit.cmd('delete group ' + str(ge))
    minvalue = min(length.values())
    maxvalue = max(length.values())
    # (typo 'lentgh' kept: runtime output string)
    print 'min lentgh: ', minvalue, 'max lentgh: ', maxvalue
    # bin the stored lengths into nbin equal-width bins
    nbin = int((maxvalue / minvalue)) + 1
    factor = (maxvalue - minvalue) / nbin
    dic_new = {}
    for k in length.keys():
        if factor != 0.:
            dic_new[k] = int((length[k] - minvalue) / factor)
        else:
            # all edges identical: everything lands in bin 0
            dic_new[k] = 0.
    inv_length = invert_dict(dic_new)
    print inv_length.keys(), factor, minvalue
    # cumulate each bin with its successor so tolerance sweeps overlap
    ks = inv_length.keys()
    ks.sort()
    for k in range(0, len(inv_length.keys()) - 1):
        inv_length[ks[k]] = inv_length[ks[k]] + inv_length[ks[k + 1]]
    return factor, minvalue, maxvalue, inv_length
def merging_node_new(tol, clean=True, graphic_debug=False):
    """Merge coincident lateral nodes that fall within tolerance *tol*.

    Expects a pre-existing cubit group 'coincident_lateral_nodes'.
    Nodes closer than *tol* are equivalenced (merged); with *clean* the
    merged nodes are removed from the candidate group.  Returns True
    when no coincident candidates remain, i.e. the caller's merging
    loop may stop.
    """
    empty = False
    print 'tolerance ', tol
    cubit.cmd("topology check coincident node node in \
              group coincident_lateral_nodes tolerance " +
              str(tol) + " highlight brief result \
              group 'merging_lateral_nodes'")
    group_exist = cubit.get_id_from_name("merging_lateral_nodes")
    if not group_exist:
        # nothing coincident at this tolerance; caller will grow tol
        print 'no nodes in this tolerance range'
    else:
        merging_nodes = cubit.get_group_nodes(group_exist)
        if graphic_debug:
            cubit.cmd('draw group lateral')
            cubit.cmd('high group merging_lateral_nodes')
        print 'merging ', len(merging_nodes), ' nodes.....'
        # 2*tol: equivalence with a slightly larger radius than the check
        cubit.cmd("equivalence node in merging_lateral_nodes \
                  tolerance " + str(tol * 2))
        if clean:
            cubit.cmd("group coincident_lateral_nodes \
                      remove node in group merging_lateral_nodes")
        cubit.cmd("delete Group merging_lateral_nodes")
    ic_nodes = cubit.get_id_from_name('coincident_lateral_nodes')
    c_nodes = cubit.get_group_nodes(ic_nodes)
    print len(c_nodes)
    if len(c_nodes) == 0:
        empty = True
        if graphic_debug:
            cubit.cmd('draw group lateral')
            cubit.cmd('high group coincident_lateral_nodes')
            # visual sanity check: highlight badly shaped (high jacobian) hexes
            cubit.cmd('quality hex all jacobian \
                      global high 0 draw mesh draw add')
    return empty
def graphic_merging(tol, step_tol=None, maxtol=None):
"""
routine for merging chunks in cubit/trelis GUI
tol :: tolerance starting value
step_tol :: the value that iteratively increases tol
maxtol :: max value of tolerance
"""
if not step_tol:
step_tol = tol / 10.
if not maxtol:
maxtol = tol * 100
cubit.cmd('group \'coincident_lateral_nodes\' add \
Node in face in group lateral')
isempty = False
while isempty:
isempty = merging_node_new(tol, clean=True, graphic_debug=True)
tol = tol + step_tol
if tol > maxtol:
print 'tolerance greater than the max length of the edges, \
please check the mesh'
def collecting_merging_new(cpuxmin=0, cpuxmax=0, cpuymin=0, cpuymax=0, cpux=1,
                           cpuy=1, cubfiles=False, check_merging=False,
                           starting_tolerance=None, step_tolerance=None):
    """Import the per-CPU mesh chunks and merge them into one mesh.

    Imports every chunk file matching *cubfiles*, collects its blocks
    into named groups (via collecting_block), iteratively equivalences
    the coincident lateral nodes with a growing tolerance, and finally
    rebuilds the volume and boundary-face blocks (topo/bot/xmin/xmax/
    ymin/ymax).  Raises MergingError when the chunk map is inconsistent
    or the tolerance outgrows the largest edge length.
    """
    # import glob
    # import re
    #
    ##
    try:
        from boundary_definition import check_bc, map_boundary
    except:
        pass
    #
    print 'number of chunks: ', cpux * cpuy
    number_of_chunks = cpux * cpuy
    xmin, xmax, ymin, ymax, listfull = map_boundary(
        cpuxmin, cpuxmax, cpuymin, cpuymax, cpux, cpuy)
    print 'xmin: ', xmin
    print 'xmax: ', xmax
    print 'ymin: ', ymin
    print 'ymax: ', ymax
    print 'full list: ', listfull
    if 1 < number_of_chunks < max(listfull):
        raise MergingError('error mapping the chunks')
    #
    if cubfiles:
        nf, listip, filenames, cubflag = importing_cubfiles(cubfiles)
        print nf, listip, filenames, cubflag, listfull
    else:
        nf = 0
        filenames = []
        ip = 0
    #
    index_block = -1
    store_group_name = []
    # boundary-face group names and the (fixed) block ids they map to
    side_name = ['topo', 'xmin', 'ymin',
                 'xmax', 'ymax', 'bot']
    side_val = ['1001', '1003', '1004',
                '1005', '1006', '1002']
    side_block_name = ['face_topo', 'face_abs_xmin', 'face_abs_ymin',
                       'face_abs_xmax', 'face_abs_ymax', 'face_abs_bottom']
    cubit.cmd('set duplicate block elements on')
    if nf > 0:
        for ip, filename in zip(listip, filenames):
            print ip, filename, ip in listfull
            try:
                if ip in listfull:
                    print filename
                    index_block = index_block + 1
                    if cubflag:
                        cubit.cmd('import cubit "' + filename + '"')
                    else:
                        cubit.cmd('import mesh "' + filename +
                                  '" block all no_geom')
            except:
                # fall back to a plain mesh import if the cubit import fails
                cubit.cmd('import mesh "' + filename + '" block all no_geom')
            # print ip,xmin,xmax,ymin,ymax,ip in xmin,ip in xmax,ip in ymin,ip
            # in ymax
            store_tmp = collecting_block(
                store_group_name, ip, xmin, xmax, ymin, ymax, index_block)
            if len(store_tmp) != 0:
                store_group_name = store_tmp
            # lateral_nodes = check_lateral_nodes()
        # checkpoint before any node merging takes place
        cubit.cmd('save as "tmp_nomerging.cub" overwrite ')
    else:
        # no chunk files: operate on the mesh already loaded in cubit
        if number_of_chunks == 1:
            from geocubitlib import boundary_definition
            boundary_definition.define_bc()
        else:
            check_bc(ip, xmin, xmax, ymin, ymax, cpux, cpuy,
                     cpuxmin, cpuxmax + 1, cpuymin, cpuymax + 1)
        cubit.cmd('disassociate mesh from volume all')
        cubit.cmd('del vol all')
        cubit.cmd('set info on')
        cubit.cmd('set echo on')
        cubit.cmd('set journal on')
        return
    factor, minvalue, maxvalue, inv_length = prepare_equivalence_new()
    cubit.cmd('set info off')
    cubit.cmd('set echo off')
    cubit.cmd('set journal off')
    if starting_tolerance:
        tol = starting_tolerance
    else:
        tol = minvalue / 20.
    if step_tolerance:
        step_tol = step_tolerance
    else:
        step_tol = minvalue / 20.
    cubit.cmd('group \'coincident_lateral_nodes\' add \
        Node in face in group lateral')
    isempty = False
    # grow the tolerance until every coincident lateral node is merged
    while not isempty:
        isempty = merging_node_new(tol, clean=True, graphic_debug=False)
        tol = tol + step_tol
        if tol > maxvalue * 1.5:
            raise MergingError(
                'tolerance greater than the max length of the edges, \
                please check the mesh')
    # if checknodes and checklines:
    # re-create the volume blocks (1..N) from the stored hex groups
    for ig, g in enumerate(store_group_name):
        cubit.cmd('block ' + str(ig + 1) + ' hex in group ' + g)
        cubit.cmd('block ' + str(ig + 1) + ' name "vol' + str(ig + 1) + '"')
        print 'block ' + str(ig + 1) + ' hex in group ' + g
    # re-create the boundary-face blocks (1001..1006)
    for ig, g in enumerate(side_name):
        cubit.cmd('block ' + side_val[ig] + ' face in group ' + g)
        print 'block ' + side_val[ig] + ' face in group ' + g
        cubit.cmd('block ' + side_val[ig] +
                  ' name "' + side_block_name[ig] + '"')
    cubit.cmd('del group all')
    cubit.cmd('set info on')
    cubit.cmd('set echo on')
    cubit.cmd('set journal on')
    # cubit.cmd('set error on')
# deprecated methods
def add_sea_layer(block=1001, optionsea=False):
    """Split the hexes touching the topography block into sea layers.

    Reuses the highest volume block id (< 1000) to create a 'sea' block
    of hexes below *seathres*; when *seaup* is false an additional
    'shwater' (shallow-water) block is created between *seathres* and
    *sealevel*, and the remaining hexes go into a 'continent' block.

    block : block id whose faces define the selection (topography).
    optionsea : dict with keys 'seaup', 'sealevel', 'seathres', or
        False for all-disabled defaults.
    """
    if optionsea:
        # sea=optionsea['sea']
        seaup = optionsea['seaup']
        sealevel = optionsea['sealevel']
        seathres = optionsea['seathres']
    else:
        # sea=False
        seaup = False
        sealevel = False
        seathres = False
    # TODO
    # add sea hex
    # change hex absoorbing....
    # reuse the highest "material" block id (< 1000 by convention here)
    block_list = cubit.get_block_id_list()
    id_block = max(block for block in block_list if block < 1000)
    cubit.cmd('delete block ' + str(id_block))
    # sea
    command = 'block ' + str(id_block) + ' hex in node in face in block ' + \
        str(block) + ' with Z_coord < ' + str(seathres)
    cubit.cmd(command)
    command = "block " + str(id_block) + " name 'sea'"
    cubit.cmd(command)
    if not seaup:
        # shallow-water band between the sea threshold and sea level
        id_block += 1
        cmd = 'block ' + str(id_block) + ' hex in node in face in block ' +\
            str(block) + ' with (Z_coord > ' + str(seathres) + \
            ' and Z_coord < ' + str(sealevel) + ')'
        cubit.cmd(cmd)
        command = "block " + str(id_block) + " name 'shwater'"
        cubit.cmd(command)
    # everything at or above sea level is land
    id_block += 1
    command = 'block ' + str(id_block) + ' hex in node in face in block ' + \
        str(block) + ' with Z_coord >= ' + str(sealevel)
    cubit.cmd(command)
    command = "block " + str(id_block) + " name 'continent'"
    cubit.cmd(command)
def importing_cubfiles_old(cubfiles):
    """Expand a glob pattern of chunk files into an index-ordered list.

    File names are expected to look like "<stem>_<id>.<ext>".  Returns
    ``(nfiles, chunk_ids, filenames, cubflag)`` where *chunk_ids* is the
    sorted list of integer ids parsed from the names, *filenames* the
    matching paths in id order, and *cubflag* True for native ".cub"
    files.
    """
    import re
    rule_st = re.compile("(.+)_[0-9]+\.")
    rule_ex = re.compile(".+_[0-9]+\.(.+)")
    rule_int = re.compile(".+_([0-9]+)\.")
    filenames = glob.glob(cubfiles)
    listflag = True
    try:
        # stem and extension are taken from the first match
        st = rule_st.findall(filenames[0])[0]
        ex = rule_ex.findall(filenames[0])[0]
    except:
        # no matches (or unparsable name): keep glob order, empty extension
        ex = ''
        listflag = False
    cubflag = (ex == 'cub')
    list_int = []
    fs = []
    try:
        for name in filenames:
            list_int.append(int(rule_int.findall(name)[0]))
        list_int.sort()
        # rebuild the file names in ascending chunk-id order
        fs = [st + '_' + str(idx) + '.' + ex for idx in list_int]
    except:
        pass
    if listflag:
        filenames = fs
    return len(filenames), list_int, filenames, cubflag
def refine_closecurve(block=1001, closed_filenames=None, acis=True):
    """Refine the mesh inside closed curves and rebuild boundary blocks.

    Loads the closed curves from *closed_filenames* (ACIS files when
    *acis*), refines the hexes of *block* inside each curve, then
    recreates the boundary face blocks (1001..1006) whose element count
    changed and equivalences the refined mesh back together.

    NOTE(review): only the first loaded curve set (curves[0]) is
    processed in the loop below -- confirm that is intended.
    """
    from utilities import load_curves
    from boundary_definition import build_block_side, define_surf
    from mesh_volume import refine_inside_curve
    #
    #
    curves = []
    if not isinstance(closed_filenames, list):
        closed_filenames = [closed_filenames]
    for f in closed_filenames:
        print f
        if acis:
            curves = curves + load_curves(f)
    print curves
    # drop the boundary-face blocks: only volume blocks remain in blist
    blist = list(cubit.get_block_id_list())
    try:
        blist.remove(1001)
    except:
        pass
    try:
        blist.remove(1002)
    except:
        pass
    try:
        blist.remove(1003)
    except:
        pass
    try:
        blist.remove(1004)
    except:
        pass
    try:
        blist.remove(1005)
    except:
        pass
    try:
        blist.remove(1006)
    except:
        pass
    id_top = max(blist)
    # 'coi' collects the nodes that must be equivalenced at the end
    cmd = 'group "coi" add node in hex in block ' + str(id_top)
    cubit.cmd(cmd)
    #
    id_inside_arc = None
    for c in map(int, curves[0].split()):  # curves is a list of one string
        # element counts of the boundary blocks before refining, to detect
        # which boundary surfaces are affected by this curve
        c1001 = cubit.get_exodus_element_count(1001, "block")
        c1002 = cubit.get_exodus_element_count(1002, "block")
        c1003 = cubit.get_exodus_element_count(1003, "block")
        c1004 = cubit.get_exodus_element_count(1004, "block")
        c1005 = cubit.get_exodus_element_count(1005, "block")
        c1006 = cubit.get_exodus_element_count(1006, "block")
        #
        refine_inside_curve(c, ntimes=1, depth=1, block=block, surface=False)
        blist = list(cubit.get_block_id_list())
        cmd = 'create mesh geometry hex all except hex in \
              block all feature_angle 135'
        cubit.cmd(cmd)
        # the newly created block (not present before) holds the refined hexes
        blist_after = list(cubit.get_block_id_list())
        [blist_after.remove(x) for x in blist]
        id_inside = max(blist_after)
        cmd = 'group "coi" add node in hex in block ' + str(id_inside)
        cubit.cmd(cmd)
        if id_inside_arc:
            # drop the refined block left over from the previous curve
            cmd = 'del block ' + str(id_inside - 1)
            cubit.cmd(cmd)
        cmd = 'block ' + str(id_inside) + ' name "refined"'
        cubit.cmd(cmd)
        id_inside_arc = id_inside
        #
        _, _, _, _, _, top_surf, bottom_surf, surf_xmin, \
            surf_ymin, surf_xmax, surf_ymax = define_surf()
        #
        c1001_after = cubit.get_exodus_element_count(1001, "block")
        c1002_after = cubit.get_exodus_element_count(1002, "block")
        c1003_after = cubit.get_exodus_element_count(1003, "block")
        c1004_after = cubit.get_exodus_element_count(1004, "block")
        c1005_after = cubit.get_exodus_element_count(1005, "block")
        c1006_after = cubit.get_exodus_element_count(1006, "block")
        entity = 'face'
        # rebuild only the boundary blocks whose element count changed
        if c1001_after != c1001:
            refname = entity + '_topo'
            build_block_side(top_surf, refname, obj=entity, id_0=1001)
        #
        if c1002_after != c1002:
            refname = entity + '_bottom'
            build_block_side(bottom_surf, refname, obj=entity, id_0=1002)
        #
        if c1003_after != c1003:
            refname = entity + '_abs_xmin'
            build_block_side(surf_xmin, refname, obj=entity, id_0=1003)
        #
        if c1004_after != c1004:
            refname = entity + '_abs_ymin'
            build_block_side(surf_ymin, refname, obj=entity, id_0=1004)
        #
        if c1005_after != c1005:
            refname = entity + '_abs_xmax'
            build_block_side(surf_xmax, refname, obj=entity, id_0=1005)
        #
        if c1006_after != c1006:
            refname = entity + '_abs_ymax'
            build_block_side(surf_ymax, refname, obj=entity, id_0=1006)
        #
        cmd = 'disassociate mesh from volume all'
        cubit.cmd(cmd)
        cmd = 'group "coi" add node in face in \
              block 1001 1002 1003 1004 1005 1006'
        cubit.cmd(cmd)
    cubit.cmd('del vol all')
    # remove duplicated hexes introduced by 'create mesh geometry'
    cubit.cmd('group "removedouble" add hex all except hex in block all')
    cubit.cmd('delete hex in removedouble')
    cubit.cmd('delet group removedouble')
    # stitch the refined region back to the rest of the mesh
    cmd = 'equivalence node in group coi tolerance 20'
    cubit.cmd(cmd)
    cmd = 'equivalence node all tolerance 10'
    cubit.cmd(cmd)
    cubit.cmd('del curve ' + ' '.join(str(x) for x in curves))
def collecting_merging(cpuxmin=0, cpuxmax=1, cpuymin=0, cpuymax=1, cpux=1,
                       cpuy=1, cubfiles=False, ckbound_method1=False,
                       ckbound_method2=False, merge_tolerance=None,
                       decimate=False):
    """Import the per-CPU chunks and merge their shared boundaries.

    Each chunk's boundary nodes are recorded in *boundary_dict* (via
    check_bc) while importing; the chunks are then stitched together
    using one of three strategies:

    * ckbound_method1 -- length-binned equivalence via merge_node_ck /
      merge_node / merge_node_4 on matching surfaces and corner curves;
    * ckbound_method2 -- direct pairwise 'equivalence node' commands
      with *merge_tolerance*;
    * both flags set   -- per-block 'topology check coincident node'
      followed by a group equivalence.

    *decimate* actually refines (numsplit 1) each imported chunk.
    After merging, negative-jacobian and leftover-coincident-node
    checks are run and reported.
    """
    boundary_dict = {}
    ##
    try:
        from boundary_definition import check_bc, map_boundary
    except:
        pass
    #
    xmin, xmax, ymin, ymax, listfull = map_boundary(
        cpuxmin, cpuxmax, cpuymin, cpuymax, cpux, cpuy)
    #
    if cubfiles:
        nf, listip, filenames, cubflag = importing_cubfiles(cubfiles)
    else:
        nf = 0
        filenames = []
        ip = 0
    #
    if nf > 0:
        for ip, filename in zip(listip, filenames):
            try:
                if ip in listfull:
                    if cubflag:
                        cubit.cmd('import cubit "' + filename + '"')
                    else:
                        cubit.cmd('import mesh geometry "' + filename +
                                  '" block all use nodeset sideset \
                                  feature_angle 135.00 linear merge')
                    if decimate:
                        cubit.cmd(
                            'refine volume all numsplit 1 bias 1.0 depth 1 ')
                    # record this chunk's boundary nodes for later merging
                    boundary = check_bc(ip, xmin, xmax, ymin, ymax,
                                        cpux, cpuy, cpuxmin, cpuxmax,
                                        cpuymin, cpuymax)
                    boundary_dict[ip] = boundary
                    # keep only the mesh: drop the geometry volumes
                    list_vol = list(cubit.parse_cubit_list('volume', 'all'))
                    for v in list_vol:
                        cubit.cmd("disassociate mesh from volume " + str(v))
                        command = "del vol " + str(v)
                        cubit.cmd(command)
            except:
                # fallback: plain import of the single file as chunk 0
                cubit.cmd('import mesh geometry "' + filename +
                          '" block all use nodeset sideset \
                          feature_angle 135.00 linear merge')
                if decimate:
                    cubit.cmd('refine volume all numsplit 1 bias 1.0 depth 1 ')
                ip = 0
                boundary = check_bc(ip, xmin, xmax, ymin, ymax,
                                    cpux, cpuy, cpuxmin, cpuxmax,
                                    cpuymin, cpuymax)
                boundary_dict[ip] = boundary
                list_vol = list(cubit.parse_cubit_list('volume', 'all'))
                for v in list_vol:
                    cubit.cmd("disassociate mesh from volume " + str(v))
                    command = "del vol " + str(v)
                    cubit.cmd(command)
        # checkpoint of the collected (still unmerged) mesh
        cubit.cmd('export mesh "tmp_collect_NOmerging.e" \
                  dimension 3 block all overwrite')
    else:
        # no files given: operate on the mesh already loaded in cubit
        if decimate:
            cubit.cmd('refine volume all numsplit 1 bias 1.0 depth 1 ')
        boundary = check_bc(ip, xmin, xmax, ymin, ymax, cpux,
                            cpuy, cpuxmin, cpuxmax, cpuymin, cpuymax)
    #
    #
    # print boundary_dict
    # name the hex (volume) blocks volN
    block_list = cubit.get_block_id_list()
    for block in block_list:
        ty = cubit.get_block_element_type(block)
        if ty == 'HEX8':
            cubit.cmd('block ' + str(block) + ' name "vol' + str(block) + '"')
    #
    #
    print 'chbound', ckbound_method1, ckbound_method2
    if ckbound_method1 and not ckbound_method2 and len(filenames) != 1:
        # use the equivalence method for groups
        if isinstance(merge_tolerance, list):
            tol = merge_tolerance[0]
        elif merge_tolerance:
            tol = merge_tolerance
        else:
            tol = 100000
        #
        idiag = None
        # cubit.cmd('set info off')
        # cubit.cmd('set journal off')
        # cubit.cmd('set echo off')
        ind = 0
        for ix in range(cpuxmin, cpuxmax):
            for iy in range(cpuymin, cpuymax):
                ind = ind + 1
                ip = iy * cpux + ix
                print '******************* ', ip, ind, '/', len(listfull)
                #
                #   ileft    |   ip
                #  --------------------
                #   idiag    |   idown
                #
                #
                # neighbouring chunk indices; chunks on the xmin/ymin edge
                # have no left/lower neighbour and point back at themselves
                if ip not in xmin and ip not in ymin:
                    ileft = iy * cpux + ix - 1
                    idown = (iy - 1) * cpux + ix
                    idiag = idown - 1
                elif ip in xmin and ip in ymin:
                    ileft = ip
                    idown = ip
                    idiag = None
                elif ip in xmin:
                    ileft = ip
                    idown = (iy - 1) * cpux + ix
                    idiag = idown
                elif ip in ymin:
                    ileft = iy * cpux + ix - 1
                    idown = ip
                    idiag = ileft
                #
                print ip, ileft, idiag, idown
                # merge the surface shared with the lower neighbour
                if ip != idown:
                    nup = boundary_dict[ip]['nodes_surf_ymin']
                    ndow = boundary_dict[idown]['nodes_surf_ymax']
                    merge_node_ck(nup, ndow)
                    # corner curves involving the diagonal neighbour
                    if idiag != idown:
                        if ip in ymax and ip not in xmin:
                            # node in curve chunck left up... r u
                            nlu = boundary_dict[ip]['node_curve_xminymax']
                            nru = boundary_dict[ileft]['node_curve_xmaxymax']
                            merge_node(nlu, nru)
                        if ip in xmax:
                            # node in curve chunck left up... r u
                            nrd = boundary_dict[ip]['node_curve_xmaxymin']
                            nru = boundary_dict[idown]['node_curve_xmaxymax']
                            merge_node(nrd, nru)
                        # node in curve chunck right up... r u
                        nru = boundary_dict[ip]['node_curve_xminymin']
                        nrd = boundary_dict[idown]['node_curve_xminymax']
                        nld = boundary_dict[idiag]['node_curve_xmaxymax']
                        nlu = boundary_dict[ileft]['node_curve_xmaxymin']
                        merge_node_4(nru, nrd, nld, nlu)
                    elif ip in xmin:
                        # node in curve chunck right up... r u
                        nlu = boundary_dict[ip]['node_curve_xminymin']
                        nld = boundary_dict[idown]['node_curve_xminymax']
                        merge_node(nld, nlu)
                        # node in curve chunck right up... r u
                        nru = boundary_dict[ip]['node_curve_xmaxymin']
                        nrd = boundary_dict[idown]['node_curve_xmaxymax']
                        merge_node(nrd, nru)
                #
                # merge the surface shared with the left neighbour
                if ip != ileft:
                    nright = boundary_dict[ip]['nodes_surf_xmin']
                    nleft = boundary_dict[ileft]['nodes_surf_xmax']
                    merge_node_ck(nright, nleft)
                    #
                    #
                    if ip in ymin:
                        # node in curve chunck right down... r u
                        nrd = boundary_dict[ip]['node_curve_xminymin']
                        nld = boundary_dict[ileft]['node_curve_xmaxymin']
                        merge_node(nrd, nld)
                    if ip in ymax:
                        # node in curve chunck right up... r u
                        nru = boundary_dict[ip]['node_curve_xminymax']
                        nlu = boundary_dict[ileft]['node_curve_xmaxymax']
                        merge_node(nlu, nru)
        cubit.cmd('set info on')
        cubit.cmd('set echo on')
        cubit.cmd('set journal on')
        #
        #
        # sanity check: equivalencing must not invert any hex
        cmd = 'group "negativejac" add quality hex all Jacobian high'
        cubit.cmd(cmd)
        group_id_1 = cubit.get_id_from_name("negativejac")
        n1 = cubit.get_group_nodes(group_id_1)
        if len(n1) != 0:
            print 'error, negative jacobian after the equivalence node command, \
                    use --merge2 instead of --equivalence/--merge/--merge1'
    elif ckbound_method2 and not ckbound_method1 and len(filenames) != 1:
        if isinstance(merge_tolerance, list):
            tol = merge_tolerance[0]
        elif merge_tolerance:
            tol = merge_tolerance
        else:
            tol = 100000
        #
        idiag = None
        for ix in range(cpuxmin, cpuxmax):
            for iy in range(cpuymin, cpuymax):
                ip = iy * cpux + ix
                print '******************* ', ip
                #
                #   ileft    |   ip
                #  --------------------
                #   idiag    |   idown
                #
                #
                # same neighbour mapping as method1 above
                if ip not in xmin and ip not in ymin:
                    ileft = iy * cpux + ix - 1
                    idown = (iy - 1) * cpux + ix
                    idiag = idown - 1
                elif ip in xmin and ip in ymin:
                    ileft = ip
                    idown = ip
                elif ip in xmin:
                    ileft = ip
                    idown = (iy - 1) * cpux + ix
                    idiag = idown
                elif ip in ymin:
                    ileft = iy * cpux + ix - 1
                    idown = ip
                    idiag = ileft
                #
                #
                # direct pairwise equivalence on the shared ymin/ymax surface
                if ip != idown:
                    nup = boundary_dict[ip]['nodes_surf_ymin']
                    ndow = boundary_dict[idown]['nodes_surf_ymax']
                    for n1, n2 in zip(nup, ndow):
                        cubit.cmd('equivalence node ' + str(n1) +
                                  ' ' + str(n2) + ' tolerance ' + str(tol))
                    if idiag != idown:
                        if ip in ymax and ip not in xmin:
                            # node in curve chunck left up... r u
                            nlu = boundary_dict[ip]['node_curve_xminymax']
                            nru = boundary_dict[ileft]['node_curve_xmaxymax']
                            for n in zip(nlu, nru):
                                cubit.cmd('equivalence node ' +
                                          ' '.join(str(x) for x in n) +
                                          ' tolerance ' + str(tol))
                        # node in curve chunck right up... r u
                        nru = boundary_dict[ip]['node_curve_xminymin']
                        nrd = boundary_dict[idown]['node_curve_xminymax']
                        nld = boundary_dict[idiag]['node_curve_xmaxymax']
                        nlu = boundary_dict[ileft]['node_curve_xmaxymin']
                        for n in zip(nru, nrd, nlu, nld):
                            cubit.cmd('equivalence node ' +
                                      ' '.join(str(x) for x in n) +
                                      ' tolerance ' + str(tol))
                    elif ip in xmin:
                        # node in curve chunck right up... r u
                        nru = boundary_dict[ip]['node_curve_xminymin']
                        nrd = boundary_dict[idown]['node_curve_xminymax']
                        for n in zip(nru, nrd):
                            cubit.cmd('equivalence node ' +
                                      ' '.join(str(x) for x in n) +
                                      ' tolerance ' + str(tol))
                #
                #
                # direct pairwise equivalence on the shared xmin/xmax surface
                if ip != ileft:
                    nright = boundary_dict[ip]['nodes_surf_xmin']
                    nleft = boundary_dict[ileft]['nodes_surf_xmax']
                    for n1, n2 in zip(nleft, nright):
                        cubit.cmd('equivalence node ' + str(n1) +
                                  ' ' + str(n2) + ' tolerance ' + str(tol))
                    #
                    #
                    if ip in ymin:
                        # node in curve chunck right down... r u
                        nrd = boundary_dict[ip]['node_curve_xminymin']
                        nld = boundary_dict[ileft]['node_curve_xmaxymin']
                        for n in zip(nrd, nld):
                            cubit.cmd('equivalence node ' +
                                      ' '.join(str(x) for x in n) +
                                      ' tolerance ' + str(tol))
                    if ip in ymax:
                        # node in curve chunck right up... r u
                        nru = boundary_dict[ip]['node_curve_xminymax']
                        nlu = boundary_dict[ileft]['node_curve_xmaxymax']
                        for n in zip(nru, nlu):
                            cubit.cmd('equivalence node ' +
                                      ' '.join(str(x) for x in n) +
                                      ' tolerance ' + str(tol))
        #
        #
        # verify no coincident nodes survived the merging
        cmd = 'topology check coincident node face all tolerance ' + \
            str(tol * 2) + ' nodraw brief result group "checkcoinc"'
        cubit.silent_cmd(cmd)
        group_id_1 = cubit.get_id_from_name("checkcoinc")
        if group_id_1 != 0:
            n1 = cubit.get_group_nodes(group_id_1)
            if len(n1) != 0:
                print 'error, coincident nodes after the equivalence \
                        node command, check the tolerance'
                import sys
                sys.exit()
        cmd = 'group "negativejac" add quality hex all Jacobian high'
        cubit.cmd(cmd)
        group_id_1 = cubit.get_id_from_name("negativejac")
        n1 = cubit.get_group_nodes(group_id_1)
        if len(n1) != 0:
            print 'error, negative jacobian after the equivalence node command, \
                    check the mesh'
    elif ckbound_method1 and ckbound_method2 and len(filenames) != 1:
        # combined method: per-block coincidence check + group equivalence
        block_list = cubit.get_block_id_list()
        i = -1
        for block in block_list:
            ty = cubit.get_block_element_type(block)
            if ty == 'HEX8':
                i = i + 1
                if isinstance(merge_tolerance, list):
                    try:
                        tol = merge_tolerance[i]
                    except:
                        # fewer tolerances than blocks: reuse the last one
                        tol = merge_tolerance[-1]
                elif merge_tolerance:
                    tol = merge_tolerance
                else:
                    tol = 1
                cmd = 'topology check coincident node face in hex in block ' +\
                    str(block) + ' tolerance ' + str(tol) + \
                    ' nodraw brief result group "b' + str(block) + '"'
                cubit.cmd(cmd)
                print cmd
                cmd = 'equivalence node in group b' + \
                    str(block) + ' tolerance ' + str(tol)
                cubit.cmd(cmd)
                print cmd
        if isinstance(merge_tolerance, list):
            tol = max(merge_tolerance)
        elif merge_tolerance:
            tol = merge_tolerance
        else:
            tol = 1
        #
        #
        # final whole-mesh coincidence check
        cmd = 'topology check coincident node face all tolerance ' +\
            str(tol) + ' nodraw brief result group "checkcoinc"'
        cubit.silent_cmd(cmd)
        group_id_1 = cubit.get_id_from_name("checkcoinc")
        if group_id_1 != 0:
            n1 = cubit.get_group_nodes(group_id_1)
            if len(n1) != 0:
                print 'error, coincident nodes after the equivalence node \
                        command, check the tolerance'
                import sys
                sys.exit()
        cmd = 'group "negativejac" add quality hex all Jacobian high'
        cubit.silent_cmd(cmd)
        group_id_1 = cubit.get_id_from_name("negativejac")
        n1 = cubit.get_group_nodes(group_id_1)
        if len(n1) != 0:
            print 'error, negative jacobian after the equivalence node command, \
                    use --merge instead of --equivalence'
def collect_old(cpuxmin=0, cpuxmax=1, cpuymin=0, cpuymax=1, cpux=1, cpuy=1,
                cubfiles=False, ckbound_method1=False, ckbound_method2=False,
                merge_tolerance=None, curverefining=False,
                outfilename='totalmesh_merged', qlog=False,
                export2SPECFEM3D=False, listblock=None,
                listflag=None, outdir='.', add_sea=False, decimate=False,
                cpml=False, cpml_size=False, top_absorbing=False, hex27=False):
    """Top-level driver: merge the chunks and export the final mesh.

    Runs collecting_merging, then the optional post-processing steps
    (curve refinement, sea layer), writes the merged mesh (.e/.xml/.cub)
    and a blocks.dat summary into *outdir*, optionally runs a quality
    log and exports to SPECFEM3D format.
    """
    #
    # cubit.cmd('set journal error off')
    # cubit.cmd('set verbose error off')
    collecting_merging(cpuxmin, cpuxmax, cpuymin, cpuymax, cpux, cpuy,
                       cubfiles=cubfiles, ckbound_method1=ckbound_method1,
                       ckbound_method2=ckbound_method2,
                       merge_tolerance=merge_tolerance, decimate=decimate)
    # cubit.cmd('set journal error on')
    # cubit.cmd('set verbose error on')
    #
    if curverefining:
        block = 1001  # topography
        refine_closecurve(block, curverefining, acis=True)
    #
    #
    if add_sea:
        block = 1001
        add_sea_layer(block=block)
    # build the output directory from outdir + any path in outfilename
    outdir2 = '/'.join(x for x in outfilename.split('/')[:-1])
    if outdir2 == '':
        outdir2 = outdir + '/'
    else:
        outdir2 = outdir + '/' + outdir2 + '/'
    import os
    try:
        os.makedirs(outdir2)
    except OSError:
        # directory already exists (or cannot be created): keep going
        pass
    cubit.cmd('compress all')
    command = "export mesh '" + outdir2 + outfilename + \
        ".e' block all overwrite xml '" + outdir2 + outfilename + ".xml'"
    cubit.cmd(command)
    # write one summary line per block: id, name, attributes, type, count
    f = open(outdir2 + 'blocks.dat', 'w')
    blocks = cubit.get_block_id_list()
    #
    for block in blocks:
        name = cubit.get_exodus_entity_name('block', block)
        element_count = cubit.get_exodus_element_count(block, "block")
        nattrib = cubit.get_block_attribute_count(block)
        attr = [cubit.get_block_attribute_value(
            block, x) for x in range(0, nattrib)]
        ty = cubit.get_block_element_type(block)
        f.write(str(block) + ' ; ' + name + ' ; nattr ' + str(nattrib) +
                ' ; ' + ' '.join(str(x) for x in attr) + ' ; ' + ty + ' ' +
                str(element_count) + '\n')
    f.close()
    #
    #
    cubit.cmd('set info echo journ off')
    cmd = 'del group all'
    cubit.silent_cmd(cmd)
    cubit.cmd('set info echo journ on')
    #
    command = "save as '" + outdir2 + outfilename + ".cub' overwrite"
    cubit.cmd(command)
    #
    print 'end meshing'
    #
    #
    if qlog:
        print '\n\nQUALITY CHECK.... ***************\n\n'
        import quality_log
        tq = open(outdir2 + outfilename + '.quality', 'w')
        max_skewness, min_length = quality_log.quality_log(tq)
    #
    #
    #
    if export2SPECFEM3D:
        e2SEM(files=False, listblock=listblock,
              listflag=listflag, outdir=outdir,
              cpml=cpml, cpml_size=cpml_size,
              top_absorbing=top_absorbing, hex27=hex27)
def e2SEM_old(files=False, listblock=None, listflag=None, outdir='.',
              cpml=False, cpml_size=False, top_absorbing=False, hex27=False):
    """Export the current (or loaded) mesh to SPECFEM3D format.

    When *files* is a glob pattern, the matching .cub/.e files are
    opened/imported first.  Each HEX block listed in *listblock* gets
    its material flag from *listflag* stored as block attribute 1,
    then cubit2specfem3d.export2SPECFEM3D writes the SPECFEM3D files
    into *outdir*.
    """
    import glob
    if files:
        filenames = glob.glob(files)
        for f in filenames:
            print f
            extension = f.split('.')[-1]
            if extension == 'cub':
                cubit.cmd('open "' + f + '"')
            elif extension == 'e':
                cubit.cmd('import mesh "' + f + '" no_geom')
            else:
                print extension
    if listblock and listflag:
        pass
    else:
        # no explicit mapping given: flag every HEX block 1..N in order
        listblock = []
        listflag = []
        block_list = list(cubit.get_block_id_list())
        for block in block_list:
            ty = cubit.get_block_element_type(block)
            if 'HEX' in ty:
                listblock.append(block)
                # listflag.append(block)
        listflag = range(1, len(block_list) + 1)
    #
    # store the material flag as the block's first attribute
    for ib, iflag in zip(listblock, listflag):
        cubit.cmd("block " + str(ib) + " attribute count 1")
        cubit.cmd("block " + str(ib) + " attribute index 1 " + str(iflag))
    #
    import cubit2specfem3d
    import os
    if not os.path.exists(outdir):
        os.mkdir(outdir)
    cubit2specfem3d.export2SPECFEM3D(outdir, cpml=cpml, cpml_size=cpml_size,
                                     top_absorbing=top_absorbing, hex27=hex27)
def prepare_equivalence(nodes1, nodes2):
    """Bin the node couples of two boundary node lists by edge length.

    For each couple ``(n1, n2)`` from zipping *nodes1* and *nodes2*,
    records half of the minimum length of the edges attached to the
    couple, then partitions the couples into length bins.  Returns
    ``(factor, minvalue, inv_length)`` where ``inv_length`` maps a bin
    index to the couples in that bin (each bin also absorbs the next
    bin's couples) -- consumed by merge_node / merge_node_ck to choose
    per-bin equivalence tolerances.
    """
    cubit.cmd('set info off')
    cubit.cmd('set echo off')
    cubit.cmd('set journal off')
    length = {}
    for ns in zip(nodes1, nodes2):
        cmd = 'group "tmpn" add edge in node ' + ' '.join(str(n) for n in ns)
        cubit.cmd(cmd)
        ge = cubit.get_id_from_name("tmpn")
        e1 = cubit.get_group_edges(ge)
        lengthmin = 1e9
        for e in e1:
            lengthmin = min(lengthmin, cubit.get_mesh_edge_length(e))
        # half of the shortest attached edge, keyed by the node couple
        length[ns] = lengthmin * .5
        cubit.cmd('delete group ' + str(ge))
    minvalue = min(length.values())
    maxvalue = max(length.values())
    print 'min lentgh: ', minvalue, 'max lentgh: ', maxvalue
    nbin = int((maxvalue / minvalue) / 2.) + 1
    factor = (maxvalue - minvalue) / nbin
    dic_new = {}
    for k in length.keys():
        if factor != 0.:
            # bin index of couple k, 0 .. nbin-1
            dic_new[k] = int((length[k] - minvalue) / factor)
        else:
            # uniform edge lengths: single degenerate bin
            dic_new[k] = 0.
    inv_length = invert_dict(dic_new)
    print inv_length.keys(), factor, minvalue
    ks = inv_length.keys()
    ks.sort()
    # make the bins cumulative: each bin also contains the next bin's couples
    for k in range(0, len(inv_length.keys()) - 1):
        inv_length[ks[k]] = inv_length[ks[k]] + inv_length[ks[k + 1]]
    cubit.cmd('set info on')
    cubit.cmd('set echo on')
    cubit.cmd('set journal on')
    return factor, minvalue, inv_length
def merge_node_ck(n1, n2):
    """Equivalence matching surface nodes of two chunks, with checking.

    Like merge_node, but afterwards verifies that the merged node count
    dropped to len(n1); on failure it retries with a small tolerance,
    tags the leftover nodes in block 3000 and dumps error_merging.e /
    error_merging.cub for inspection.
    """
    factor, minvalue, inv_length = prepare_equivalence(n1, n2)
    cubit.cmd('set info off')
    cubit.cmd('set echo off')
    cubit.cmd('set journal off')
    # cubit.cmd('set error off')
    # merge each length bin with its own tolerance (skip the last,
    # cumulative binning already folded it into the previous ones)
    for k in inv_length.keys()[:-1]:
        if len(inv_length[k]) > 0:
            cmd = 'equivalence node ' + \
                ' '.join(' '.join(str(n) for n in x)
                         for x in inv_length[k]) +\
                ' tolerance ' + str(k * factor + minvalue / 3.)
            cubit.cmd(cmd)
            print 'equivalence ' + str(len(inv_length[k])) +\
                ' couples of nodes - tolerance ' + \
                str(k * factor + minvalue / 3.)
    # count the surviving nodes: after a full merge, every couple has
    # collapsed to a single node so the group holds len(n1) nodes
    cubit.cmd('group "checkmerge" add node ' +
              ' '.join(str(n) for n in n1) +
              ' ' + ' '.join(str(n) for n in n2))
    idg = cubit.get_id_from_name('checkmerge')
    remainnodes = cubit.get_group_nodes(idg)
    print 'from ' + str(len(n1) + len(n2)) + ' nodes -> ' + \
        str(len(remainnodes)) + ' nodes'
    if len(n1) != len(remainnodes):
        # some couples did not merge: retry on the whole group
        print 'equivalence ' + str(len(remainnodes)) + \
            ' couples of nodes - tolerance ' + str(minvalue / 3.)
        cubit.cmd('set info on')
        cubit.cmd('set echo on')
        cubit.cmd('set journal on')
        cmd = 'equivalence node in group ' + \
            str(idg) + ' tolerance ' + str(minvalue / 3.)
        cubit.cmd(cmd)
        # tag the problematic nodes so they are easy to inspect
        cmd = 'block 3000 node in group ' + str(idg)
        cubit.cmd(cmd)
    if len(n1) != len(remainnodes):
        cubit.cmd('export mesh "error_merging.e" \
                  dimension 3 block all overwrite')
        cubit.cmd('save as "error_merging.cub" \
                  dimension 3 block all overwrite')
        print 'error merging '
        if False:
            import sys
            sys.exit(2)
    cubit.cmd('delete group checkmerge')
    cubit.cmd('delete block 3000')
    cubit.cmd('set info on')
    cubit.cmd('set echo on')
    cubit.cmd('set journal on')
def merge_node(n1, n2):
factor, minvalue, inv_length = prepare_equivalence(n1, n2)
cubit.cmd('set info off')
cubit.cmd('set echo off')
cubit.cmd('set journal off')
for k in inv_length.keys()[:-1]:
if len(inv_length[k]) > 0:
cmd = 'equivalence node ' + \
' '.join(' '.join(str(n) for n in x)
for x in inv_length[k]) +\
' tolerance ' + str(k * factor + minvalue / 3.)
cubit.cmd(cmd)
print 'equivalence ' + str(len(inv_length[k])) + \
' couples of nodes - tolerance ' + \
str(k * factor + minvalue / 3.)
cubit.cmd('set info on')
cubit.cmd('set echo on')
cubit.cmd('set journal on')
def prepare_equivalence_4(nodes1, nodes2, nodes3, nodes4):
    """Bin the edges attached to four corner node lists by edge length.

    Concatenates the non-empty node lists, gathers the attached edges,
    and partitions them into length bins.  Returns ``(factor, minvalue,
    inv_length)`` for the legacy path of merge_node_4.

    NOTE(review): unlike prepare_equivalence, ``length`` here is keyed
    by edge id (not by node couple), so ``inv_length`` bins contain
    edge ids; confirm this matches what the legacy merge_node_4 branch
    expects.
    """
    cubit.cmd('set info off')
    cubit.cmd('set echo off')
    cubit.cmd('set journal off')
    length = {}
    nodes = [nodes1, nodes2, nodes3, nodes4]
    check = map(len, nodes)
    checked_nodes = []
    # keep only the non-empty lists (corner chunks may miss some curves)
    for ind, iflag in enumerate(check):
        if iflag:
            checked_nodes = checked_nodes + nodes[ind]
    cmd = 'group "tmpn" add edge in node ' + \
        ' '.join(str(n) for n in checked_nodes)
    cubit.cmd(cmd)
    ge = cubit.get_id_from_name("tmpn")
    e1 = cubit.get_group_edges(ge)
    lengthmin = 1e9
    for e in e1:
        lengthmin = min(lengthmin, cubit.get_mesh_edge_length(e))
        # NOTE(review): stores half of the *running minimum*, not of
        # edge e's own length -- same pattern as prepare_equivalence_new
        length[e] = lengthmin * .5
    cubit.cmd('delete group ' + str(ge))
    try:
        minvalue = min(length.values())
        maxvalue = max(length.values())
    except:
        # no edges found: fall back to arbitrary defaults rather than fail
        try:
            print nodes
            print 'edges ', e1
        except:
            pass
        minvalue = 10.
        maxvalue = 2000.
    print 'min lentgh: ', minvalue, 'max lentgh: ', maxvalue
    nbin = int((maxvalue / minvalue) / 2.) + 1
    factor = (maxvalue - minvalue) / nbin
    dic_new = {}
    for k in length.keys():
        if factor != 0.:
            # bin index of edge k, 0 .. nbin-1
            dic_new[k] = int((length[k] - minvalue) / factor)
        else:
            dic_new[k] = 0.
    inv_length = invert_dict(dic_new)
    print inv_length.keys(), factor, minvalue
    ks = inv_length.keys()
    ks.sort()
    # make the bins cumulative: each bin also contains the next bin's entries
    for k in range(0, len(inv_length.keys()) - 1):
        inv_length[ks[k]] = inv_length[ks[k]] + inv_length[ks[k + 1]]
    cubit.cmd('set info on')
    cubit.cmd('set echo on')
    cubit.cmd('set journal on')
    return factor, minvalue, inv_length
def ording_z(nodes):
    """Return *nodes* sorted by ascending z coordinate.

    Ties on z are broken by node id (decorate-sort-undecorate on
    (z, node) tuples, matching the original tuple-sort behaviour).
    """
    def z_of(node):
        # third component of the nodal coordinates is the elevation
        return cubit.get_nodal_coordinates(node)[2]
    decorated = sorted((z_of(node), node) for node in nodes)
    return [node for _, node in decorated]
def merge_node_4(n1, n2, n3, n4, newmethod=True):
if newmethod:
print "merge node 4 side"
n1o = ording_z(n1)
n2o = ording_z(n2)
n3o = ording_z(n3)
n4o = ording_z(n4)
for ln in zip(n1o, n2o, n3o, n4o):
cmd = 'equivalence node ' + \
' '.join(str(n) for n in ln) + ' tolerance 10000 '
cubit.cmd(cmd)
else:
factor, minvalue, inv_length = prepare_equivalence_4(n1, n2, n3, n4)
for k in inv_length.keys()[:-1]:
if len(inv_length[k]) > 1:
try:
for x in inv_length[k]:
if type(x) is not list:
x = [x]
else:
pass
cmd = 'equivalence node ' + \
' '.join(' '.join(str(n) for n in x)) + \
' tolerance ' + str(k * factor + minvalue / 3.)
except:
print k, "***************************************** s"
print inv_length[k]
cubit.cmd(cmd)
print 'equivalence ' + str(len(inv_length[k])) +\
' couples of nodes - tolerance ' + \
str(k * factor + minvalue / 3.)
if len(inv_length[k]) == 1:
cmd = 'equivalence node ' + \
' '.join(' '.join(str(n) for n in inv_length[k])) + \
' tolerance ' + str(k * factor + minvalue / 3.)
cubit.cmd(cmd)
print 'equivalence ' + str(len(inv_length[k])) + \
' couples of nodes - tolerance ' + \
str(k * factor + minvalue / 3.)
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.