gt
stringclasses 1
value | context
stringlengths 2.49k
119k
|
|---|---|
"""
Logger implementation loosely modeled on PEP 282. We don't use the
PEP 282 logger implementation in the stdlib ('logging') because it's
idiosyncratic and a bit slow for our purposes (we don't use threads).
"""
# This module must not depend on any non-stdlib modules to
# avoid circular import problems
import os
import errno
import sys
import time
import traceback
try:
import syslog
except ImportError:
# only required when 'syslog' is specified as the log filename
pass
class LevelsByName:
    """Numeric log levels keyed by their 4-letter short names.

    Higher numbers are more severe; a logger or handler emits a record
    when the record's level is >= its own configured level.
    """
    CRIT = 50   # messages that probably require immediate user attention
    ERRO = 40   # messages that indicate a potentially ignorable error condition
    WARN = 30   # messages that indicate issues which aren't errors
    INFO = 20   # normal informational output
    DEBG = 10   # messages useful for users trying to debug configurations
    TRAC = 5    # messages useful to developers trying to debug plugins
    BLAT = 3    # messages useful for developers trying to debug supervisor
class LevelsByDescription:
    """The same levels as LevelsByName, keyed by full description.

    Used by getLevelNumByDescription() to translate human-readable
    strings (e.g. from a config file) into numeric levels.
    """
    critical = LevelsByName.CRIT
    error = LevelsByName.ERRO
    warn = LevelsByName.WARN
    info = LevelsByName.INFO
    debug = LevelsByName.DEBG
    trace = LevelsByName.TRAC
    blather = LevelsByName.BLAT
def _levelNumbers():
    """Build the inverse of LevelsByName: level number -> short name.

    Private attributes of the class (dunder entries) are excluded.
    """
    return dict((number, name)
                for name, number in LevelsByName.__dict__.items()
                if not name.startswith('_'))

LOG_LEVELS_BY_NUM = _levelNumbers()
def getLevelNumByDescription(description):
    """Map a description such as 'info' or 'blather' to a numeric level.

    Returns None when the description is unknown.
    """
    return getattr(LevelsByDescription, description, None)
class Handler(object):
    """Base class for log output handlers.

    Subclasses provide ``self.stream`` (a file-like object).  Records are
    rendered by %-interpolating ``fmt`` against ``record.asdict()``.
    """
    fmt = '%(message)s'
    level = LevelsByName.INFO

    def setFormat(self, fmt):
        """Set the %-style format string used to render records."""
        self.fmt = fmt

    def setLevel(self, level):
        """Set the minimum level this handler will emit."""
        self.level = level

    def flush(self):
        """Flush the underlying stream, tolerating a vanished reader."""
        try:
            self.stream.flush()
        except IOError as why:
            # if supervisor output is piped, EPIPE can be raised at exit.
            # ('except E, v' was Python-2-only syntax; 'as' plus .args[0]
            # works on both Python 2.6+ and Python 3.)
            if why.args[0] != errno.EPIPE:
                raise

    def close(self):
        """Close the stream unless it is one of the std file descriptors."""
        if hasattr(self.stream, 'fileno'):
            fd = self.stream.fileno()
            if fd < 3:  # don't ever close stdout or stderr
                return
        self.stream.close()

    def emit(self, record):
        """Render *record*, write it to the stream, and flush.

        Errors during emission are reported via handleError() rather than
        propagated, so a broken log target cannot take the program down.
        """
        try:
            msg = self.fmt % record.asdict()
            try:
                self.stream.write(msg)
            except UnicodeError:
                # the stream refused the text as-is; retry as UTF-8 bytes
                self.stream.write(msg.encode("UTF-8"))
            self.flush()
        except Exception:
            # was a bare 'except:'; narrowed so KeyboardInterrupt and
            # SystemExit still propagate
            self.handleError(record)

    def handleError(self, record):
        """Print the active exception's traceback to stderr."""
        ei = sys.exc_info()
        traceback.print_exception(ei[0], ei[1], ei[2], None, sys.stderr)
        del ei
class FileHandler(Handler):
    """File handler which supports reopening of logs.
    """

    def __init__(self, filename, mode="a"):
        self.stream = open(filename, mode)
        self.baseFilename = filename
        self.mode = mode

    def reopen(self):
        """Close and reopen the log file (e.g. after external rotation)."""
        self.close()
        self.stream = open(self.baseFilename, self.mode)

    def remove(self):
        """Delete the log file; a file that is already gone is not an error."""
        try:
            os.remove(self.baseFilename)
        except OSError as why:
            # 'except E, v' was Python-2-only syntax; use 'as' and .errno
            # so this also runs on Python 3
            if why.errno != errno.ENOENT:
                raise
class StreamHandler(Handler):
    """Handler writing to an already-open stream (stdout, or a BoundIO
    capture buffer created by getLogger())."""

    def __init__(self, strm=None):
        self.stream = strm

    def remove(self):
        # a BoundIO capture buffer can be emptied; real streams cannot
        clear = getattr(self.stream, 'clear', None)
        if clear is not None:
            clear()

    def reopen(self):
        # streams are never reopened; present for interface parity
        pass
class BoundIO:
    """A write-only, in-memory, file-like object capped near ``maxbytes``.

    When a write would push the buffer past the cap, an equal amount of
    the oldest data is dropped from the front first.
    """

    def __init__(self, maxbytes, buf=''):
        self.maxbytes = maxbytes
        self.buf = buf

    def flush(self):
        # nothing to flush; satisfies the stream interface
        pass

    def close(self):
        self.clear()

    def write(self, s):
        incoming = len(s)
        if len(self.buf) + incoming > self.maxbytes:
            # make room by discarding the same number of leading chars
            self.buf = self.buf[incoming:]
        self.buf = self.buf + s

    def getvalue(self):
        """Return everything currently held in the buffer."""
        return self.buf

    def clear(self):
        """Empty the buffer."""
        self.buf = ''
class RotatingFileHandler(FileHandler):
    # Maps baseFilename -> open stream so only one filehandle per path
    # ever receives logging (see the _stream descriptor below).
    open_streams = {}

    def __init__(self, filename, mode='a', maxBytes=512*1024*1024,
                 backupCount=10):
        """
        Open the specified file and use it as the stream for logging.

        By default, the file grows indefinitely. You can specify particular
        values of maxBytes and backupCount to allow the file to rollover at
        a predetermined size.

        Rollover occurs whenever the current log file is nearly maxBytes in
        length. If backupCount is >= 1, the system will successively create
        new files with the same pathname as the base file, but with extensions
        ".1", ".2" etc. appended to it. For example, with a backupCount of 5
        and a base file name of "app.log", you would get "app.log",
        "app.log.1", "app.log.2", ... through to "app.log.5". The file being
        written to is always "app.log" - when it gets filled up, it is closed
        and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
        exist, then they are renamed to "app.log.2", "app.log.3" etc.
        respectively.

        If maxBytes is zero, rollover never occurs.
        """
        if maxBytes > 0:
            mode = 'a'  # doesn't make sense otherwise!
        self.mode = mode
        self.baseFilename = filename
        # assignment goes through the _stream descriptor: reuse an
        # already-open handle for this path if one exists
        self.stream = self.stream or open(filename, mode)
        self.maxBytes = maxBytes
        self.backupCount = backupCount
        self.counter = 0
        self.every = 10

    class _stream(object):
        """
        Descriptor for managing open filehandles so that only one
        filehandle per file path ever receives logging.
        """
        def __get__(self, obj, objtype):
            """Return the open filehandle for obj's path, or None."""
            return objtype.open_streams.get(obj.baseFilename)

        def __set__(self, obj, stream):
            """Record the open filehandle for the filename defined on the
            RotatingFileHandler."""
            obj.open_streams[obj.baseFilename] = stream

    stream = _stream()

    def close(self):
        if self.stream:
            self.stream.close()

    def emit(self, record):
        """
        Emit a record.

        Output the record to the file, catering for rollover as described
        in doRollover().
        """
        FileHandler.emit(self, record)
        self.doRollover()

    @staticmethod
    def _removeIfExists(filename):
        """Delete *filename*, tolerating a concurrent deletion (ENOENT)."""
        try:
            os.remove(filename)
        except OSError as why:
            # catch race condition (already deleted); 'except E, v' was
            # Python-2-only syntax, replaced with 'as' + .errno
            if why.errno != errno.ENOENT:
                raise

    def doRollover(self):
        """
        Do a rollover, as described in __init__().
        """
        if self.maxBytes <= 0:
            return  # rollover disabled
        if self.stream.tell() < self.maxBytes:
            return  # not yet large enough
        self.stream.close()
        if self.backupCount > 0:
            # shift app.log.1 -> app.log.2, ... starting from the oldest
            for i in range(self.backupCount - 1, 0, -1):
                sfn = "%s.%d" % (self.baseFilename, i)
                dfn = "%s.%d" % (self.baseFilename, i + 1)
                if os.path.exists(sfn):
                    if os.path.exists(dfn):
                        self._removeIfExists(dfn)
                    os.rename(sfn, dfn)
            dfn = self.baseFilename + ".1"
            if os.path.exists(dfn):
                self._removeIfExists(dfn)
            os.rename(self.baseFilename, dfn)
        # reopen truncated; with backupCount == 0 this simply discards
        # the old contents
        self.stream = open(self.baseFilename, 'w')
class LogRecord:
    """A single log message plus the metadata needed to format it."""

    def __init__(self, level, msg, **kw):
        self.level = level
        self.msg = msg
        self.kw = kw
        # lazily computed mapping used for %-interpolation; cached so
        # multiple handlers share one timestamp and one rendering
        self.dictrepr = None

    def asdict(self):
        """Return (and cache) the mapping consumed by Handler.emit."""
        if self.dictrepr is None:
            now = time.time()
            # int() replaces the Python-2-only long(); only the fractional
            # part of the timestamp is wanted here
            msecs = (now - int(now)) * 1000
            part1 = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(now))
            asctime = '%s,%03d' % (part1, msecs)
            levelname = LOG_LEVELS_BY_NUM[self.level]
            if self.kw:
                msg = self.msg % self.kw
            else:
                msg = self.msg
            self.dictrepr = {'message':msg, 'levelname':levelname,
                             'asctime':asctime}
        return self.dictrepr
class Logger:
    """Dispatches LogRecords to a set of handlers, filtering by level."""

    def __init__(self, level=None, handlers=None):
        self.level = LevelsByName.INFO if level is None else level
        self.handlers = [] if handlers is None else handlers

    def close(self):
        for handler in self.handlers:
            handler.close()

    def _log_at(self, level, msg, kw):
        # centralized level gate so the per-level methods stay one-liners
        if level >= self.level:
            self.log(level, msg, **kw)

    def blather(self, msg, **kw):
        self._log_at(LevelsByName.BLAT, msg, kw)

    def trace(self, msg, **kw):
        self._log_at(LevelsByName.TRAC, msg, kw)

    def debug(self, msg, **kw):
        self._log_at(LevelsByName.DEBG, msg, kw)

    def info(self, msg, **kw):
        self._log_at(LevelsByName.INFO, msg, kw)

    def warn(self, msg, **kw):
        self._log_at(LevelsByName.WARN, msg, kw)

    def error(self, msg, **kw):
        self._log_at(LevelsByName.ERRO, msg, kw)

    def critical(self, msg, **kw):
        self._log_at(LevelsByName.CRIT, msg, kw)

    def log(self, level, msg, **kw):
        """Build a record and hand it to every handler at or below *level*."""
        record = LogRecord(level, msg, **kw)
        for handler in self.handlers:
            if level >= handler.level:
                handler.emit(record)

    def addHandler(self, hdlr):
        self.handlers.append(hdlr)

    def getvalue(self):
        # replaced per-instance by getLogger() when capturing into BoundIO
        raise NotImplementedError
class SyslogHandler(Handler):
    """Handler that forwards each line of a record to syslog(3)."""

    def __init__(self):
        # NOTE(review): assert is stripped under -O; kept as-is because
        # callers may rely on AssertionError being raised here
        assert 'syslog' in globals(), "Syslog module not present"

    def close(self):
        pass

    def emit(self, record):
        try:
            params = record.asdict()
            message = params['message']
            # syslog wants one call per line; apply the handler's format
            # to each line separately
            for line in message.rstrip('\n').split('\n'):
                params['message'] = line
                msg = self.fmt % params
                try:
                    syslog.syslog(msg)
                except UnicodeError:
                    syslog.syslog(msg.encode("UTF-8"))
        except Exception:
            # was a bare 'except:'; narrowed so KeyboardInterrupt and
            # SystemExit still propagate
            self.handleError(record)
def getLogger(filename, level, fmt, rotating=False, maxbytes=0, backups=0,
              stdout=False):
    """Build a Logger wired to the destination named by *filename*.

    filename None  -> capture into an in-memory BoundIO; the returned
                      logger's getvalue() yields the captured text.
    'syslog'       -> forward to syslog(3).
    anything else  -> a file path, size-rotated when *rotating* is truthy.
    """
    logger = Logger(level)
    handlers = []

    if filename is None:
        if not maxbytes:
            maxbytes = 1 << 21  # default capture cap: 2MB
        io = BoundIO(maxbytes)
        handlers.append(StreamHandler(io))
        logger.getvalue = io.getvalue
    elif filename == 'syslog':
        handlers.append(SyslogHandler())
    else:
        if rotating is False:
            handlers.append(FileHandler(filename))
        else:
            handlers.append(RotatingFileHandler(filename, 'a',
                                                maxbytes, backups))

    if stdout:
        handlers.append(StreamHandler(sys.stdout))

    for handler in handlers:
        handler.setFormat(fmt)
        handler.setLevel(level)
        logger.addHandler(handler)
    return logger
|
|
# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manipulate access control lists that Cloud Storage provides.
:class:`google.cloud.storage.bucket.Bucket` has a getting method that creates
an ACL object under the hood, and you can interact with that using
:func:`google.cloud.storage.bucket.Bucket.acl`:
.. literalinclude:: storage_snippets.py
:start-after: [START client_bucket_acl]
:end-before: [END client_bucket_acl]
Adding and removing permissions can be done with the following methods
(in increasing order of granularity):
- :func:`ACL.all`
corresponds to access for all users.
- :func:`ACL.all_authenticated` corresponds
to access for all users that are signed into a Google account.
- :func:`ACL.domain` corresponds to access on a
per Google Apps domain (ie, ``example.com``).
- :func:`ACL.group` corresponds to access on a
per group basis (either by ID or e-mail address).
- :func:`ACL.user` corresponds to access on a
per user basis (either by ID or e-mail address).
And you are able to ``grant`` and ``revoke`` the following roles:
- **Reading**:
:func:`_ACLEntity.grant_read` and :func:`_ACLEntity.revoke_read`
- **Writing**:
:func:`_ACLEntity.grant_write` and :func:`_ACLEntity.revoke_write`
- **Owning**:
:func:`_ACLEntity.grant_owner` and :func:`_ACLEntity.revoke_owner`
You can use any of these like any other factory method (these happen to
be :class:`_ACLEntity` factories):
.. literalinclude:: storage_snippets.py
:start-after: [START acl_user_settings]
:end-before: [END acl_user_settings]
You can also chain these ``grant_*`` and ``revoke_*`` methods together
for brevity:
.. literalinclude:: storage_snippets.py
:start-after: [START acl_revoke_write]
:end-before: [END acl_revoke_write]
After that, you can save any changes you make with the
:func:`google.cloud.storage.acl.ACL.save` method:
.. literalinclude:: storage_snippets.py
:start-after: [START acl_save]
:end-before: [END acl_save]
You can alternatively save any existing :class:`google.cloud.storage.acl.ACL`
object (whether it was created by a factory method or not) from a
:class:`google.cloud.storage.bucket.Bucket`:
.. literalinclude:: storage_snippets.py
:start-after: [START acl_save_bucket]
:end-before: [END acl_save_bucket]
To get the list of ``entity`` and ``role`` for each unique pair, the
:class:`ACL` class is iterable:
.. literalinclude:: storage_snippets.py
:start-after: [START acl_print]
:end-before: [END acl_print]
This list of tuples can be used as the ``entity`` and ``role`` fields
when sending metadata for ACLs to the API.
"""
class _ACLEntity(object):
"""Class representing a set of roles for an entity.
This is a helper class that you likely won't ever construct
outside of using the factor methods on the :class:`ACL` object.
:type entity_type: str
:param entity_type: The type of entity (ie, 'group' or 'user').
:type identifier: str
:param identifier: The ID or e-mail of the entity. For the special
entity types (like 'allUsers') this is optional.
"""
READER_ROLE = 'READER'
WRITER_ROLE = 'WRITER'
OWNER_ROLE = 'OWNER'
def __init__(self, entity_type, identifier=None):
self.identifier = identifier
self.roles = set([])
self.type = entity_type
def __str__(self):
if not self.identifier:
return str(self.type)
else:
return '{acl.type}-{acl.identifier}'.format(acl=self)
def __repr__(self):
return '<ACL Entity: {acl} ({roles})>'.format(
acl=self, roles=', '.join(self.roles))
def get_roles(self):
"""Get the list of roles permitted by this entity.
:rtype: list of strings
:returns: The list of roles associated with this entity.
"""
return self.roles
def grant(self, role):
"""Add a role to the entity.
:type role: str
:param role: The role to add to the entity.
"""
self.roles.add(role)
def revoke(self, role):
"""Remove a role from the entity.
:type role: str
:param role: The role to remove from the entity.
"""
if role in self.roles:
self.roles.remove(role)
def grant_read(self):
"""Grant read access to the current entity."""
self.grant(_ACLEntity.READER_ROLE)
def grant_write(self):
"""Grant write access to the current entity."""
self.grant(_ACLEntity.WRITER_ROLE)
def grant_owner(self):
"""Grant owner access to the current entity."""
self.grant(_ACLEntity.OWNER_ROLE)
def revoke_read(self):
"""Revoke read access from the current entity."""
self.revoke(_ACLEntity.READER_ROLE)
def revoke_write(self):
"""Revoke write access from the current entity."""
self.revoke(_ACLEntity.WRITER_ROLE)
def revoke_owner(self):
"""Revoke owner access from the current entity."""
self.revoke(_ACLEntity.OWNER_ROLE)
class ACL(object):
    """Container class representing a list of access controls.

    Entries are kept in ``self.entities`` keyed by ``str(entity)`` and are
    loaded lazily: most accessors call ``_ensure_loaded`` which fetches
    from the server on first use.
    """

    _URL_PATH_ELEM = 'acl'
    _PREDEFINED_QUERY_PARAM = 'predefinedAcl'

    # Aliases from the older XML API's predefined ACL names to the JSON
    # API's camelCase names; accepted by save_predefined().
    PREDEFINED_XML_ACLS = {
        # XML API name -> JSON API name
        'project-private': 'projectPrivate',
        'public-read': 'publicRead',
        'public-read-write': 'publicReadWrite',
        'authenticated-read': 'authenticatedRead',
        'bucket-owner-read': 'bucketOwnerRead',
        'bucket-owner-full-control': 'bucketOwnerFullControl',
    }

    PREDEFINED_JSON_ACLS = frozenset([
        'private',
        'projectPrivate',
        'publicRead',
        'publicReadWrite',
        'authenticatedRead',
        'bucketOwnerRead',
        'bucketOwnerFullControl',
    ])
    """See:
    https://cloud.google.com/storage/docs/access-control/lists#predefined-acl
    """

    # False until entries have been fetched from (or saved to) the server.
    loaded = False

    # Subclasses must override to provide these attributes (typically,
    # as properties).
    reload_path = None
    save_path = None

    def __init__(self):
        self.entities = {}

    def _ensure_loaded(self):
        """Load if not already loaded."""
        if not self.loaded:
            self.reload()

    def reset(self):
        """Remove all entities from the ACL, and clear the ``loaded`` flag."""
        self.entities.clear()
        self.loaded = False

    def __iter__(self):
        # Yields one {'entity': ..., 'role': ...} dict per (entity, role)
        # pair; entities with no roles produce nothing.
        self._ensure_loaded()

        for entity in self.entities.values():
            for role in entity.get_roles():
                if role:
                    yield {'entity': str(entity), 'role': role}

    def entity_from_dict(self, entity_dict):
        """Build an _ACLEntity object from a dictionary of data.

        An entity is a mutable object that represents a list of roles
        belonging to either a user or group or the special types for all
        users and all authenticated users.

        :type entity_dict: dict
        :param entity_dict: Dictionary full of data from an ACL lookup.

        :rtype: :class:`_ACLEntity`
        :returns: An Entity constructed from the dictionary.
        """
        entity = entity_dict['entity']
        role = entity_dict['role']

        # Special singleton entities have no identifier; all other API
        # entity strings look like '<type>-<identifier>'.
        if entity == 'allUsers':
            entity = self.all()

        elif entity == 'allAuthenticatedUsers':
            entity = self.all_authenticated()

        elif '-' in entity:
            entity_type, identifier = entity.split('-', 1)
            entity = self.entity(entity_type=entity_type,
                                 identifier=identifier)

        if not isinstance(entity, _ACLEntity):
            raise ValueError('Invalid dictionary: %s' % entity_dict)

        entity.grant(role)
        return entity

    def has_entity(self, entity):
        """Returns whether or not this ACL has any entries for an entity.

        :type entity: :class:`_ACLEntity`
        :param entity: The entity to check for existence in this ACL.

        :rtype: bool
        :returns: True if the entity exists in the ACL.
        """
        self._ensure_loaded()
        return str(entity) in self.entities

    def get_entity(self, entity, default=None):
        """Gets an entity object from the ACL.

        :type entity: :class:`_ACLEntity` or string
        :param entity: The entity to look up in the ACL.

        :type default: anything
        :param default: This value will be returned if the entity
                        doesn't exist.

        :rtype: :class:`_ACLEntity`
        :returns: The corresponding entity or the value provided
                  to ``default``.
        """
        self._ensure_loaded()
        return self.entities.get(str(entity), default)

    def add_entity(self, entity):
        """Add an entity to the ACL.

        :type entity: :class:`_ACLEntity`
        :param entity: The entity to add to this ACL.
        """
        self._ensure_loaded()
        self.entities[str(entity)] = entity

    def entity(self, entity_type, identifier=None):
        """Factory method for creating an Entity.

        If an entity with the same type and identifier already exists,
        this will return a reference to that entity. If not, it will
        create a new one and add it to the list of known entities for
        this ACL.

        :type entity_type: str
        :param entity_type: The type of entity to create
                            (ie, ``user``, ``group``, etc)

        :type identifier: str
        :param identifier: The ID of the entity (if applicable).
                           This can be either an ID or an e-mail address.

        :rtype: :class:`_ACLEntity`
        :returns: A new Entity or a reference to an existing identical entity.
        """
        entity = _ACLEntity(entity_type=entity_type, identifier=identifier)
        if self.has_entity(entity):
            entity = self.get_entity(entity)
        else:
            self.add_entity(entity)
        return entity

    def user(self, identifier):
        """Factory method for a user Entity.

        :type identifier: str
        :param identifier: An id or e-mail for this particular user.

        :rtype: :class:`_ACLEntity`
        :returns: An Entity corresponding to this user.
        """
        return self.entity('user', identifier=identifier)

    def group(self, identifier):
        """Factory method for a group Entity.

        :type identifier: str
        :param identifier: An id or e-mail for this particular group.

        :rtype: :class:`_ACLEntity`
        :returns: An Entity corresponding to this group.
        """
        return self.entity('group', identifier=identifier)

    def domain(self, domain):
        """Factory method for a domain Entity.

        :type domain: str
        :param domain: The domain for this entity.

        :rtype: :class:`_ACLEntity`
        :returns: An entity corresponding to this domain.
        """
        return self.entity('domain', identifier=domain)

    def all(self):
        """Factory method for an Entity representing all users.

        :rtype: :class:`_ACLEntity`
        :returns: An entity representing all users.
        """
        return self.entity('allUsers')

    def all_authenticated(self):
        """Factory method for an Entity representing all authenticated users.

        :rtype: :class:`_ACLEntity`
        :returns: An entity representing all authenticated users.
        """
        return self.entity('allAuthenticatedUsers')

    def get_entities(self):
        """Get a list of all Entity objects.

        :rtype: list of :class:`_ACLEntity` objects
        :returns: A list of all Entity objects.
        """
        self._ensure_loaded()
        return list(self.entities.values())

    @property
    def client(self):
        """Abstract getter for the object client."""
        raise NotImplementedError

    def _require_client(self, client):
        """Check client or verify over-ride.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: the client to use.  If not passed, falls back to the
                       ``client`` stored on the current ACL.

        :rtype: :class:`google.cloud.storage.client.Client`
        :returns: The client passed in or the currently bound client.
        """
        if client is None:
            client = self.client
        return client

    def reload(self, client=None):
        """Reload the ACL data from Cloud Storage.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: Optional. The client to use.  If not passed, falls back
                       to the ``client`` stored on the ACL's parent.
        """
        path = self.reload_path
        client = self._require_client(client)

        self.entities.clear()

        found = client._connection.api_request(method='GET', path=path)
        # Mark loaded before adding entities so add_entity does not
        # trigger a recursive reload via _ensure_loaded.
        self.loaded = True
        for entry in found.get('items', ()):
            self.add_entity(self.entity_from_dict(entry))

    def _save(self, acl, predefined, client):
        """Helper for :meth:`save` and :meth:`save_predefined`.

        :type acl: :class:`google.cloud.storage.acl.ACL`, or a compatible list.
        :param acl: The ACL object to save.  If left blank, this will save
                    current entries.

        :type predefined: str
        :param predefined:
            (Optional) An identifier for a predefined ACL.  Must be one of the
            keys in :attr:`PREDEFINED_JSON_ACLS` If passed, `acl` must be None.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: Optional. The client to use.  If not passed, falls back
                       to the ``client`` stored on the ACL's parent.
        """
        query_params = {'projection': 'full'}
        if predefined is not None:
            # A predefined ACL replaces all entries, so an empty list is
            # sent alongside the predefined-ACL query parameter.
            acl = []
            query_params[self._PREDEFINED_QUERY_PARAM] = predefined

        path = self.save_path
        client = self._require_client(client)

        result = client._connection.api_request(
            method='PATCH',
            path=path,
            data={self._URL_PATH_ELEM: list(acl)},
            query_params=query_params)
        # Rebuild local entries from the server's authoritative response.
        self.entities.clear()
        for entry in result.get(self._URL_PATH_ELEM, ()):
            self.add_entity(self.entity_from_dict(entry))
        self.loaded = True

    def save(self, acl=None, client=None):
        """Save this ACL for the current bucket.

        :type acl: :class:`google.cloud.storage.acl.ACL`, or a compatible list.
        :param acl: The ACL object to save.  If left blank, this will save
                    current entries.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: Optional. The client to use.  If not passed, falls back
                       to the ``client`` stored on the ACL's parent.
        """
        if acl is None:
            acl = self
            # Never-loaded local ACL has nothing worth pushing to the
            # server; an explicitly passed acl is always saved.
            save_to_backend = acl.loaded
        else:
            save_to_backend = True

        if save_to_backend:
            self._save(acl, None, client)

    def save_predefined(self, predefined, client=None):
        """Save this ACL for the current bucket using a predefined ACL.

        :type predefined: str
        :param predefined: An identifier for a predefined ACL.  Must be one
                           of the keys in :attr:`PREDEFINED_JSON_ACLS`
                           or :attr:`PREDEFINED_XML_ACLS` (which will be
                           aliased to the corresponding JSON name).
                           If passed, `acl` must be None.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: Optional. The client to use.  If not passed, falls back
                       to the ``client`` stored on the ACL's parent.
        """
        # Normalize XML-style names to their JSON equivalents first.
        predefined = self.PREDEFINED_XML_ACLS.get(predefined, predefined)
        if predefined not in self.PREDEFINED_JSON_ACLS:
            raise ValueError("Invalid predefined ACL: %s" % (predefined,))
        self._save(None, predefined, client)

    def clear(self, client=None):
        """Remove all ACL entries.

        Note that this won't actually remove *ALL* the rules, but it
        will remove all the non-default rules.  In short, you'll still
        have access to a bucket that you created even after you clear
        ACL rules with this method.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: Optional. The client to use.  If not passed, falls back
                       to the ``client`` stored on the ACL's parent.
        """
        self.save([], client=client)
class BucketACL(ACL):
    """An ACL specifically for a bucket.

    :type bucket: :class:`google.cloud.storage.bucket.Bucket`
    :param bucket: The bucket to which this ACL relates.
    """

    def __init__(self, bucket):
        super(BucketACL, self).__init__()
        self.bucket = bucket

    @property
    def client(self):
        """The client bound to this ACL's bucket."""
        return self.bucket.client

    @property
    def reload_path(self):
        """Compute the path for GET API requests for this ACL."""
        return '{}/{}'.format(self.bucket.path, self._URL_PATH_ELEM)

    @property
    def save_path(self):
        """Compute the path for PATCH API requests for this ACL."""
        return self.bucket.path
class DefaultObjectACL(BucketACL):
    """A class representing the default object ACL for a bucket.

    Behaves exactly like a bucket ACL but targets the bucket's
    'defaultObjectAcl' resource and its matching predefined-ACL
    query parameter.
    """
    _URL_PATH_ELEM = 'defaultObjectAcl'
    _PREDEFINED_QUERY_PARAM = 'predefinedDefaultObjectAcl'
class ObjectACL(ACL):
    """An ACL specifically for a Cloud Storage object / blob.

    :type blob: :class:`google.cloud.storage.blob.Blob`
    :param blob: The blob that this ACL corresponds to.
    """

    def __init__(self, blob):
        super(ObjectACL, self).__init__()
        self.blob = blob

    @property
    def client(self):
        """The client bound to this ACL's blob."""
        return self.blob.client

    @property
    def reload_path(self):
        """Compute the path for GET API requests for this ACL."""
        return '{}/acl'.format(self.blob.path)

    @property
    def save_path(self):
        """Compute the path for PATCH API requests for this ACL."""
        return self.blob.path
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright (c) 2011 University of Southern California / ISI
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the base baremetal driver class."""
import mox
from oslo.config import cfg
from nova.compute import power_state
from nova import exception
from nova import test
from nova.tests.image import fake as fake_image
from nova.tests import utils
from nova.tests.virt.baremetal.db import base as bm_db_base
from nova.tests.virt.baremetal.db import utils as bm_db_utils
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import db
from nova.virt.baremetal import driver as bm_driver
from nova.virt.baremetal import fake
from nova.virt.baremetal import pxe
CONF = cfg.CONF

# Flags applied to every test case in this module.
COMMON_FLAGS = dict(
    firewall_driver='nova.virt.baremetal.fake.FakeFirewallDriver',
    host='test_host',
)

# Flags for the 'baremetal' option group: point every pluggable driver
# at its fake implementation so no real hardware is touched.
BAREMETAL_FLAGS = dict(
    driver='nova.virt.baremetal.fake.FakeDriver',
    instance_type_extra_specs=['cpu_arch:test', 'test_spec:test_value'],
    power_manager='nova.virt.baremetal.fake.FakePowerManager',
    vif_driver='nova.virt.baremetal.fake.FakeVifDriver',
    volume_driver='nova.virt.baremetal.fake.FakeVolumeDriver',
    group='baremetal',
)
class BareMetalDriverNoDBTestCase(test.NoDBTestCase):
    """Checks BareMetalDriver plugin loading without touching the DB."""

    def setUp(self):
        super(BareMetalDriverNoDBTestCase, self).setUp()
        self.flags(**COMMON_FLAGS)
        self.flags(**BAREMETAL_FLAGS)
        self.driver = bm_driver.BareMetalDriver(None)

    def test_validate_driver_loading(self):
        # assertIsInstance gives a clearer failure message than
        # assertTrue(isinstance(...))
        self.assertIsInstance(self.driver.driver, fake.FakeDriver)
        self.assertIsInstance(self.driver.vif_driver, fake.FakeVifDriver)
        self.assertIsInstance(self.driver.volume_driver,
                              fake.FakeVolumeDriver)
        self.assertIsInstance(self.driver.firewall_driver,
                              fake.FakeFirewallDriver)
class BareMetalDriverWithDBTestCase(bm_db_base.BMDBTestCase):
def setUp(self):
super(BareMetalDriverWithDBTestCase, self).setUp()
self.flags(**COMMON_FLAGS)
self.flags(**BAREMETAL_FLAGS)
fake_image.stub_out_image_service(self.stubs)
self.context = utils.get_test_admin_context()
self.driver = bm_driver.BareMetalDriver(None)
self.addCleanup(fake_image.FakeImageService_reset)
def _create_node(self, node_info=None, nic_info=None):
result = {}
if node_info is None:
node_info = bm_db_utils.new_bm_node(
id=123,
service_host='test_host',
cpus=2,
memory_mb=2048,
)
if nic_info is None:
nic_info = [
{'address': '01:23:45:67:89:01', 'datapath_id': '0x1',
'port_no': 1},
{'address': '01:23:45:67:89:02', 'datapath_id': '0x2',
'port_no': 2},
]
result['node_info'] = node_info
result['nic_info'] = nic_info
result['node'] = db.bm_node_create(self.context, node_info)
for nic in nic_info:
db.bm_interface_create(
self.context,
result['node']['id'],
nic['address'],
nic['datapath_id'],
nic['port_no'],
)
result['instance'] = utils.get_test_instance()
result['instance']['node'] = result['node']['uuid']
result['spawn_params'] = dict(
admin_password='test_pass',
block_device_info=None,
context=self.context,
image_meta=utils.get_test_image_info(
None, result['instance']),
injected_files=[('/fake/path', 'hello world')],
instance=result['instance'],
network_info=utils.get_test_network_info(),
)
result['destroy_params'] = dict(
instance=result['instance'],
network_info=result['spawn_params']['network_info'],
block_device_info=result['spawn_params']['block_device_info'],
)
return result
def test_get_host_stats(self):
node = self._create_node()
stats = self.driver.get_host_stats()
self.assertTrue(isinstance(stats, list))
self.assertEqual(len(stats), 1)
stats = stats[0]
self.assertEqual(stats['cpu_arch'], 'test')
self.assertEqual(stats['test_spec'], 'test_value')
self.assertEqual(stats['hypervisor_type'], 'baremetal')
self.assertEqual(stats['hypervisor_hostname'], node['node']['uuid'])
self.assertEqual(stats['host'], 'test_host')
self.assertEqual(stats['vcpus'], 2)
self.assertEqual(stats['host_memory_total'], 2048)
def test_spawn_ok(self):
node = self._create_node()
self.driver.spawn(**node['spawn_params'])
row = db.bm_node_get(self.context, node['node']['id'])
self.assertEqual(row['task_state'], baremetal_states.ACTIVE)
self.assertEqual(row['instance_uuid'], node['instance']['uuid'])
self.assertEqual(row['instance_name'], node['instance']['hostname'])
def test_macs_from_nic_for_instance(self):
node = self._create_node()
expected = set([nic['address'] for nic in node['nic_info']])
self.assertEqual(
expected, self.driver.macs_for_instance(node['instance']))
def test_macs_for_instance_after_spawn(self):
node = self._create_node()
self.driver.spawn(**node['spawn_params'])
expected = set([nic['address'] for nic in node['nic_info']])
self.assertEqual(
expected, self.driver.macs_for_instance(node['instance']))
def test_macs_for_instance(self):
node = self._create_node()
expected = set(['01:23:45:67:89:01', '01:23:45:67:89:02'])
self.assertEqual(
expected, self.driver.macs_for_instance(node['instance']))
def test_macs_for_instance_no_interfaces(self):
# Nodes cannot boot with no MACs, so we raise an error if that happens.
node = self._create_node(nic_info=[])
self.assertRaises(exception.NovaException,
self.driver.macs_for_instance, node['instance'])
def test_spawn_node_already_associated(self):
node = self._create_node()
db.bm_node_update(self.context, node['node']['id'],
{'instance_uuid': '1234-5678'})
self.assertRaises(exception.NovaException,
self.driver.spawn, **node['spawn_params'])
row = db.bm_node_get(self.context, node['node']['id'])
self.assertEqual(row['task_state'], None)
def test_spawn_node_in_use(self):
node = self._create_node()
self.driver.spawn(**node['spawn_params'])
self.assertRaises(exception.NovaException,
self.driver.spawn, **node['spawn_params'])
def test_spawn_node_not_found(self):
node = self._create_node()
db.bm_node_update(self.context, node['node']['id'],
{'uuid': 'hide-this-node'})
self.assertRaises(exception.NovaException,
self.driver.spawn, **node['spawn_params'])
row = db.bm_node_get(self.context, node['node']['id'])
self.assertEqual(row['task_state'], None)
def test_spawn_fails(self):
node = self._create_node()
self.mox.StubOutWithMock(fake.FakePowerManager, 'activate_node')
fake.FakePowerManager.activate_node().AndRaise(test.TestingException)
self.mox.ReplayAll()
self.assertRaises(test.TestingException,
self.driver.spawn, **node['spawn_params'])
row = db.bm_node_get(self.context, node['node']['id'])
self.assertEqual(row['task_state'], baremetal_states.DELETED)
def test_spawn_fails_to_cleanup(self):
node = self._create_node()
self.mox.StubOutWithMock(fake.FakePowerManager, 'activate_node')
self.mox.StubOutWithMock(fake.FakePowerManager, 'deactivate_node')
fake.FakePowerManager.deactivate_node().AndReturn(None)
fake.FakePowerManager.activate_node().AndRaise(test.TestingException)
fake.FakePowerManager.deactivate_node().AndRaise(test.TestingException)
self.mox.ReplayAll()
self.assertRaises(test.TestingException,
self.driver.spawn, **node['spawn_params'])
row = db.bm_node_get(self.context, node['node']['id'])
self.assertEqual(row['task_state'], baremetal_states.ERROR)
def test_destroy_ok(self):
node = self._create_node()
self.driver.spawn(**node['spawn_params'])
self.driver.destroy(**node['destroy_params'])
row = db.bm_node_get(self.context, node['node']['id'])
self.assertEqual(row['task_state'], baremetal_states.DELETED)
self.assertEqual(row['instance_uuid'], None)
self.assertEqual(row['instance_name'], None)
    def test_destroy_fails(self):
        """If power-off fails during destroy, the error propagates, the
        node goes to ERROR and the instance association is kept."""
        node = self._create_node()
        # Recorded order: the first deactivate (during spawn) succeeds,
        # the second (during destroy) raises.
        self.mox.StubOutWithMock(fake.FakePowerManager, 'deactivate_node')
        fake.FakePowerManager.deactivate_node().AndReturn(None)
        fake.FakePowerManager.deactivate_node().AndRaise(test.TestingException)
        self.mox.ReplayAll()
        self.driver.spawn(**node['spawn_params'])
        self.assertRaises(test.TestingException,
                          self.driver.destroy, **node['destroy_params'])
        row = db.bm_node_get(self.context, node['node']['id'])
        self.assertEqual(row['task_state'], baremetal_states.ERROR)
        self.assertEqual(row['instance_uuid'], node['instance']['uuid'])
    def test_get_available_resources(self):
        """Resource reporting tracks memory usage across the node's
        lifecycle: free before spawn, fully used after, free after destroy."""
        node = self._create_node()
        # Fresh node: full memory available, nothing used.
        resources = self.driver.get_available_resource(node['node']['uuid'])
        self.assertEqual(resources['memory_mb'],
                         node['node_info']['memory_mb'])
        self.assertEqual(resources['memory_mb_used'], 0)
        self.assertEqual(resources['supported_instances'],
                         '[["test", "baremetal", "baremetal"]]')
        # After spawn the whole node's memory counts as used.
        self.driver.spawn(**node['spawn_params'])
        resources = self.driver.get_available_resource(node['node']['uuid'])
        self.assertEqual(resources['memory_mb_used'],
                         node['node_info']['memory_mb'])
        # After destroy the memory is free again and extra stats survive.
        self.driver.destroy(**node['destroy_params'])
        resources = self.driver.get_available_resource(node['node']['uuid'])
        self.assertEqual(resources['memory_mb_used'], 0)
        self.assertEqual(resources['stats']['test_spec'], 'test_value')
def test_get_available_nodes(self):
self.assertEqual(0, len(self.driver.get_available_nodes()))
self.assertEqual(0, len(self.driver.get_available_nodes(refresh=True)))
node1 = self._create_node()
self.assertEqual(1, len(self.driver.get_available_nodes()))
node1['instance']['hostname'] = 'test-host-1'
self.driver.spawn(**node1['spawn_params'])
self.assertEqual(1, len(self.driver.get_available_nodes()))
self.assertEqual([node1['node']['uuid']],
self.driver.get_available_nodes())
    def test_list_instances(self):
        """list_instances() reports only spawned instances' hostnames and
        shrinks again as they are destroyed."""
        self.assertEqual([], self.driver.list_instances())
        node1 = self._create_node()
        # Merely registering a node does not make it an instance.
        self.assertEqual([], self.driver.list_instances())
        node_info = bm_db_utils.new_bm_node(
            id=456,
            service_host='test_host',
            cpus=2,
            memory_mb=2048,
        )
        nic_info = [
            {'address': 'cc:cc:cc', 'datapath_id': '0x1',
             'port_no': 1},
            {'address': 'dd:dd:dd', 'datapath_id': '0x2',
             'port_no': 2},
        ]
        node2 = self._create_node(node_info=node_info, nic_info=nic_info)
        self.assertEqual([], self.driver.list_instances())
        node1['instance']['hostname'] = 'test-host-1'
        node2['instance']['hostname'] = 'test-host-2'
        # Spawn both nodes; the list grows with each spawn ...
        self.driver.spawn(**node1['spawn_params'])
        self.assertEqual(['test-host-1'],
                         self.driver.list_instances())
        self.driver.spawn(**node2['spawn_params'])
        self.assertEqual(['test-host-1', 'test-host-2'],
                         self.driver.list_instances())
        # ... and shrinks with each destroy.
        self.driver.destroy(**node1['destroy_params'])
        self.assertEqual(['test-host-2'],
                         self.driver.list_instances())
        self.driver.destroy(**node2['destroy_params'])
        self.assertEqual([], self.driver.list_instances())
def test_get_info_no_such_node(self):
node = self._create_node()
self.assertRaises(exception.InstanceNotFound,
self.driver.get_info,
node['instance'])
def test_get_info_ok(self):
node = self._create_node()
db.bm_node_associate_and_update(self.context, node['node']['uuid'],
{'instance_uuid': node['instance']['uuid'],
'instance_name': node['instance']['hostname'],
'task_state': baremetal_states.ACTIVE})
res = self.driver.get_info(node['instance'])
self.assertEqual(res['state'], power_state.RUNNING)
    def test_get_info_with_defunct_pm(self):
        """A power manager that cannot report power state must yield
        NOSTATE, not SHUTDOWN (regression test for bug 1178378)."""
        # test fix for bug 1178378
        node = self._create_node()
        db.bm_node_associate_and_update(self.context, node['node']['uuid'],
                {'instance_uuid': node['instance']['uuid'],
                 'instance_name': node['instance']['hostname'],
                 'task_state': baremetal_states.ACTIVE})

        # fake the power manager and don't get a power state
        self.mox.StubOutWithMock(fake.FakePowerManager, 'is_power_on')
        fake.FakePowerManager.is_power_on().AndReturn(None)
        self.mox.ReplayAll()

        res = self.driver.get_info(node['instance'])
        # prior to the fix, returned power_state was SHUTDOWN
        self.assertEqual(res['state'], power_state.NOSTATE)
        self.mox.VerifyAll()
def test_dhcp_options_for_instance(self):
node = self._create_node()
fake_bootfile = "pxelinux.0"
self.mox.StubOutWithMock(pxe, 'get_pxe_bootfile_name')
pxe.get_pxe_bootfile_name(mox.IgnoreArg()).AndReturn(fake_bootfile)
self.mox.ReplayAll()
expected = [{'opt_name': 'bootfile-name', 'opt_value': fake_bootfile},
{'opt_name': 'server-ip-address', 'opt_value': CONF.my_ip},
{'opt_name': 'tftp-server', 'opt_value': CONF.my_ip}]
res = self.driver.dhcp_options_for_instance(node['instance'])
self.assertEqual(expected.sort(), res.sort())
self.mox.VerifyAll()
|
|
import numpy as np
import pandas as pd
import statsmodels
import statsmodels.api as sm
from statsmodels.tsa.stattools import coint
# just set the seed for the random number generator
#np.random.seed(107)
import sys
import matplotlib.pyplot as plt
import time
"""
class MovAvg:
def __init__(self, n = 200, s = 1):
self._window = n
self._num = s
self._vals = vQueue(self._num)
self._last = np.zeros(s, dtype = np.float32)
self._MA = np.zeros(s, dtype = np.float32)
def update(self, vec): # vec assumed to be np array of size s
self._vals.enqueue(vec)
self._MA += vec
if self._vals.size() > self._window:
self._last = self._vals.dequeue()
self._MA -= self._last
def getMA(self, reset):
if reset: # allow the option of resetting the MA from time to time
self._MA = self._vals.sum()
return self._MA / self._vals.size()
class PairsTrading(MovAvg):
def __init__(self, th_p = 0.05, nstocks = 1):
self._thp = th_p # threshold for p-value
self._n = nstocks
self._smatrix = np.zeros((self._n, self._n),np.float32) # scores
self._pmatrix = np.ones((self._n, self._n),np.float32) # pvalues
# pvalues are the probabilities of the agreement being a coincidence
self._pairs = [] # cointegrated pair indices
self._spreads = [] # used in seeing if the current spread is
# above or below the medium spred - we are betting
# on mean-reversion for the spread
self._MApairs = []
def update_pairs(self, data):
self._pairs = []
for i in range(self._n):
for j in range(i+1, self._n):
score, pvalue = coint(data[:,i], data[:,j])[0:2]
self._smatrix[i, j], self._smatrix[j, i] = score, score
self._pmatrix[i, j], self._pmatrix[j, i] = pvalue, pvalue
if pvalue < self._thp: # p values below confidence levels
self._pairs.append((i, j))
def getPvals_Pairs(self):
return self._pmatrix, self._pairs
def update_spreads(self):
self._spreads = []
self._MApairs = []
for _ in self._pairs:
?????????
"""
class Market_Maker:
    """Simple symmetric market maker quoting fixed-size orders.

    Tracks inventory, outstanding bids/asks and average book prices for a
    fixed universe of symbols.  Order ids come from two disjoint counters
    (``_buyidx`` for bids starting at 1, ``_sellidx`` for asks starting at
    100000) so bid and ask ids never collide.

    The original body had several syntax errors (missing colons) and
    referenced undefined names (``sell``, ``key``, ``self.cancels``,
    ``self.buyidx``); those are fixed here while keeping the interface.
    """

    def __init__(self):
        self._stocks = ["BOND", "GS", "MS", "WFC", "XLF", "VALBZ", "VALE"]
        # per-symbol absolute position limits
        self._limits = {"BOND": 100, "GS": 100, "MS": 100, "WFC": 100,
                        "XLF": 100, "VALBZ": 10, "VALE": 10}
        self._inventory = {s: 0 for s in self._stocks}
        # latest outgoing order strings, keyed by symbol
        self._buy = {s: "" for s in self._stocks}
        self._sell = {s: "" for s in self._stocks}
        # running average book prices, keyed by symbol
        self._buyprices = {s: 0 for s in self._stocks}
        self._sellprices = {s: 0 for s in self._stocks}
        self._quant = 5  # fixed order size
        self._cancels = {s: [] for s in self._stocks}
        self._buyidx = 1
        self._sellidx = 100000
        # outstanding orders: lists of [order_id, price, quantity]
        self._curr_bids = {s: [] for s in self._stocks}
        self._curr_asks = {s: [] for s in self._stocks}

    def update_inventory(self, fill_strings):
        """Apply exchange FILL messages.

        Drops the filled resting order from the bid/ask book and adjusts
        inventory by the fixed order size.

        Parameters
        ----------
        fill_strings : iterable of str
            Messages shaped like ``"FILL <order_id> <symbol> <BUY|SELL> ..."``.
            (The original indexed ``fill_strings[_]`` while iterating, which
            only worked for dicts; iterating the elements handles lists too.)
        """
        for line in fill_strings:
            name, order_id, stock, action = line.split()[0:4]
            order_id = int(order_id)
            if name != 'FILL':
                continue
            if action == 'BUY':
                # remove the filled bid and take on inventory
                self._curr_bids[stock] = [o for o in self._curr_bids[stock]
                                          if o[0] != order_id]
                self._inventory[stock] += self._quant
            elif action == 'SELL':
                # remove the filled ask and reduce inventory
                self._curr_asks[stock] = [o for o in self._curr_asks[stock]
                                          if o[0] != order_id]
                self._inventory[stock] -= self._quant

    def get_avg_prices(self, books):
        """Update per-symbol average bid and ask prices from book snapshots.

        Parameters
        ----------
        books : list or dict of dict
            Each snapshot has keys 'symbol', 'buy' and 'sell'; 'buy'/'sell'
            are sequences of levels carrying the price at index 0.
        """
        book_seq = books.values() if isinstance(books, dict) else books
        for book in book_seq:
            sym = book['symbol']
            bids = book['buy']
            if bids:  # guard against an empty side (ZeroDivisionError)
                self._buyprices[sym] = sum(b[0] for b in bids) // len(bids)
            asks = book['sell']
            if asks:
                self._sellprices[sym] = sum(a[0] for a in asks) // len(asks)

    def purge(self):
        """Cancel every outstanding bid and ask.

        Returns the per-symbol lists of ``"CANCEL <id>"`` strings and
        resets the outstanding-order books.
        """
        for sym in self._stocks:
            self._cancels[sym] = []
            for order in self._curr_bids[sym]:
                self._cancels[sym].append("CANCEL " + str(order[0]))
            for order in self._curr_asks[sym]:
                self._cancels[sym].append("CANCEL " + str(order[0]))
            self._curr_bids[sym] = []
            self._curr_asks[sym] = []
        return self._cancels

    def update_orders(self):
        """Create one bid and one ask per symbol at the current average
        prices, tracking only orders that keep inventory within limits.

        Returns
        -------
        tuple of dict
            The (buy, sell) dictionaries of outgoing order strings.
        """
        for sym in self._stocks:
            self._buyidx += 1
            self._sellidx += 1
            self._buy[sym] = ("ADD " + str(self._buyidx) + " " + sym +
                              " BUY " + str(self._buyprices[sym]) + " " +
                              str(self._quant))
            # the ask uses the sell id counter (the original reused the
            # buy id, colliding with the bid's id)
            self._sell[sym] = ("ADD " + str(self._sellidx) + " " + sym +
                               " SELL " + str(self._sellprices[sym]) + " " +
                               str(self._quant))
            if self._inventory[sym] < self._limits[sym] - self._quant:
                self._curr_bids[sym].append(
                    [self._buyidx, self._buyprices[sym], self._quant])
            if self._inventory[sym] > -self._limits[sym] + self._quant:
                self._curr_asks[sym].append(
                    [self._sellidx, self._sellprices[sym], self._quant])
        return self._buy, self._sell
class ConvTrade:
    """ETF/basket conversion arbitrage bookkeeping.

    XLF converts to/from a basket of 3 BOND + 2 GS + 3 MS + 2 WFC shares at
    a flat fee of 100 per conversion.  (The original header comment said
    "3 GS", but every calculation in the original body used 2 GS; the
    code's weights are kept.)  VALE is a 1-1 ADR of VALBZ with a flat fee
    of 10 per conversion; it is tracked in the limits but not yet traded
    here.

    NOTE(review): inventory is updated optimistically inside
    :meth:`updateETF`; the original comments say the caller should first
    confirm the transaction succeeded -- verify against the caller.
    """

    def __init__(self):
        self._costConvETF = 100  # flat fee per ETF conversion
        self._limits = {"BOND": 100, "GS": 100, "MS": 100, "WFC": 100,
                        "XLF": 100, "VALBZ": 10, "VALE": 10}
        self._inventory = {"XLF": 0, "VALE": 0, "VALBZ": 0, "GS": 0,
                           "BOND": 0, "MS": 0, "WFC": 0}
        # share quantities for the latest suggested conversion
        self._buyETF = {"XLF": 0, "GS": 0, "MS": 0, "WFC": 0, "BOND": 0}
        self._sellETF = {"XLF": 0, "GS": 0, "MS": 0, "WFC": 0, "BOND": 0}
        self._begin_time = int(round(time.time()))  # seconds since epoch
        self._current_time = self._begin_time
        self._buyprices = {"XLF": 0, "GS": 0, "MS": 0, "WFC": 0, "BOND": 0}
        self._sellprices = {"XLF": 0, "GS": 0, "MS": 0, "WFC": 0, "BOND": 0}

    def update_buy(self, buysignal):
        """Add executed buy quantities (symbol -> shares) to inventory."""
        for sym in buysignal:
            self._inventory[sym] += buysignal[sym]

    def update_sell(self, sellsignal):
        """Subtract executed sell quantities (symbol -> shares).

        The original body used an undefined loop variable; the loop was
        clearly intended as the mirror image of :meth:`update_buy`.
        """
        for sym in sellsignal:
            self._inventory[sym] -= sellsignal[sym]

    def get_avg_prices(self, books):
        """Update per-symbol average bid/ask prices from book snapshots.

        ``books`` is a list (or dict) of snapshots with 'symbol', 'buy'
        and 'sell' keys; 'buy'/'sell' are sequences of levels carrying the
        price at index 0.
        """
        book_seq = books.values() if isinstance(books, dict) else books
        for book in book_seq:
            sym = book['symbol']
            bids = book['buy']
            if bids:  # guard against an empty side (ZeroDivisionError)
                self._buyprices[sym] = sum(b[0] for b in bids) / len(bids)
            asks = book['sell']
            if asks:
                self._sellprices[sym] = sum(a[0] for a in asks) / len(asks)

    def updateETF(self, prices):
        """Pick a profitable conversion direction given current prices.

        Parameters
        ----------
        prices : dict
            Mapping of symbol -> price.

        Returns
        -------
        tuple of dict
            (buyETF, sellETF) share quantities.  BE CAREFUL: when
            acquiring XLF the exchange message must be "CONVERT", not
            "BUY".
        """
        basket = (3 * prices["BOND"] + 2 * prices["GS"] +
                  3 * prices["MS"] + 2 * prices["WFC"])
        # profit from converting 10 XLF into the basket (net of fee)
        diff_sell_xlf = 10 * prices["XLF"] - self._costConvETF - basket
        # profit from converting the basket into 10 XLF (net of fee)
        diff_buy_xlf = basket - 10 * prices["XLF"] - self._costConvETF
        # update the clock (the original assigned a misspelled attribute)
        self._current_time = int(round(time.time()))
        if diff_sell_xlf > 0.0:
            # size the trade so no component breaches its position limit
            quant = min((100 - self._inventory["MS"]) // 3,
                        (100 - self._inventory["GS"]) // 2,
                        (100 - self._inventory["BOND"]) // 3,
                        (100 - self._inventory["WFC"]) // 2,
                        (100 + self._inventory["XLF"]) // 10)
            self._buyETF = {"XLF": 0, "GS": 2 * quant, "MS": 3 * quant,
                            "WFC": 2 * quant, "BOND": 3 * quant}
            self._sellETF = {"XLF": 10 * quant, "GS": 0, "MS": 0,
                             "WFC": 0, "BOND": 0}
            self._inventory["XLF"] -= 10 * quant
            self._inventory["GS"] += 2 * quant
            self._inventory["BOND"] += 3 * quant
            self._inventory["WFC"] += 2 * quant
            self._inventory["MS"] += 3 * quant
        if diff_buy_xlf > 0.0:
            quant = min((100 + self._inventory["MS"]) // 3,
                        (100 + self._inventory["GS"]) // 2,
                        (100 + self._inventory["BOND"]) // 3,
                        (100 + self._inventory["WFC"]) // 2,
                        (100 - self._inventory["XLF"]) // 10)
            self._sellETF = {"XLF": 0, "GS": 2 * quant, "MS": 3 * quant,
                             "WFC": 2 * quant, "BOND": 3 * quant}
            self._buyETF = {"XLF": 10 * quant, "GS": 0, "MS": 0,
                            "WFC": 0, "BOND": 0}
            self._inventory["XLF"] += 10 * quant
            self._inventory["GS"] -= 2 * quant
            self._inventory["BOND"] -= 3 * quant
            self._inventory["WFC"] -= 2 * quant
            self._inventory["MS"] -= 3 * quant
        return self._buyETF, self._sellETF
"""
class MarketMaker(MovAvg):
def __init__(self, nstocks = 1):
self._n = nstocks
self._currbids = np.array(self._n, np.int32) # 1/0 for each stock
self._currasks = np.array(self._n, np.int32) # 1/0 for each stock
self._inventory = np.array(self._n, np.int32)
self._MAs = np.array(self._n, np.float32) # moving averages
self._MVs = np.array(self._n, np.float32) # moving volatilities
self._currbidprices = np.array(self._n, np.float32)
self._curraskprices = np.array(self._n, np.float32)
def update_
Strat = PairsTrading(nstocks = 2)
data_fslr = pd.read_csv('fslr.csv')
data_abgby = pd.read_csv('abgby.csv')
data_1 = data_fslr['Close'].ravel()
ndays = data_1.shape[0]
some_noise = np.random.normal(3, 1, ndays)
data_2 = data_1 + some_noise
stock_1 = pd.Series(data_1,name='FSLR')
stock_2 = pd.Series(data_2,name='FSLRP')
df = pd.concat([stock_1,stock_2], axis=1)
df.plot()
plt.show()
Strat.update(df.values)
pvalues, pairs = Strat.getPvals_Pairs()
print(pvalues,pairs)
"""
|
|
#!/usr/bin/env python
# Copyright (c) 2011-2020, wradlib developers.
# Distributed under the MIT License. See LICENSE.txt for more info.
"""
Gage adjustment
^^^^^^^^^^^^^^^
Concept
-------
The objective of this module is the adjustment of radar-based rainfall
estimates by rain gage observations. However, this module could also be
applied to adjust satellite rainfall by rain gage observations, remotely
sensed soil moisture patterns by ground truthing moisture sensors, or any
dense spatial point pattern which could be adjusted by sparse point
measurements (ground truth).
Basically, we only need two data sources:
- point observations (e.g. rain gage observations)
- set of (potentially irregular) unadjusted point values
(e.g. remotely sensed rainfall)
:cite:`Goudenhoofdt2009` provide an excellent overview of adjustment
procedures. The general idea is that we quantify the error of the
remotely sensed rainfall at the rain gage locations, assuming the rain
gage observation to be accurate.
The error can be assumed to be purely additive
(:class:`~wradlib.adjust.AdjustAdd`), purely multiplicative
(:class:`~wradlib.adjust.AdjustMultiply`, :class:`~wradlib.adjust.AdjustMFB`)
or a mixture of both (:class:`~wradlib.adjust.AdjustMixed`).
If the error is assumed to be heterogeneous in space
(:class:`~wradlib.adjust.AdjustAdd`, :class:`~wradlib.adjust.AdjustMultiply`,
:class:`~wradlib.adjust.AdjustMixed`), the error at the rain gage locations is
interpolated to the radar bin locations and then used to adjust (correct)
the raw radar rainfall estimates. In case of the AdjustMFB approach, though,
the multiplicative error is assumed to be homogeneous in space.
Quick start
-----------
The basic procedure consists of creating an adjustment object from the class
you want to use for adjustment. After that, you can call the object with the
actual data that is to be adjusted. The following example is using the
additive error model with default settings. ``obs_coords`` and
``raw_coords`` represent arrays with coordinate pairs for the gage
observations and the radar bins, respectively. ``obs`` and ``raw`` are
arrays containing the actual data::
adjuster = AdjustAdd(obs_coords, raw_coords)
adjusted = adjuster(obs, raw)
Both ``obs`` and ``raw`` need to be flat (1-dimensional) arrays of shape (n,)
that have the same length as the ``obs_coords`` and ``raw_coords`` arrays,
respectively.
The user can specify the approach that should be used to interpolate the error
in space, as well as the keyword arguments which control the behaviour of the
interpolation approach. For this purpose, all interpolation classes from the
:mod:`wradlib.ipol` module are available and can be passed by using the
``ipclass`` argument. The default interpolation class is
Inverse Distance Weighting (:class:`~wradlib.ipol.Idw`). If you want to use
e.g. linear barycentric interpolation::
import wradlib.ipol as ipol
adjuster = AdjustAdd(obs_coords, raw_coords, ipclass=ipol.Linear)
adjusted = adjuster(obs, raw)
Warning
-------
Be aware that there are a lot of control parameters that can dramatically
influence the behaviour of the adjustment (which gauges are considered,
how is an error interpolation carried out, ...). Read the docs carefully
and try to experiment with the effects of the different control parameters.
There might be situations in which the algorithm decides - based on the
control parameters - not to do an adjustment and just returns the unadjusted
values.
Cross validation
----------------
Another helpful feature is an easy-to-use method for leave-one-out
cross-validation :cite:`Cross-validation`. Cross validation is a standard
procedure for verifying rain gage adjustment or interpolation procedures. You
can start the cross validation in the same way as you start the actual
adjustment, however, you call the :meth:`~wradlib.adjust.AdjustBase.xvalidate`
method instead. The result of the cross validation are pairs of observation
and the corresponding adjustment result at the observation location. Using the
:mod:`wradlib.verify` module, you can compute error metrics for the cross
validation results::
adjuster = AdjustAdd(obs_coords, raw_coords)
observed, estimated = adjuster.xvalidate(obs, raw)
from wradlib.verify import ErrorMetrics
metrics = ErrorMetrics(observed, estimated)
metrics.report()
.. autosummary::
:nosignatures:
:toctree: generated/
{}
"""
# Public API of this module; these names are also injected into the
# autosummary placeholder of the module docstring.
__all__ = [
    "AdjustBase",
    "AdjustMFB",
    "AdjustMultiply",
    "AdjustAdd",
    "AdjustMixed",
    "RawAtObs",
    "GageOnly",
    "AdjustNone",
]
# Substitute the "{}" placeholder in the module docstring with the public
# names so Sphinx's autosummary picks them up.
__doc__ = __doc__.format("\n ".join(__all__))
import numpy as np
from scipy import spatial, stats
from wradlib import ipol, util
class AdjustBase(ipol.IpolBase):
    """The basic adjustment class that all other adjustment classes
    inherit from.

    All methods except the :meth:`~wradlib.adjust.AdjustBase.__call__` method
    are inherited by the adjustment subclasses.

    Parameters
    ----------
    obs_coords : :py:class:`numpy:numpy.ndarray`
        array of floats of shape (number of points, 2)
        x and y coordinate pairs of observation locations (e.g. rain gauges).
    raw_coords : :py:class:`numpy:numpy.ndarray`
        array of floats of shape (number of points, 2)
        x and y coordinate pairs of raw (unadjusted) radar field
    nnear_raws : int
        Defaults to 9. This parameter controls the number of radar bins or
        grid cells (in the neighbourhood of a rain gauge) which is used to
        compute the value of the radar observation AT a rain gauge.
    stat : str
        Defaults to 'median'. Must be either 'mean', 'median', or 'best'.
        This parameter controls the statistic that is used to compute the value
        of the radar observation AT a rain gauge based on the neighbourhood
        specified by parameter ``nnear_raws``.
    mingages : int
        Defaults to 5. Minimum number of valid gages required for an
        adjustment. If less valid gauges are available, the adjustment
        procedure will return unadjusted raw values. If you do not want to use
        this feature, you need to set ``mingages=0``.
    minval : float
        If the gage or radar observation is below this threshold, the location
        will not be used for adjustment. For additive adjustment, this value
        should be set to zero (default value). For multiplicative adjustment,
        values larger than zero might be chosen in order to minimize
        artifacts.
    mfb_args : dict
        **Only used for AdjustMFB** - This set of parameters controls how the
        mean field bias is computed. Items of the dictionary are:
        - *method*: string
        defaults to 'linregr' which fits a regression line through observed
        and estimated values and then gets the bias from the inverse of
        the slope.
        Other values: 'mean' or 'median' compute the mean or the median of
        the ratios between gauge and radar observations.
        - *minslope*, *minr*, *maxp*:
        When using method='linregr', these parameters control whether a
        linear regression turned out to be robust (minimum allowable slope,
        minimum allowable correlation, maximum allowable p-value). If the
        regression result is not considered robust, no adjustment will
        take place.
    Ipclass : :class:`wradlib.ipol.IpolBase`
        an interpolation class from :mod:`wradlib.ipol`
        **Not used for AdjustMFB** - default value is
        :class:`~wradlib.ipol.Idw` (Inverse Distance Weighting).
    ipargs : dict
        keyword arguments to create an instance of ipclass
        **Not used for AdjustMFB** - for :class:`~wradlib.ipol.Idw`, these
        keyword arguments would e.g. be ``nnear`` or ``p``.

    Examples
    --------
    See :ref:`/notebooks/multisensor/wradlib_adjust_example.ipynb`.
    """

    def __init__(
        self,
        obs_coords,
        raw_coords,
        nnear_raws=9,
        stat="median",
        mingages=5,
        minval=0.0,
        mfb_args=None,
        ipclass=ipol.Idw,
        **ipargs,
    ):
        # Check arguments
        if mfb_args is None:
            mfb_args = dict(method="linregr", minslope=0.1, minr=0.5, maxp=0.01)
        assert mfb_args["method"] in ["mean", "median", "linregr"], (
            "Argument mfb_args['method'] has to be one "
            "out of 'mean', 'median' or 'linregr'."
        )
        # These are the coordinates of the rain gage locations and
        # the radar bin locations
        self.obs_coords = self._make_coord_arrays(obs_coords)
        self.raw_coords = self._make_coord_arrays(raw_coords)
        # These are the general control parameters
        # for all adjustment procedures
        self.nnear_raws = nnear_raws
        self.stat = stat
        self.mingages = mingages
        self.minval = minval
        # Control parameters for specific adjustment procedures
        # for AdjustMFB
        self.mfb_args = mfb_args
        # interpolation class and its keyword arguments
        # (needed for AdjustAdd, AdjustMultiply, AdjustMixed)
        self.ipclass = ipclass
        self.ipargs = ipargs
        # create a default instance of interpolator
        self.ip = ipclass(src=self.obs_coords, trg=self.raw_coords, **ipargs)
        # This method will quickly retrieve the actual radar values
        # at the gage locations
        self.get_raw_at_obs = RawAtObs(
            self.obs_coords, self.raw_coords, nnear=nnear_raws, stat=stat
        )

    def _checkip(self, ix, targets):
        """INTERNAL: Return a revised instance of the Interpolator class.

        When an instance of an Adjust... class is created, an instance of the
        desired Interpolation class (argument ipclass) is created as attribute
        *self.ip*. However, this instance is only valid in case all
        observation points (attribute *self.obs_coords*) have valid
        observation-radar pairs. In case points are missing (or in case the
        instance is called in the course of cross validation), a new instance
        has to be created which considers the new constellation of
        observation-radar pairs.
        This method computes and returns this new instance.

        Parameters
        ----------
        ix : :py:class:`numpy:numpy.ndarray`
            array of integers
            These are the indices of observation points with valid
            observation-radar pairs
        targets : :py:class:`numpy:numpy.ndarray`
            array of floats of shape (number of target points, 2)
            Target coordinates for the interpolation

        Returns
        -------
        output : :class:`wradlib.ipol.IpolBase`
            an instance of a class that inherited from :class:`wradlib.ipol.IpolBase`
        """
        # first, set interpolation targets (default: the radar coordinates)
        targets_default = False
        if targets is None:
            targets = self.raw_coords
            targets_default = True
        # second, compute inverse distance neighbours
        # (rebuild only if the valid-pair set shrank or custom targets given)
        if (not len(ix) == len(self.obs_coords)) or (not targets_default):
            return self.ipclass(self.obs_coords[ix], targets, **self.ipargs)
        else:
            # all pairs valid, default targets: reuse the cached interpolator
            return self.ip

    def __call__(self, obs, raw, targets=None, rawatobs=None, ix=None):
        """Returns an array of ``raw`` values that are adjusted by ``obs``.

        Abstract here; each subclass implements its own error model.

        Parameters
        ----------
        obs : :py:class:`numpy:numpy.ndarray`
            flat (1-D) array of floats with shape (num gauges,)
            These are the gage observations used for adjustment. This array
            needs to be of the same length as the array "obs_coords" used to
            initialize the adjustment object.
        raw : :py:class:`numpy:numpy.ndarray`
            flat (1-D) array of floats with shape (num radar cells,)
            These are the raw (unadjusted) radar rainfall values. This array
            needs to be of the same length as the array "raw_coords" used to
            initialize the adjustment object.
        targets : :py:class:`numpy:numpy.ndarray`
            (INTERNAL - DO NOT USE)
            Array of floats. Coordinate pairs for locations on which the final
            adjustment product is interpolated
            Defaults to None. In this case, the output locations will be
            identical to the radar coordinates
        rawatobs : :py:class:`numpy:numpy.ndarray`
            (INTERNAL - DO NOT USE)
            Array of floats. For internal use from AdjustBase.xvalidate only
            (defaults to None)
        ix : :py:class:`numpy:numpy.ndarray`
            (INTERNAL - DO NOT USE)
            Array of integers. For internal use from AdjustBase.xvalidate only
            (defaults to None)
        """
        pass

    def _check_shape(self, obs, raw):
        """INTERNAL: Check consistency of the input data obs and raw with
        the shapes of the coordinates
        """
        # TODO
        pass

    def _get_valid_pairs(self, obs, raw):
        """INTERNAL: Helper method to identify valid obs-raw pairs"""
        # checking input shape consistency
        self._check_shape(obs, raw)
        # radar values at gage locations
        rawatobs = self.get_raw_at_obs(raw, obs)
        # check where both gage and radar observations are valid
        ix = np.intersect1d(
            util._idvalid(obs, minval=self.minval),
            util._idvalid(rawatobs, minval=self.minval),
        )
        return rawatobs, ix

    def xvalidate(self, obs, raw):
        """Leave-One-Out Cross Validation, applicable to all gage adjustment
        classes.

        This method will be inherited by other Adjust classes. It should thus
        be applicable to all adjustment procedures without any modification.
        This way, the actual adjustment procedure has only to be defined *once*
        in the :meth:`~wradlib.adjust.AdjustBase.__call__` method.
        The output of this method can be evaluated by using the
        `verify.ErrorMetrics` class.

        Parameters
        ----------
        obs : :py:class:`numpy:numpy.ndarray`
            array of floats
        raw : :py:class:`numpy:numpy.ndarray`
            array of floats

        Returns
        -------
        obs : :py:class:`numpy:numpy.ndarray`
            array of floats
            valid observations at those locations which have a valid radar
            observation
        estatobs : :py:class:`numpy:numpy.ndarray`
            array of floats
            estimated values at the valid observation locations
        """
        rawatobs, ix = self._get_valid_pairs(obs, raw)
        # nearest-bin (nnear=1) radar values serve as "raw" input per gauge
        self.get_raws_directly_at_obs = RawAtObs(
            self.obs_coords, self.raw_coords, nnear=1
        )
        raws_directly_at_obs = self.get_raws_directly_at_obs(raw)
        ix = np.intersect1d(ix, util._idvalid(raws_directly_at_obs, minval=self.minval))
        # Container for estimation results at the observation location
        estatobs = np.zeros(obs.shape) * np.nan
        # check whether enough gages remain for adjustment
        if len(ix) <= (self.mingages - 1):
            # not enough gages for cross validation: return empty arrays
            return obs, estatobs
        # Now iterate over valid pairs
        for i in ix:
            # Pass all valid pairs except ONE which you pass as target
            ix_adjust = np.setdiff1d(ix, [i])
            estatobs[i] = self.__call__(
                obs,
                raws_directly_at_obs[i],
                self.obs_coords[i].reshape((1, -1)),
                rawatobs,
                ix_adjust,
            )
        return obs, estatobs
class AdjustAdd(AdjustBase):
    """Gage adjustment using an additive error model.

    Create an instance once and call it for each time step: as long as the
    gauge network does not change between calls, neighbour search and
    inverse distance weights are computed only once, at initialisation.

    Invalid gage or radar observations (e.g. NaN, Inf or other typical
    missing-data flags such as -9999) are detected automatically. If any
    are present, the interpolator has to be rebuilt for the remaining
    valid pairs inside :meth:`~wradlib.adjust.AdjustAdd.__call__`, at some
    cost in performance.

    Note
    ----
    Inherits from :class:`wradlib.adjust.AdjustBase`.
    For a complete overview of parameters for the initialisation of
    adjustment objects, as well as an extensive example, please see
    :class:`wradlib.adjust.AdjustBase`.

    Returns
    -------
    output : :py:class:`numpy:numpy.ndarray`
        array of adjusted radar values
    """

    def __call__(self, obs, raw, targets=None, rawatobs=None, ix=None):
        """Return an array of ``raw`` values adjusted by ``obs``.

        Calling an adjustment object works the same for all adjustment
        classes. Detailed instructions on the parameters ``obs`` and
        ``raw`` are provided in :meth:`wradlib.adjust.AdjustBase.__call__`.
        """
        # Generic preamble shared by most __call__ implementations: find
        # valid observation/radar pairs unless xvalidate supplied them.
        if rawatobs is None or ix is None:
            rawatobs, ix = self._get_valid_pairs(obs, raw)
        if len(ix) < self.mingages:
            # too few valid gauges: hand back the unadjusted field
            return raw
        # Rebuild the interpolator if the valid pairs or targets changed.
        ip = self._checkip(ix, targets)
        # Additive error model: interpolate the point-wise differences and
        # add them to the raw field, clipping negative results to zero.
        pointerr = obs[ix] - rawatobs[ix]
        fielderr = ip(pointerr)
        adjusted = raw + fielderr
        return np.where(adjusted < 0.0, 0.0, adjusted)
class AdjustMultiply(AdjustBase):
    """Gage adjustment using a multiplicative error model.

    Create an instance once and call it for each time step: as long as the
    gauge network does not change between calls, neighbour search and
    inverse distance weights are computed only once, at initialisation.

    Invalid gage or radar observations (e.g. NaN, Inf or other typical
    missing-data flags such as -9999) are detected automatically. If any
    are present, the interpolator has to be rebuilt for the remaining
    valid pairs inside :meth:`~wradlib.adjust.AdjustMultiply.__call__`, at
    some cost in performance.

    Note
    ----
    Inherits from :class:`wradlib.adjust.AdjustBase`.
    For a complete overview of parameters for the initialisation of
    adjustment objects, as well as an extensive example, please see
    :meth:`wradlib.adjust.AdjustBase`.

    Returns
    -------
    output : :py:class:`numpy:numpy.ndarray`
        array of adjusted radar values
    """

    def __call__(self, obs, raw, targets=None, rawatobs=None, ix=None):
        """Return an array of ``raw`` values adjusted by ``obs``.

        Calling an adjustment object works the same for all adjustment
        classes. Detailed instructions on the parameters ``obs`` and
        ``raw`` are provided in :meth:`wradlib.adjust.AdjustBase.__call__`.
        """
        # Generic preamble shared by most __call__ implementations: find
        # valid observation/radar pairs unless xvalidate supplied them.
        if rawatobs is None or ix is None:
            rawatobs, ix = self._get_valid_pairs(obs, raw)
        if len(ix) < self.mingages:
            # too few valid gauges: hand back the unadjusted field
            return raw
        # Rebuild the interpolator if the valid pairs or targets changed.
        ip = self._checkip(ix, targets)
        # Multiplicative error model: interpolate the gauge/radar ratios
        # and scale the raw field with the interpolated ratio field.
        pointratio = obs[ix] / rawatobs[ix]
        fieldratio = ip(pointratio)
        return fieldratio * raw
class AdjustMixed(AdjustBase):
    """Gage adjustment using a mixed error model (additive and multiplicative).

    The mixed error model assumes that you have both a multiplicative and an
    additive error term. The intention is to overcome the drawbacks of the
    purely additive and multiplicative approaches (see
    :class:`~wradlib.adjust.AdjustAdd` and
    :class:`~wradlib.adjust.AdjustMultiply`). The formal representation of the
    error model according to :cite:`Pfaff2010` is:

    .. math::

        R_{gage} = R_{radar} \\cdot (1 + \\delta) + \\epsilon

    :math:`\\delta` and :math:`\\epsilon` have to be assumed to be independent
    and normally distributed. The present implementation is based on a Least
    Squares estimation of :math:`\\delta` and :math:`\\epsilon` for each rain
    gage location. :math:`\\delta` and :math:`\\epsilon` are then interpolated
    and used to correct the radar rainfall field.

    The least squares implementation uses the equation for the error model plus
    the condition to minimize (:math:`\\delta^2 + \\epsilon^2`) for each gage
    location. The idea behind this is that :math:`\\epsilon` dominates the
    adjustment for small deviations between radar and gage while
    :math:`\\delta` dominates in case of large deviations.

    **Usage**:

    First, an instance of AdjustMixed has to be created. Calling this instance
    then does the actual adjustment. The motivation behind this is performance.
    In case the observation points are always the same for different time
    steps, the computation of neighbours and inverse distance weights only
    needs to be performed once during initialisation.

    AdjustMixed automatically takes care of invalid gage or radar observations
    (e.g. NaN, Inf or other typical missing data flags such as -9999).
    However, in case e.g. the observation data contain missing values, the
    computation of the inverse distance weights needs to be repeated in
    :func:`~wradlib.adjust.AdjustMixed.__call__` which is at the expense of
    performance.

    Note
    ----
    Inherits from :class:`wradlib.adjust.AdjustBase`

    For a complete overview of parameters for the initialisation of adjustment
    objects, as well as an extensive example, please see
    :class:`wradlib.adjust.AdjustBase`.

    Returns
    -------
    output : :py:class:`numpy:numpy.ndarray`
        array of adjusted radar values
    """

    def __call__(self, obs, raw, targets=None, rawatobs=None, ix=None):
        """Returns an array of ``raw`` values that are adjusted by ``obs``.

        Calling an adjustment object works the same for all adjustment classes.
        Detailed instructions on the parameters ``obs`` and ``raw`` are
        provided in :meth:`wradlib.adjust.AdjustBase.__call__`.
        """
        # ---------------- generic preamble shared by __call__ methods --------
        if rawatobs is None or ix is None:
            # Not called from self.xvalidate: find valid obs/radar pairs first.
            rawatobs, ix = self._get_valid_pairs(obs, raw)
        if len(ix) < self.mingages:
            # Too few valid gages for an adjustment: return the field as-is.
            return raw
        # Refresh the interpolator if the set of valid gages changed.
        ip = self._checkip(ix, targets)
        # ---------------- mixed additive/multiplicative adjustment -----------
        gage = obs[ix]
        radar = rawatobs[ix]
        # Closed-form least-squares estimates of the additive (epsilon) and
        # multiplicative (delta) error components at each gage location.
        epsilon = (gage - radar) / (radar ** 2 + 1.0)
        delta = ((gage - epsilon) / radar) - 1.0
        # Interpolate both error fields to the target locations ...
        ipepsilon = ip(epsilon)
        ipdelta = ip(delta)
        # ... and invert the error model R_gage = R_radar*(1+delta)+epsilon.
        return (1.0 + ipdelta) * raw + ipepsilon
class AdjustMFB(AdjustBase):
    """Multiplicative gage adjustment using *one* correction factor for the \
    entire domain.

    This method is also known as the Mean Field Bias correction.

    Note
    ----
    Inherits from :class:`wradlib.adjust.AdjustBase`

    For a complete overview of parameters for the initialisation of adjustment
    objects, as well as an extensive example, please see
    :class:`wradlib.adjust.AdjustBase`.

    Returns
    -------
    output : :py:class:`numpy:numpy.ndarray`
        array of adjusted radar values
    """

    def __call__(self, obs, raw, targets=None, rawatobs=None, ix=None):
        """Returns an array of ``raw`` values that are adjusted by ``obs``.

        Calling an adjustment object works the same for all adjustment classes.
        Detailed instructions on the parameters ``obs`` and ``raw`` are
        provided in :meth:`wradlib.adjust.AdjustBase.__call__`.
        """
        # ----------------GENERIC PART FOR MOST __call__ methods---------------
        if (ix is None) or (rawatobs is None):
            # Check for valid observation-radar pairs in case this method has
            # not been called from self.xvalidate
            rawatobs, ix = self._get_valid_pairs(obs, raw)
        if len(ix) < self.mingages:
            # Not enough valid gages for adjustment? - return unadjusted data
            return raw
        # No interpolator is needed here: the correction factor is global.
        # -----------------THIS IS THE ACTUAL ADJUSTMENT APPROACH--------------
        # compute ratios for each valid observation point; invalid ratios
        # (NaN/Inf from zero radar values etc.) are masked out
        ratios = np.ma.masked_invalid(obs[ix] / rawatobs.ravel()[ix])
        if len(np.where(np.logical_not(ratios.mask))[0]) < self.mingages:
            # Not enough valid pairs of raw and obs
            return raw
        if self.mfb_args["method"] == "mean":
            corrfact = np.mean(ratios)
        elif self.mfb_args["method"] == "median":
            corrfact = np.median(ratios)
        elif self.mfb_args["method"] == "linregr":
            corrfact = 1.0
            ix_ = np.where(np.logical_not(ratios.mask))[0]
            x = obs[ix][ix_]
            y = rawatobs[ix][ix_]
            # check whether we should adjust or not
            try:
                slope, intercept, r, p, stderr = stats.linregress(x, y)
            except Exception:
                # degenerate input (e.g. constant x): force "no adjustment"
                slope, r, p = 0, 0, np.inf
            if (
                (slope > self.mfb_args["minslope"])
                and (r > self.mfb_args["minr"])
                and (p < self.mfb_args["maxp"])
            ):
                x = x[:, np.newaxis]
                try:
                    # rcond=None selects the modern (machine-precision based)
                    # cutoff and silences numpy's FutureWarning
                    slope, _, _, _ = np.linalg.lstsq(x, y, rcond=None)
                    if slope[0] != 0:
                        corrfact = 1.0 / slope[0]
                except Exception:
                    # no correction if linear regression fails
                    pass
        if isinstance(corrfact, np.ma.core.MaskedConstant):
            # all ratios masked -> fall back to "no correction"
            corrfact = 1.0
        return corrfact * raw
class AdjustNone(AdjustBase):
    """Same behaviour as the other adjustment classes, but simply returns the \
    unadjusted data.

    This class can be used for benchmark verification experiments as a control
    for unadjusted data.

    Note
    ----
    Inherits from :class:`wradlib.adjust.AdjustBase`

    For a complete overview of parameters for the initialisation of adjustment
    objects, as well as an extensive example, please see
    :class:`wradlib.adjust.AdjustBase`.

    Returns
    -------
    output : :py:class:`numpy:numpy.ndarray`
        array of unadjusted radar values
    """

    def __call__(self, obs, raw, targets=None, rawatobs=None, ix=None):
        """Returns ``raw`` unchanged.

        Calling an adjustment object works the same for all adjustment classes.
        Detailed instructions on the parameters ``obs`` and ``raw`` are
        provided in :meth:`wradlib.adjust.AdjustBase.__call__`.
        """
        # Keep the generic preamble so cross validation behaves exactly like
        # the real adjustment classes ...
        if rawatobs is None or ix is None:
            rawatobs, ix = self._get_valid_pairs(obs, raw)
        if len(ix) < self.mingages:
            return raw
        # ... but return the radar field untouched as the control case.
        return raw
class GageOnly(AdjustBase):
    """Same behaviour as the other adjustment classes, but returns an \
    interpolation of rain gage observations

    First, an instance of GageOnly has to be created. Calling this instance
    then does the actual adjustment. The motivation behind this is
    performance: in case the observation points are always the same for
    different time steps, the computation of neighbours and inverse distance
    weights only needs to be performed once during initialisation.

    GageOnly automatically takes care of invalid gage or radar observations
    (e.g. NaN, Inf or other typical missing data flags such as -9999).
    However, in case e.g. the observation data contain missing values, the
    computation of the inverse distance weights needs to be repeated in
    :meth:`~wradlib.adjust.GageOnly.__call__` which is at the expense of
    performance.

    Note
    ----
    Inherits from :class:`wradlib.adjust.AdjustBase`

    For a complete overview of parameters for the initialisation of adjustment
    objects, as well as an extensive example, please see
    :class:`wradlib.adjust.AdjustBase`.

    Returns
    -------
    output : :py:class:`numpy:numpy.ndarray`
        array of adjusted radar values
    """

    def __call__(self, obs, raw, targets=None, rawatobs=None, ix=None):
        """Returns an array of ``raw`` values that are adjusted by ``obs``.

        Calling an adjustment object works the same for all adjustment classes.
        Detailed instructions on the parameters ``obs`` and ``raw`` are
        provided in :meth:`wradlib.adjust.AdjustBase.__call__`.
        """
        # ---------------- generic preamble shared by __call__ methods --------
        if rawatobs is None or ix is None:
            # Not called from self.xvalidate: find valid obs/radar pairs first.
            rawatobs, ix = self._get_valid_pairs(obs, raw)
        if len(ix) < self.mingages:
            # Too few valid gages for an adjustment: return the field as-is.
            return raw
        # Refresh the interpolator if the set of valid gages changed.
        ip = self._checkip(ix, targets)
        # ---------------- pure gage interpolation ----------------------------
        # Radar values are ignored entirely; just interpolate the gages.
        return ip(obs[ix])
class RawAtObs:
    """Get the raw values in the neighbourhood of the observation points

    Parameters
    ----------
    obs_coords : :py:class:`numpy:numpy.ndarray`
        array of float
        coordinate pairs of observations points
    raw_coords : :py:class:`numpy:numpy.ndarray`
        array of float
        coordinate pairs of raw (unadjusted) field
    nnear: int
        number of neighbours which should be considered in the vicinity of each
        point in obs
    stat: str
        function name
    """

    def __init__(self, obs_coords, raw_coords, nnear=9, stat="median"):
        # Resolve the summary statistic once ...
        self.statfunc = _get_statfunc(stat)
        # ... and precompute the nnear raw-grid neighbours of every gage.
        self.raw_ix = _get_neighbours_ix(obs_coords, raw_coords, nnear)

    def __call__(self, raw, obs=None):
        """
        Returns the values of raw at the observation locations

        Parameters
        ----------
        raw : :py:class:`numpy:numpy.ndarray`
            array of float
            raw values
        """
        # Pick the raw values at the precomputed neighbour indices.
        neighbours = raw[self.raw_ix]
        # With a single neighbour per gage there is nothing to summarise.
        if neighbours.ndim <= 1:
            return neighbours
        # Multiple neighbours per gage: reduce them with the statistic.
        return self.statfunc(obs, neighbours)
def _get_neighbours_ix(obs_coords, raw_coords, nnear):
"""Returns ``nnear`` neighbour indices per ``obs_coords`` coordinate pair
Parameters
----------
obs_coords : :py:class:`numpy:numpy.ndarray`
array of float of shape (num_points,ndim)
in the neighbourhood of these coordinate pairs we look for neighbours
raw_coords : :py:class:`numpy:numpy.ndarray`
array of float of shape (num_points,ndim)
from these coordinate pairs the neighbours are selected
nnear : int
number of neighbours to be selected per coordinate pair of
``obs_coords``
"""
# plant a tree
tree = spatial.cKDTree(raw_coords)
# return nearest neighbour indices
return tree.query(obs_coords, k=nnear)[1]
def _get_statfunc(funcname):
"""Returns a function that corresponds to parameter ``funcname``
Parameters
----------
funcname : str
a name of a numpy function OR another option known by _get_statfunc
Potential options: 'mean', 'median', 'best'
"""
try:
# first try to find a numpy function which corresponds to <funcname>
func = getattr(np, funcname)
def newfunc(x, y):
return func(y, axis=1)
except Exception:
# then try to find a function in this module with name funcname
if funcname == "best":
newfunc = best
else:
# if no function can be found, raise an Exception
raise NameError("Unknown function name option: " + funcname)
return newfunc
def best(x, y):
    """Find the values of y which corresponds best to x

    If x is an array, the comparison is carried out for each element of x

    Parameters
    ----------
    x : float | :py:class:`numpy:numpy.ndarray`
        float or 1-d array of float
    y : :py:class:`numpy:numpy.ndarray`
        array of float

    Returns
    -------
    output : :py:class:`numpy:numpy.ndarray`
        1-d array of float with length len(y)
    """
    # isinstance instead of type(...) == ... so that ndarray subclasses
    # (e.g. masked arrays) are recognised as arrays as well.
    if isinstance(x, np.ndarray):
        assert x.ndim == 1, "x must be a 1-d array of floats or a float."
        assert len(x) == len(y), "Length of x and y must be equal."
    if isinstance(y, np.ndarray):
        assert y.ndim <= 2, "y must be 1-d or 2-d array of floats."
    else:
        raise ValueError("y must be 1-d or 2-d array of floats.")
    # Broadcast x against the candidate axis of y.
    x = np.array(x).reshape((-1, 1))
    if y.ndim == 1:
        # Single candidate row: search over the flattened array.
        y = np.array(y).reshape((1, -1))
        axis = None
    else:
        axis = 1
    # For each row pick the candidate with the smallest absolute deviation.
    return y[np.arange(len(y)), np.argmin(np.abs(x - y), axis=axis)]
# Executed only when the module is run directly, not on import.
if __name__ == "__main__":
    print("wradlib: Calling module <adjust> as main...")
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2013 ton1517 <tonton1517@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""git-ls-date
Usage:
git ls-date [--date=<option>] [--format=<format>] [<path>]...
git ls-date -h | --help
git ls-date -v | --version
Options:
-h --help Show help.
-v --version Show version.
-d --date=(relative|local|default|iso|rfc|short|raw) Date option.(default: short)
-f --format=<format> Show-format option. See SHOW FORMAT.(default: "{fd} {fh} {ld} {lh} {f}")
SHOW FORMAT:
--format option allows you to specify which information you want to show.
placeholders:
* {ld}: last commit date
* {fd}: first commit date
* {lh}: last commit hash
* {fh}: first commit hash
* {f}: filename
See https://github.com/ton1517/git-ls-date
"""
from subprocess import Popen, PIPE
import sys
import getopt
import re
#=======================================
# config
#=======================================
# Package metadata: shown by help()/version() and used as the section name
# when reading "git-ls-date.*" entries from git config.
_name = 'git-ls-date'
_version = '0.1.1'
_license = 'MIT License'
_description = 'git-ls-date is git sub command shows first and last commit date.'
_url = 'https://github.com/ton1517/git-ls-date'
_author = 'ton1517'
_author_email = 'tonton1517@gmail.com'
def help():
    """Print the version banner, project description and usage text."""
    version()
    about = "\n%s\n%s\nby %s <%s>\n" % (_description, _url, _author, _author_email)
    print(about)
    usage()
def usage():
    """Print the module docstring, which doubles as the usage text."""
    print(__doc__)
def version():
    """Print the command name and its version number."""
    print("%s %s" % (_name, _version))
class Configuration(object):
    """Parse command line options and git config into a configuration.

    Precedence: command line option > git config entry > built-in default.
    """

    # getopt specifications for the supported options
    shortopts = "hvd:f:"
    longopts = ["help", "version", "date=", "format="]
    # built-in defaults
    date_default = "short"
    format_default = "{fd} {fh} {ld} {lh} {f}"

    def __init__(self):
        # NOTE: the attribute used throughout this script is spelled
        # "pathes" (see main() and argparse() below); the former unused
        # "paths" attribute was a typo and has been dropped.
        self.pathes = []
        self.__config_hash = {}
        self.__read_gitconfig()
        self.date = self.__config_hash.get("date", self.date_default)
        self.format = self.__config_hash.get("format", self.format_default)

    def __read_gitconfig(self):
        """Read 'git-ls-date.*' entries from git config into a dict."""
        config_lines = git("config --get-regexp " + _name).split("\n")[:-1]
        # raw string so the escaped dot is not a string-escape warning
        config_re = re.compile(r"%s\.(.*?) (.*)" % _name)
        for line in config_lines:
            result = config_re.search(line)
            self.__config_hash[result.group(1)] = result.group(2)

    def argparse(self, args=None):
        """parse commandline arguments.

        Arg : commandline arguments. you should exclude commandline name.
              for example 'configuration.argparse(sys.argv[1:])'
        """
        # avoid a shared mutable default argument
        args = [] if args is None else args
        try:
            opts, args = getopt.getopt(args, self.shortopts, self.longopts)
        except getopt.GetoptError as e:
            # tell the user what was wrong before showing the usage text
            print(e)
            usage()
            sys.exit(1)
        self.pathes = args
        for opt, value in opts:
            value = value.strip()
            if opt == "--help" or opt == "-h":
                help()
                sys.exit()
            elif opt == "--version" or opt == "-v":
                version()
                sys.exit()
            elif opt == "--date" or opt == "-d":
                self.date = value
            elif opt == "--format" or opt == "-f":
                self.format = value
                try:
                    # dry-run the format string to fail fast on bad formats
                    self.format.format(ld="", fd="", lh="", fh="", f="")
                except (KeyError, ValueError) as e:
                    print("Invalid format error.")
                    print(e)
                    sys.exit(1)
            else:
                usage()
                sys.exit(1)
#=======================================
# git
#=======================================
class GitCommandErrorException(Exception):
    """Raised when a git command writes anything to stderr."""

    def __init__(self, command, message):
        self.command = command
        self.message = message

    def __str__(self):
        # Show the failing command followed by git's error output.
        return self.command + "\n" + self.message
def git(cmd):
    """run git command.

    Return : return command output string
    Raise  : GitCommandErrorException if git wrote to stderr
    """
    proc = Popen("git " + cmd, shell=True, stdout=PIPE, stdin=PIPE, stderr=PIPE)
    out, err = proc.communicate()
    if err:
        raise GitCommandErrorException(cmd, err.decode())
    return out.decode()
class Commit(object):
    """Value object holding a commit date and its abbreviated hash."""

    def __init__(self, date, hash):
        self.date = date
        self.hash = hash

    def __str__(self):
        # "<date> <hash>", as printed by the default output format.
        return "{0} {1}".format(self.date, self.hash)
class FilesParser(object):
    """FilesParser runs 'git ls-files' and parses the result.

    Maintains bidirectional mappings between abbreviated (relative) and
    full (repository-rooted) file paths.
    """

    def __init__(self, pathes=None):
        # Avoid a mutable default argument; accept None (no restriction),
        # a single path string, or a list of paths.
        if pathes is None:
            pathes = []
        self.pathes = pathes if type(pathes) is list else [pathes]
        self.__abbrev_to_full = {}
        self.__full_to_abbrev = {}
        self.files = None
        self.files_full = None
        self.__parse_files()

    def __parse_files(self):
        """Run git ls-files twice and build the path mappings."""
        args = " ".join(["\'%s\'" % f for f in self.pathes])
        self.files = git("ls-files " + args).split("\n")[:-1]
        self.files_full = git("ls-files --full-name " + args).split("\n")[:-1]
        # Both listings are in the same order, so pair them up by index.
        for i, f in enumerate(self.files):
            full = self.files_full[i]
            self.__abbrev_to_full[f] = full
            self.__abbrev_to_full[full] = full
            self.__full_to_abbrev[full] = f
            self.__full_to_abbrev[f] = f

    def get_full(self, file):
        """ return full path.

        Arg : filename
        Return : return full path. if no result, return None.
        """
        return self.__abbrev_to_full.get(file)

    def get_abbrev(self, full_path):
        """ return abbrev path.

        Arg : full path string
        Return : return abbrev path. if no result, return None.
        """
        return self.__full_to_abbrev.get(full_path)
class LogParser(object):
    """LogParser runs 'git log' and parses the result.

    Commits are stored newest-first (git log order): index 0 is the most
    recent commit, index -1 the oldest.
    """

    # base git-log invocation; the date format is appended in __init__
    log_format = "log --oneline --name-only --author-date-order -c --pretty=format:'%h %ad' --date="

    def __init__(self, files_parser, date_option = None):
        self.files_parser = files_parser
        self.date_option = date_option
        # all parsed commits, newest first
        self.commits = []
        # full path -> list of commits touching that file, newest first
        self.__commit_contains_file_hash = {}
        self.files_quote = ["\'%s\'"%f for f in self.files_parser.files]
        # NOTE: this creates an instance attribute shadowing the class-level
        # log_format; the class attribute itself is not modified.
        self.log_format += date_option if date_option else "local"
        self.__parse_log()

    def __parse_log(self):
        """Run git log and index each commit by the files it touches."""
        # commits are separated by blank lines in --name-only output
        log_str = git(self.log_format+" "+" ".join(self.files_quote))[:-1].split("\n\n")
        for l in log_str:
            one_commit = l.split("\n")
            date, hash, files = self.__parse_one_commit_contains_filename(one_commit)
            commit = Commit(date, hash)
            self.commits.append(commit)
            for f in files:
                self.__append_commit(f, commit)

    def __append_commit(self, key_file, commit):
        # Append commit to the per-file list, creating it on first use.
        commit_list = self.__commit_contains_file_hash.get(key_file, [])
        commit_list.append(commit)
        self.__commit_contains_file_hash[key_file] = commit_list

    def __parse_one_commit_contains_filename(self, one_commit):
        # First line is "<hash> <date>"; remaining lines are filenames.
        commit_info = one_commit[0]
        date, hash = self.__parse_one_commit(commit_info)
        files = one_commit[1:]
        return date, hash, files

    def __parse_one_commit(self, one_commit):
        # Abbreviated hashes are 7 chars, then a space, then the date.
        return one_commit[8:], one_commit[:7]

    def get_commits_contains(self, file):
        """return commits that contains file.

        Arg : filename
        Return : commit list, newest first. if file has no commit, return None.
        """
        full_path = self.files_parser.get_full(file)
        commits = self.__commit_contains_file_hash.get(full_path)
        return commits

    def get_first_commit_contains(self, file):
        """return commit that file was added first (the oldest commit).

        Arg : filename
        Return : commit object
        """
        # commits are newest-first, so the oldest is the last element
        commits = self.get_commits_contains(file)
        return commits[-1] if commits else None

    def get_last_commit_contains(self, file):
        """return commit that file was changed last (the newest commit).

        Arg : filename
        Return : commit object
        """
        # commits are newest-first, so the newest is the first element
        commits = self.get_commits_contains(file)
        return commits[0] if commits else None
#=======================================
# main
#=======================================
def main():
    """Entry point: print first/last commit info for each tracked file."""
    try:
        config = Configuration()
        config.argparse(sys.argv[1:])
        files_parser = FilesParser(config.pathes)
        parser = LogParser(files_parser, config.date)
    except GitCommandErrorException as e:
        print(e)
        sys.exit(1)
    for f in files_parser.files:
        first = parser.get_first_commit_contains(f)
        last = parser.get_last_commit_contains(f)
        line = config.format.format(fd=first.date, fh=first.hash,
                                    ld=last.date, lh=last.hash, f=f)
        print(line)
# Script entry point; not executed on import.
if __name__ == "__main__":
    main()
|
|
#
#
#
# Conditions ##############################################
import pprint
# Base class
class Condition(object):
    """Base class for all query/filter conditions.

    Subclasses implement to_dict() returning the ES-style dictionary.
    """

    def __str__(self):
        # Pretty-print the dictionary form for easy debugging.
        return pprint.pformat(self.to_dict())
class NilCondition(Condition):
    '''
    Match-all placeholder condition.

    DO NOT USE THIS CLASS!!! It only serves as the internal fallback query
    when no atom condition is available (see translate()).
    '''

    def to_dict(self):
        # A query_string of "*" matches every document.
        return {'query_string': {'query': '*'}}
# Atom:
class AtomCondition(Condition):
    '''
    Marker base class for conditions that can be used directly in a query.

    NOTE: Some condition may only be used in filter (I guess).
    '''

    def is_atom(self):
        # Atom conditions may serve as the "query" part of a request.
        return True
class Equal(AtomCondition):
    '''
    Exact-match condition on a single field.

    Need the field is not analyzed.
    '''

    def __init__(self, field, value):
        self.field = field
        self.value = value

    def to_dict(self):
        # ES "term" query: exact match against a non-analyzed field.
        return {'term': {self.field: self.value}}
class Prefix(AtomCondition):
    """Prefix-match condition on a single field."""

    def __init__(self, field, value):
        self.field = field
        self.value = value

    def to_dict(self):
        # ES "prefix" query: field value must start with self.value.
        return {'prefix': {self.field: self.value}}
class Range(AtomCondition):
    """Half-open range condition: start <= field < end."""

    def __init__(self, field, start, end):
        self.field = field
        self.start = start
        self.end = end

    def to_dict(self):
        # "gte"/"lt" give the half-open interval [start, end).
        bounds = {'gte': self.start, 'lt': self.end}
        return {'range': {self.field: bounds}}
# Not Atom:
class NotAtomCondition(Condition):
    """Marker base class for compound (filter-only) conditions."""

    def is_atom(self):
        # Compound conditions cannot serve as the bare "query" part.
        return False
class And(NotAtomCondition):
    """Logical conjunction of a list of conditions."""

    def __init__(self, conditions):
        '''
        "conditions" is a list of Condition
        '''
        self.conditions = conditions

    def to_dict(self):
        '''
        Return {"and": [condition1, condition2, ...]} with each member
        converted via its own to_dict().
        '''
        return {'and': [cond.to_dict() for cond in self.conditions]}
class Or(NotAtomCondition):
    """Logical disjunction of a list of conditions."""

    def __init__(self, conditions):
        '''
        "conditions" is a list of Condition
        '''
        self.conditions = conditions

    def to_dict(self):
        '''
        Return {"or": [condition1, condition2, ...]} with each member
        converted via its own to_dict().
        '''
        return {'or': [cond.to_dict() for cond in self.conditions]}
class Not(NotAtomCondition):
    """Logical negation of a single condition."""

    def __init__(self, condition):
        '''
        "condition" is a instance of Condition (NOT LIST)
        '''
        self.condition = condition

    def to_dict(self):
        # Wrap the inner condition's dictionary in an ES "not" clause.
        return {'not': self.condition.to_dict()}
###########################################################
# ESQuery Type ############################################
class ESQueryType(object):
    """Base class for request parts; by default neither a facet nor hits."""

    def is_facet(self):
        return False

    def is_hits(self):
        return False
class Hits(ESQueryType):
    """The "hits" part of an ES request: size, fields, filter and sort."""

    def __init__(self, size, fields=None, condition=None, sort=None):
        '''
        parameters:
            size: integer
            fields: list of string, optional, default all fields
            condition: Condition, optional
            sort: a tuple with two elements,
                  the first is a field and the other is a boolean, optional
        '''
        self.size = size
        # Avoid the shared mutable-default-argument pitfall; None means
        # "all fields", exactly as the old default [] did.
        self.fields = [] if fields is None else fields
        self.condition = condition
        self.sort = sort

    def is_hits(self):
        return True

    def to_dict(self):
        """Return the ES request fragment for these hits settings."""
        def sort_to_dict(sort):
            # (field, reverse) -> {field: "desc"} or {field: "asc"}
            field, reverse = sort
            return {field: 'desc' if reverse else 'asc'}

        d = {}
        d['size'] = self.size
        if self.fields != []:
            d['fields'] = self.fields
        if self.condition is not None:
            d['filter'] = self.condition.to_dict()
        if self.sort is not None:
            d['sort'] = sort_to_dict(self.sort)
        return d
class Facet(ESQueryType):
    """Base class for facets: holds the facet name and optional condition."""

    def __init__(self, name, condition):
        self.name = name
        self.condition = condition

    def is_facet(self):
        return True

    def _add_facets_filter(self, d):
        '''
        This function is called by its subclass to add a facet_filter
        '''
        if self.condition is not None:
            d['facet_filter'] = self.condition.to_dict()
        return d
class Terms(Facet):
    """A "terms" facet: top values of a field ordered by document count."""

    # ES facet type keyword emitted by to_dict()
    ES_FACET_TYPE = 'terms'

    def __init__(self, name, field, size, condition=None):
        super(Terms, self).__init__(name, condition)
        self.field = field
        self.size = size

    def to_dict(self):
        '''
        Return an ES "terms" facet dictionary ordered by count; when a
        condition was given, a "facet_filter" entry is added alongside it.
        '''
        body = {
            'field': self.field,
            'size': self.size,
            'order': 'count',
        }
        return self._add_facets_filter({Terms.ES_FACET_TYPE: body})
class Histogram(Facet):
    """A "date_histogram" facet bucketing documents over a timestamp field."""

    # ES facet type keyword emitted by to_dict()
    ES_FACET_TYPE = 'date_histogram'

    def __init__(self, name, timestamp_field, interval, condition=None):
        super(Histogram, self).__init__(name, condition)
        self.interval = interval
        self.ts_field = timestamp_field

    def to_dict(self):
        '''
        Return an ES "date_histogram" facet dictionary; when a condition
        was given, a "facet_filter" entry is added alongside it.
        '''
        body = {
            'key_field': self.ts_field,
            'interval': self.interval,
        }
        return self._add_facets_filter({Histogram.ES_FACET_TYPE: body})
###########################################################
def translate(hits, facets=None, conditions=None):
    '''
    Build a complete ES request body.

    <hits> is a Hits instance
    <facets> is a list of Facet
    <conditions> is a list of Condition
    '''
    # Avoid mutable default arguments; None means "no facets/conditions".
    facets = [] if facets is None else facets
    conditions = [] if conditions is None else conditions

    def select_a_query_condition(conditions):
        # Prefer the first atom condition as the "query" part; everything
        # else is applied as a filter. Fall back to a match-all query.
        qcond = NilCondition()
        for condition in conditions:
            if condition.is_atom():
                qcond = condition
                break
        rest = [c for c in conditions if c != qcond]
        return qcond, rest

    def conditions_to_dict(conditions):
        qcond, rest = select_a_query_condition(conditions)
        if len(rest) == 0:
            return qcond.to_dict()
        # Combine the remaining conditions into one filter clause.
        rest_cond = And(rest) if len(rest) > 1 else rest[0]
        return {
            'filtered': {
                'query': qcond.to_dict(),
                'filter': rest_cond.to_dict(),
            }}

    d = {}
    d['query'] = conditions_to_dict(conditions)
    if len(facets) > 0:
        d['facets'] = {facet.name: facet.to_dict() for facet in facets}
    d.update(hits.to_dict())
    return d
|
|
###############################################################################
##
## Copyright (C) 2011-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import json
import datetime
from twisted.python import log
from twisted.internet import defer
from autobahn import util
from autobahn.wamp import auth
from autobahn.wamp import types
from autobahn.twisted.wamp import ApplicationSession, RouterSession
from autobahn.twisted.websocket import WampWebSocketServerProtocol, WampWebSocketServerFactory
class UserDb:
    """
    A fake user database.
    """

    def __init__(self):
        # authid -> (salt, key, authrole)
        self._creds = {}

    def add(self, authid, authrole, secret, salt = None):
        # With a salt, store the PBKDF2-derived key; otherwise store the
        # plain secret as the key.
        key = auth.derive_key(secret, salt) if salt else secret
        self._creds[authid] = (salt, key, authrole)
        return self._creds[authid]

    def get(self, authid):
        ## we return a deferred to simulate an asynchronous lookup
        return defer.succeed(self._creds.get(authid, (None, None, None)))
class PendingAuth:
    """
    Used for tracking pending authentications.
    """

    def __init__(self, key, session, authid, authrole, authmethod, authprovider):
        self.authid = authid
        self.authrole = authrole
        self.authmethod = authmethod
        self.authprovider = authprovider
        self.session = session
        self.timestamp = util.utcnow()
        self.nonce = util.newid()
        # The challenge sent to the client is a JSON object containing
        # everything the signature is computed over.
        self.challenge = json.dumps({
            'authid': self.authid,
            'authrole': self.authrole,
            'authmethod': self.authmethod,
            'authprovider': self.authprovider,
            'session': self.session,
            'nonce': self.nonce,
            'timestamp': self.timestamp
        })
        # The signature we expect the client to produce for this challenge.
        self.signature = auth.compute_wcs(key, self.challenge)
class MyRouterSession(RouterSession):
    """
    Our custom router session that authenticates via WAMP-CRA.
    """

    @defer.inlineCallbacks
    def onHello(self, realm, details):
        """
        Callback fired when client wants to attach session.

        Returns (via defer.returnValue) a Challenge when WAMP-CRA can be
        attempted for a known user, otherwise a Deny.
        """
        print("onHello: {} {}".format(realm, details))
        # reset any previous pending authentication for this session
        self._pending_auth = None
        if details.authmethods:
            for authmethod in details.authmethods:
                if authmethod == u"wampcra":
                    ## lookup user in user DB (returns a deferred)
                    salt, key, role = yield self.factory.userdb.get(details.authid)
                    ## if user found ..
                    if key:
                        ## setup pending auth so onAuthenticate can verify
                        ## the client's signature later
                        self._pending_auth = PendingAuth(key, details.pending_session,
                            details.authid, role, authmethod, "userdb")
                        ## send challenge to client
                        extra = {
                            u'challenge': self._pending_auth.challenge
                        }
                        ## when using salted passwords, provide the client with
                        ## the salt and then PBKDF2 parameters used
                        if salt:
                            extra[u'salt'] = salt
                            extra[u'iterations'] = 1000
                            extra[u'keylen'] = 32
                        defer.returnValue(types.Challenge(u'wampcra', extra))
        ## deny client: no usable auth method or unknown user
        defer.returnValue(types.Deny())

    def onAuthenticate(self, signature, extra):
        """
        Callback fired when a client responds to an authentication challenge.

        Accepts the client only when a pending auth exists and the supplied
        signature matches the one precomputed in PendingAuth.
        """
        print("onAuthenticate: {} {}".format(signature, extra))
        ## if there is a pending auth, and the signature provided by client matches ..
        if self._pending_auth:
            if signature == self._pending_auth.signature:
                ## accept the client
                return types.Accept(authid = self._pending_auth.authid,
                    authrole = self._pending_auth.authrole,
                    authmethod = self._pending_auth.authmethod,
                    authprovider = self._pending_auth.authprovider)
            else:
                ## deny client
                return types.Deny(message = u"signature is invalid")
        else:
            ## deny client
            return types.Deny(message = u"no pending authentication")
class TimeService(ApplicationSession):
    """
    A simple time service application component.
    """

    def onJoin(self, details):
        print("session attached")

        def utcnow():
            # Current UTC time as ISO-8601 with a trailing 'Z'.
            return datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")

        self.register(utcnow, 'com.timeservice.now')
# Script entry point: wires up the WAMP router, authentication, the
# embedded time service and the WebSocket/Web server, then runs the
# Twisted reactor. Order matters: the reactor must be installed before
# any endpoint is created.
if __name__ == '__main__':

    import sys, argparse

    from twisted.python import log
    from twisted.internet.endpoints import serverFromString

    ## parse command line arguments
    ##
    parser = argparse.ArgumentParser()

    parser.add_argument("-d", "--debug", action = "store_true",
        help = "Enable debug output.")

    parser.add_argument("-c", "--component", type = str, default = None,
        help = "Start WAMP-WebSocket server with this application component, e.g. 'timeservice.TimeServiceBackend', or None.")

    parser.add_argument("--websocket", type = str, default = "tcp:8080",
        help = 'WebSocket server Twisted endpoint descriptor, e.g. "tcp:9000" or "unix:/tmp/mywebsocket".')

    parser.add_argument("--wsurl", type = str, default = "ws://localhost:8080",
        help = 'WebSocket URL (must suit the endpoint), e.g. "ws://localhost:9000".')

    args = parser.parse_args()

    log.startLogging(sys.stdout)

    ## we use an Autobahn utility to install the "best" available Twisted reactor
    ##
    from autobahn.twisted.choosereactor import install_reactor
    reactor = install_reactor()
    if args.debug:
        print("Running on reactor {}".format(reactor))

    ## create a WAMP router factory
    ##
    from autobahn.twisted.wamp import RouterFactory
    router_factory = RouterFactory()

    ## create a user DB
    ##
    userdb = UserDb()
    userdb.add(authid = "peter", authrole = "user", secret = "secret1", salt = "salt123")
    userdb.add(authid = "joe", authrole = "user", secret = "secret2")

    ## create a WAMP router session factory
    ##
    from autobahn.twisted.wamp import RouterSessionFactory
    session_factory = RouterSessionFactory(router_factory)
    # our custom session class handles WAMP-CRA; the user DB is attached
    # to the factory so sessions can reach it via self.factory.userdb
    session_factory.session = MyRouterSession
    session_factory.userdb = userdb

    ## start an embedded application component ..
    ##
    component_config = types.ComponentConfig(realm = "realm1")
    component_session = TimeService(component_config)
    session_factory.add(component_session)

    ## create a WAMP-over-WebSocket transport server factory
    ##
    from autobahn.twisted.websocket import WampWebSocketServerFactory
    transport_factory = WampWebSocketServerFactory(session_factory, args.wsurl, debug = False, debug_wamp = args.debug)
    transport_factory.setProtocolOptions(failByDrop = False)

    from twisted.web.server import Site
    from twisted.web.static import File
    from autobahn.twisted.resource import WebSocketResource

    ## we serve static files under "/" ..
    root = File(".")

    ## .. and our WebSocket server under "/ws"
    resource = WebSocketResource(transport_factory)
    root.putChild("ws", resource)

    ## run both under one Twisted Web Site
    site = Site(root)
    # silence per-connection and per-request logging
    site.noisy = False
    site.log = lambda _: None

    ## start the WebSocket server from an endpoint
    ##
    server = serverFromString(reactor, args.websocket)
    server.listen(site)

    ## now enter the Twisted reactor loop
    ##
    reactor.run()
|
|
import copy
import logging
import math
import random
import sys
import threading
import time
from Queue import Queue

import yaml
from pybloom import BloomFilter
from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.server import TServer

from .gossipService.gossiping.Gossiping import Client, Processor
from .gossipService.gossiping.ttypes import GossipStatus, GossipNode, GossipData, GossipNodeView
from .gossipheart import GossipServiceHeart
from .utils import make_client, destroy_client
# Module-wide logging: INFO level, logger named after this module.
logging.basicConfig( level=logging.INFO )
logger = logging.getLogger( __name__ )
class GossipServiceHandler( object ):
    """Thrift handler implementing one node of a gossip protocol.

    The node keeps a partial view of the cluster (``self._view``),
    disseminates key/value data to peers, and recruits peers for jobs.
    Work that must not block Thrift request threads is pushed onto an
    internal queue, which the GossipServiceHeart thread drains via
    ``round()``.
    """

    # Minimum score a forwarded job must reach before this node accepts it.
    THRESHOLD = 0.0001

    def __init__( self, config ):
        # Bloom filter with a capacity of a million messages and an error
        # rate of 0.01% -- used to deduplicate gossip messages by uuid.
        self.messages = BloomFilter( capacity=1000000, error_rate=0.0001 )
        # Local key/value store populated by disseminate().
        self.storage = {}
        # NOTE(review): yaml.load on an arbitrary file can execute tags;
        # prefer yaml.safe_load if the config is not fully trusted.
        self.config = yaml.load( open( config ) )
        self._fanout = int( self.config["fanout"] )
        self._tick = int( self.config["tick"] ) / 1000
        self._pulseTicks = int( self.config["pulseTicks"] )
        self._roundTime = self._tick * self._pulseTicks
        self._queue = Queue()
        logger.info( "Sleeping between rounds for %f seconds." % self._roundTime )
        self._status = GossipStatus.IDLE
        self._node = GossipNode( self.config["address"], int( self.config["port"] ), self._status )
        self._id = "%s:%s" % ( self.config["address"], self.config["port"] )
        self.reload_nodes()
        self._heart = GossipServiceHeart( self )
        self._heart.start()
        self.jobs = []
        self._leader = False
        # Ant server state -- started lazily by _spawn_ant().  The original
        # code read these attributes without ever initializing them.
        self._ant = None
        self._ant_running = False

    @classmethod
    def Server( cls, config ):
        """Build a threaded Thrift server wrapping a handler for `config`."""
        handler = cls( config )
        processor = Processor( handler )
        transport = TSocket.TServerSocket( port=int( handler.config["port"] ) )
        tfactory = TTransport.TBufferedTransportFactory()
        pfactory = TBinaryProtocol.TBinaryProtocolFactory()
        server = TServer.TThreadedServer( processor, transport, tfactory, pfactory )
        return server

    def round( self ):
        """
        This is called in the heart thread, so it can block.

        Pops one queued (callable, args) pair and runs it.  Queue.Empty
        stringifies to "" and is deliberately swallowed; any other
        exception is logged and re-raised.
        """
        try:
            # NOTE(review): timeout is ignored when block=False -- confirm
            # whether a blocking get was intended here.
            message = self._queue.get( block=False, timeout=self._pulseTicks )
            message[0]( *message[1] )
        except Exception as e:
            if str( e ) != "":
                logger.info( "%s" % ( e ) )
                raise

    def view( self, view ):
        """
        This will take in the nodeList from
        the other peer and then randomly merge
        the two lists.
        {
          neighborhood: { "b": [ "a", "c" ],
                          "c": [ "a", "b" ],
                        }
          view: [
                  "b",
                  "c"
                ]
        }

        Returns our *previous* view so the peer can merge it symmetrically.
        """
        ret = self._view
        # Work on a fresh copy so the returned snapshot is not mutated.
        self._view = GossipNodeView()
        self._view.view = ret.view[:]
        self._view.neighborhood = copy.deepcopy( ret.neighborhood )
        self._view.owner = self._id
        for k in view.view:
            if k in self._view.neighborhood:
                if view.owner not in self._view.neighborhood[k]:
                    self._view.neighborhood[k].append( view.owner )
            else:
                self._view.neighborhood[k] = [ view.owner ]
                # Only adopt new peers while we are below the fanout limit.
                if len( self._view.view ) < self._fanout:
                    self._view.view.append( view.owner )
        return ret

    def get_view( self ):
        """Return the node's current view of the cluster."""
        return self._view

    def new_job( self, job ):
        """
        This will start the Ant server and
        start the recruiting process for the
        job itself.
        This process entails the following:
            - Sending out a broadcast message to recruit workers.
            - Starting the Ant server.
        """
        if not self._leader:
            self._leader = True
            # Spawn the ant server first.
            self._spawn_ant()
            # Setup the job before the recruitment process.
            self._ant_client.new_job( job )
            # Recruit ants for the job.
            self.recruit( job )

    def recruit( self, job ):
        """Consider a recruitment message and forward it to our view.

        Fixes relative to the original: the body referenced an undefined
        name ``data`` (now aliased to ``job``), used ``data-priority``
        (a subtraction) instead of ``data.priority``, referenced the class
        constant without ``self.``, was missing a colon after the
        threshold test, read the never-defined ``self.ant``, and appended
        the raw message to ``self.jobs`` although that list is sorted by
        score tuples.
        """
        data = job
        if data.uuid not in self.messages and not self._leader:
            data.hops += 1
            # Score decays exponentially with hop count, boosted by priority.
            job_tuple = ( math.exp( -( data.hops - data.priority ) ), data )
            logger.info( job_tuple )
            if job_tuple[0] >= self.THRESHOLD:
                self.jobs.append( job_tuple )
                self.jobs.sort( key=lambda a: a[0] )
                njob = self.jobs[0]
                if self._ant is None:
                    self._spawn_ant()
                    self._ant_client.new_job( job_tuple )
                else:
                    self._ant_client.signal_new_job( njob[1] )
            # Keep gossiping regardless of whether we accepted the job,
            # and remember the uuid so we never process it twice.
            self._queue.put( ( self._recruit, ( job, ), ) )
            self.messages.add( data.uuid )

    def _recruit( self, job ):
        """Forward the recruitment message to every peer in our view."""
        logger.info( "Recruiting: %s" % job )
        for n in self._view.view:
            logger.info( n )
            c = make_client( n )
            c.recruit( job )
            destroy_client( c )

    def disseminate( self, data ):
        """Store gossiped data locally and schedule forwarding to peers.

        The original body began with ``elif`` (a syntax error); it is now
        a plain ``if``.  A dedup check on the message uuid (mirroring
        recruit()) prevents endless re-broadcast loops.
        """
        if data.uuid in self.messages:
            return
        if isinstance( data, GossipData ):
            self.storage[data.key] = data.value
        self._queue.put( ( self._disseminate, ( data, ), ) )
        self.messages.add( data.uuid )

    def _disseminate( self, data ):
        """Forward the data message to every peer in our view."""
        logger.info( "Disseminating: %s" % data )
        for n in self._view.view:
            logger.info( n )
            c = make_client( n )
            c.disseminate( data )
            destroy_client( c )
        logger.info( "Done disseminating." )

    def getData( self ):
        """Return all locally stored key/value pairs as GossipData items."""
        data = [ GossipData( uuid="", key=k, value=v ) for k, v in self.storage.iteritems() ]
        return data

    def _added_to_view( self ):
        """Ask every peer in our view to register us in its zk lists."""
        logger.info( "Requesting that I be added to my view's zk lists." )
        for n in self._view.view:
            c = make_client( n )
            c.added_to_view( self._node )
            destroy_client( c )

    def _spawn_ant( self ):
        """
        Spawn the Ant server so that the job can be processed.
        """
        if not self._ant_running:
            # NOTE(review): AntZooServiceHandler / AntZooService are not
            # imported in this module -- confirm where they come from.
            handler = AntZooServiceHandler( self )
            processor = AntZooService.Processor( handler )
            transport = TSocket.TServerSocket( self.config["ant_port"] )
            # Typo fixed: was TBufferedTrasnportFactory.
            tfactory = TTransport.TBufferedTransportFactory()
            pfactory = TBinaryProtocol.TBinaryProtocolFactory()
            server = TServer.TThreadedServer( processor, transport, tfactory, pfactory )
            self._ant_server = server
            self._ant_client = self._ant_server.make_client()
            # This is kind of a hack.  TODO: subclass the TServer and
            # allow for graceful shutdown.
            class AntThread( threading.Thread ):
                def __init__( self, gossip ):
                    super( AntThread, self ).__init__()
                    self.gossip = gossip
                def run( self ):
                    self.gossip._ant_server.serve()
            self._ant = AntThread( self )
            # start() runs serve() on the new thread; the original called
            # run(), which would have blocked the calling thread forever.
            self._ant.start()
            self._ant_running = True
        else:
            logger.info( "Ant is already running." )

    def exchangeViews( self ):
        """Push our current view to every peer in it."""
        for n in self._view.view:
            c = make_client( n )
            logger.info( "Connecting to %s" % n )
            c.view( self._view )
            # Close the connection (the original leaked it).
            destroy_client( c )

    def reload_nodes( self ):
        """Re-read the persisted node list and claim ownership of it."""
        self._view = self._load_saved_list()
        self._view.owner = self._id

    def _load_saved_list( self ):
        """Load the persisted neighborhood/view from the node_list file."""
        nodeList = yaml.load( open( self.config["node_list"] ) )
        ret = GossipNodeView()
        ret.neighborhood = nodeList["neighborhood"]
        ret.view = nodeList["view"]
        return ret

    def _save_nodes( self ):
        """Persist the current neighborhood/view back to the node_list file."""
        out = { "neighborhood": self._view.neighborhood, "view": self._view.view }
        with open( self.config["node_list"], "w" ) as f:
            f.write( yaml.dump( out ) )
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from flask import Flask, render_template, request, redirect, jsonify, \
url_for, flash
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_setup import Category, Base, Product, User
from flask import session as login_session
import random
import string
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
import httplib2
import json
import requests
from flask import make_response
app = Flask(__name__)
# OAuth2 client id loaded from the Google-issued client secrets file.
CLIENT_ID = json.loads(
    open('client_secrets.json', 'r').read())['web']['client_id']
APPLICATION_NAME = 'Music Catalog app'
# Bind SQLAlchemy to the catalog database and open a module-wide session.
engine = create_engine('sqlite:///musiccatalog.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
@app.route('/login')
def showLogin():
    """Render the login page with a fresh anti-forgery state token."""
    alphabet = string.ascii_uppercase + string.digits
    state = ''.join(random.choice(alphabet) for _ in xrange(32))
    # The token is echoed back by the OAuth callback and checked there.
    login_session['state'] = state
    return render_template('login.html', STATE=state)
@app.route('/gconnect', methods=['POST'])
def gconnect():
    """Complete the Google OAuth2 sign-in handshake.

    Exchanges the one-time authorization code posted by the login page for
    credentials, validates the resulting access token, stores the user's
    identity in the Flask session, and returns an HTML welcome snippet.
    """
    # Validate state token (anti-forgery check against showLogin's token)
    if request.args.get('state') != login_session['state']:
        response = make_response(json.dumps('Invalid state parameter.'), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Obtain authorization code
    code = request.data
    try:
        # Upgrade the authorization code into a credentials object
        oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')
        oauth_flow.redirect_uri = 'postmessage'
        credentials = oauth_flow.step2_exchange(code)
    except FlowExchangeError:
        response = make_response(json.dumps('''Failed to upgrade
        the authorization code.'''), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Check that the access token is valid.
    access_token = credentials.access_token
    url = \
        'https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s' \
        % access_token
    h = httplib2.Http()
    result = json.loads(h.request(url, 'GET')[1])
    # If there was an error in the access token info, abort.
    if result.get('error') is not None:
        response = make_response(json.dumps(result.get('error')), 500)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Verify that the access token is used for the intended user.
    gplus_id = credentials.id_token['sub']
    if result['user_id'] != gplus_id:
        response = \
            make_response(json.dumps('''Token's user ID
        doesn't match given user ID.'''), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Verify that the access token is valid for this app.
    if result['issued_to'] != CLIENT_ID:
        response = \
            make_response(json.dumps('''Token's client ID
        does not match app's.'''), 401)
        print "Token's client ID does not match app's."
        response.headers['Content-Type'] = 'application/json'
        return response
    # Short-circuit if this user is already signed in.
    stored_access_token = login_session.get('access_token')
    stored_gplus_id = login_session.get('gplus_id')
    if stored_access_token is not None and gplus_id == stored_gplus_id:
        response = make_response(json.dumps('''Current user
        is already connected.'''), 200)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Store the access token in the session for later use.
    login_session['access_token'] = credentials.access_token
    login_session['gplus_id'] = gplus_id
    # Get user info
    userinfo_url = 'https://www.googleapis.com/oauth2/v1/userinfo'
    params = {'access_token': credentials.access_token, 'alt': 'json'}
    answer = requests.get(userinfo_url, params=params)
    data = answer.json()
    login_session['username'] = data['name']
    login_session['picture'] = data['picture']
    login_session['email'] = data['email']
    # Create a local User row on first sign-in.
    user_id = getUserID(login_session['email'])
    if not user_id:
        user_id = createUser(login_session)
    login_session['user_id'] = user_id
    # Build the welcome snippet returned to the login page's AJAX handler.
    output = ''
    output += '<h1>Welcome, '
    output += login_session['username']
    output += '!</h1>'
    output += '<img src="'
    output += login_session['picture']
    output += ' " style = "width: 300px; height: 300px;' \
        'border-radius: 150px;-webkit-border-radius: 150px;' \
        '-moz-border-radius: 150px;"> '
    flash('you are now logged in as %s' % login_session['username'])
    print 'done!'
    return output
@app.route('/gdisconnect')
def gdisconnect():
access_token = login_session.get('access_token')
if access_token is None:
print 'Access Token is None'
response = \
make_response(json.dumps('Current user not connected.'),
401)
response.headers['Content-Type'] = 'application/json'
return response
print 'In gdisconnect access token is %s', access_token
print 'User name is: '
print login_session['username']
url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' \
% login_session['access_token']
h = httplib2.Http()
result = h.request(url, 'GET')[0]
print 'result is '
print result
if result['status'] == '200':
del login_session['access_token']
del login_session['gplus_id']
del login_session['username']
del login_session['email']
del login_session['picture']
response = make_response(json.dumps('Successfully disconnected.'), 200)
response.headers['Content-Type'] = 'application/json'
return redirect(url_for('showCategories'))
else:
response = make_response(json.dumps('''Failed to revoke
token for given user.''', 400))
response.headers['Content-Type'] = 'application/json'
return response
def createUser(login_session):
    """Persist a new User from the login session and return its id."""
    new_user = User(name=login_session['username'],
                    email=login_session['email'],
                    picture=login_session['picture'])
    session.add(new_user)
    session.commit()
    # Re-query to pick up the database-assigned primary key.
    created = session.query(User).filter_by(email=login_session['email']).one()
    return created.id
def getUserInfo(user_id):
    """Return the User row with the given primary key."""
    return session.query(User).filter_by(id=user_id).one()
def getUserID(email):
    """Look up a User id by email address.

    Returns:
        The user's id, or None when no user has that email address.
    """
    # Use first() instead of one() + bare except: the original bare
    # except swallowed *every* exception (including database errors),
    # not just the expected "no such row" case.
    user = session.query(User).filter_by(email=email).first()
    return user.id if user is not None else None
@app.route('/categories/JSON/')
def showCategoriesJSON():
    """JSON API endpoint: all categories."""
    all_categories = session.query(Category).all()
    return jsonify(categories=[c.serialize for c in all_categories])
@app.route('/categories/<int:category_id>/products/JSON/')
def showProductsJSON(category_id):
    """JSON API endpoint: every product in one category."""
    items = session.query(Product).filter_by(category_id=category_id).all()
    return jsonify(products=[p.serialize for p in items])
@app.route('/categories/<int:category_id>/products/<int:product_id>/JSON/')
def showSingleProductJSON(category_id, product_id):
    """JSON API endpoint: a single product by id."""
    item = session.query(Product).filter_by(id=product_id).one()
    return jsonify(product=item.serialize)
@app.route('/')
@app.route('/categories/')
def showCategories():
    """Home page: list all categories (read-only view when logged out)."""
    categories = session.query(Category).all()
    template = ('catalog.html' if 'username' in login_session
                else 'publicCatalog.html')
    return render_template(template, categories=categories)
@app.route('/categories/new/', methods=['GET', 'POST'])
def newCategory():
    """Create a category owned by the logged-in user; requires login."""
    if 'username' not in login_session:
        return redirect('/login')
    if request.method != 'POST':
        return render_template('newCategory.html')
    category = Category(name=request.form['name'],
                        user_id=login_session['user_id'])
    session.add(category)
    session.commit()
    flash('New category created')
    return redirect(url_for('showCategories'))
@app.route('/categories/<int:category_id>/edit/', methods=['GET', 'POST'])
def editCategory(category_id):
    """Rename a category; only its creator may edit."""
    category = session.query(Category).filter_by(id=category_id).one()
    if 'username' not in login_session:
        return redirect('/login')
    if category.user_id != login_session['user_id']:
        return "You are not authorized to edit this category"
    if request.method != 'POST':
        return render_template('editCategory.html',
                               category=category)
    # Only rename when the form actually supplied a name.
    if request.form['name']:
        category.name = request.form['name']
    flash('Category successfully edited')
    return redirect(url_for('showCategories'))
@app.route('/categories/<int:category_id>/delete/', methods=['GET', 'POST'])
def deleteCategory(category_id):
    """Delete a category; only its creator may delete."""
    category = session.query(Category).filter_by(id=category_id).one()
    if 'username' not in login_session:
        return redirect('/login')
    if category.user_id != login_session['user_id']:
        return 'You are not authorized to delete this category.'
    if request.method != 'POST':
        return render_template('deleteCategory.html',
                               category=category)
    session.delete(category)
    session.commit()
    flash('Category successfully deleted')
    return redirect(url_for('showCategories'))
@app.route('/categories/<int:category_id>/products/')
def showProducts(category_id):
    """List a category's products; the owner gets the editable view."""
    category = session.query(Category).filter_by(id=category_id).one()
    items = session.query(Product).filter_by(category_id=category_id).all()
    creator = getUserInfo(category.user_id)
    is_owner = ('username' in login_session
                and creator.id == login_session['user_id'])
    if is_owner:
        return render_template('products.html', category=category,
                               products=items)
    return render_template('publicproducts.html', category=category,
                           products=items)
@app.route(
    '/categories/<int:category_id>/products/new/', methods=['GET', 'POST'])
def newProduct(category_id):
    """Add a product to a category; only the category owner may add."""
    if 'username' not in login_session:
        return redirect('/login')
    category = session.query(Category).filter_by(id=category_id).one()
    if category.user_id != login_session['user_id']:
        return 'You are not authorized to create a new product.'
    if request.method != 'POST':
        return render_template('newproduct.html')
    product = Product(name=request.form['name'],
                      description=request.form['description'],
                      price=request.form['price'],
                      category_id=category_id,
                      user_id=login_session['user_id'])
    session.add(product)
    session.commit()
    flash('New product successfully added')
    return redirect(url_for('showProducts', category_id=category_id))
@app.route(
    '/categories/<int:category_id>/products/<int:product_id>/edit/',
    methods=['GET', 'POST'])
def editProduct(category_id, product_id):
    """Edit a product's fields; only the category owner may edit."""
    category = session.query(Category).filter_by(id=category_id).one()
    product = session.query(Product).filter_by(id=product_id).one()
    if 'username' not in login_session:
        return redirect('/login')
    if category.user_id != login_session['user_id']:
        return 'You are not authorized to edit this product.'
    if request.method != 'POST':
        return render_template('editproduct.html',
                               category_id=category_id,
                               product_id=product_id, product=product)
    # Only overwrite the fields the form actually filled in.
    for field in ('name', 'description', 'price'):
        if request.form[field]:
            setattr(product, field, request.form[field])
    session.add(product)
    session.commit()
    flash('Product successfully edited')
    return redirect(url_for('showProducts', category_id=category_id))
@app.route(
    '/categories/<int:category_id>/products/<int:product_id>/delete/',
    methods=['GET', 'POST'])
def deleteProduct(category_id, product_id):
    """Delete a product; only the category owner may delete."""
    category = session.query(Category).filter_by(id=category_id).one()
    product = session.query(Product).filter_by(id=product_id).one()
    if 'username' not in login_session:
        return redirect('/login')
    if category.user_id != login_session['user_id']:
        return 'You are not authorized to delete this product.'
    if request.method != 'POST':
        return render_template('deleteproduct.html', category=category,
                               product=product)
    session.delete(product)
    session.commit()
    flash('Product successfully deleted')
    return redirect(url_for('showProducts', category_id=category_id))
if __name__ == '__main__':
    # NOTE(review): hard-coded secret key and debug mode are acceptable for
    # local development only -- do not deploy as-is.
    app.secret_key = 'super_secret_key'
    app.debug = True
    app.run(host='0.0.0.0', port=8000)
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A wrapper around the MobileNet v2 models for Keras, for object detection."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from object_detection.core import freezable_batch_norm
from object_detection.models.keras_models import model_utils
from object_detection.utils import ops
# pylint: disable=invalid-name
# This method copied from the slim mobilenet base network code (same license)
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
class _LayersOverride(object):
  """Alternative Keras layers interface for the Keras MobileNetV2."""

  def __init__(self,
               batchnorm_training,
               default_batchnorm_momentum=0.999,
               conv_hyperparams=None,
               use_explicit_padding=False,
               alpha=1.0,
               min_depth=None,
               conv_defs=None):
    """Alternative tf.keras.layers interface, for use by the Keras MobileNetV2.

    It is used by the Keras applications kwargs injection API to
    modify the Mobilenet v2 Keras application with changes required by
    the Object Detection API.

    These injected interfaces make the following changes to the network:

    - Applies the Object Detection hyperparameter configuration
    - Supports FreezableBatchNorms
    - Adds support for a min number of filters for each layer
    - Makes the `alpha` parameter affect the final convolution block even if it
        is less than 1.0
    - Adds support for explicit padding of convolutions

    Args:
      batchnorm_training: Bool. Assigned to Batch norm layer `training` param
        when constructing `freezable_batch_norm.FreezableBatchNorm` layers.
      default_batchnorm_momentum: Float. When 'conv_hyperparams' is None,
        batch norm layers will be constructed using this value as the momentum.
      conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
        containing hyperparameters for convolution ops. Optionally set to `None`
        to use default mobilenet_v2 layer builders.
      use_explicit_padding: If True, use 'valid' padding for convolutions,
        but explicitly pre-pads inputs so that the output dimensions are the
        same as if 'same' padding were used. Off by default.
      alpha: The width multiplier referenced in the MobileNetV2 paper. It
        modifies the number of filters in each convolutional layer.
      min_depth: Minimum number of filters in the convolutional layers.
      conv_defs: Network layout to specify the mobilenet_v2 body. Default is
        `None` to use the default mobilenet_v2 network layout.
    """
    self._alpha = alpha
    self._batchnorm_training = batchnorm_training
    self._default_batchnorm_momentum = default_batchnorm_momentum
    self._conv_hyperparams = conv_hyperparams
    self._use_explicit_padding = use_explicit_padding
    self._min_depth = min_depth
    self._conv_defs = conv_defs
    # Defaults used when no conv_hyperparams object is supplied.
    # NOTE(review): 0.00004 * 0.5 presumably converts the slim weight-decay
    # convention into a Keras l2 coefficient -- confirm against slim config.
    self.regularizer = tf.keras.regularizers.l2(0.00004 * 0.5)
    self.initializer = tf.truncated_normal_initializer(stddev=0.09)

  def _FixedPaddingLayer(self, kernel_size):
    # Wraps ops.fixed_padding so explicit padding composes as a Keras layer.
    return tf.keras.layers.Lambda(lambda x: ops.fixed_padding(x, kernel_size))

  def Conv2D(self, filters, **kwargs):
    """Builds a Conv2D layer according to the current Object Detection config.

    Overrides the Keras MobileNetV2 application's convolutions with ones that
    follow the spec specified by the Object Detection hyperparameters.

    Args:
      filters: The number of filters to use for the convolution.
      **kwargs: Keyword args specified by the Keras application for
        constructing the convolution.

    Returns:
      A one-arg callable that will either directly apply a Keras Conv2D layer to
      the input argument, or that will first pad the input then apply a Conv2D
      layer.
    """
    # Make sure 'alpha' is always applied to the last convolution block's size
    # (This overrides the Keras application's functionality)
    layer_name = kwargs.get('name')
    if layer_name == 'Conv_1':
      if self._conv_defs:
        filters = model_utils.get_conv_def(self._conv_defs, 'Conv_1')
      else:
        filters = 1280
      if self._alpha < 1.0:
        filters = _make_divisible(filters * self._alpha, 8)

    # Apply the minimum depth to the convolution layers
    # ('expand' layers are exempt so expansion ratios stay intact).
    if (self._min_depth and (filters < self._min_depth)
        and not kwargs.get('name').endswith('expand')):
      filters = self._min_depth

    if self._conv_hyperparams:
      kwargs = self._conv_hyperparams.params(**kwargs)
    else:
      kwargs['kernel_regularizer'] = self.regularizer
      kwargs['kernel_initializer'] = self.initializer

    kwargs['padding'] = 'same'
    kernel_size = kwargs.get('kernel_size')
    # With explicit padding, pad manually and run the conv as 'valid'.
    if self._use_explicit_padding and kernel_size > 1:
      kwargs['padding'] = 'valid'

      def padded_conv(features):
        padded_features = self._FixedPaddingLayer(kernel_size)(features)
        return tf.keras.layers.Conv2D(filters, **kwargs)(padded_features)

      return padded_conv
    else:
      return tf.keras.layers.Conv2D(filters, **kwargs)

  def DepthwiseConv2D(self, **kwargs):
    """Builds a DepthwiseConv2D according to the Object Detection config.

    Overrides the Keras MobileNetV2 application's convolutions with ones that
    follow the spec specified by the Object Detection hyperparameters.

    Args:
      **kwargs: Keyword args specified by the Keras application for
        constructing the convolution.

    Returns:
      A one-arg callable that will either directly apply a Keras DepthwiseConv2D
      layer to the input argument, or that will first pad the input then apply
      the depthwise convolution.
    """
    if self._conv_hyperparams:
      kwargs = self._conv_hyperparams.params(**kwargs)
      # Both the regularizer and initializer apply to the depthwise layer in
      # MobilenetV1, so we remap the kernel_* to depthwise_* here.
      kwargs['depthwise_regularizer'] = kwargs['kernel_regularizer']
      kwargs['depthwise_initializer'] = kwargs['kernel_initializer']
    else:
      kwargs['depthwise_regularizer'] = self.regularizer
      kwargs['depthwise_initializer'] = self.initializer

    kwargs['padding'] = 'same'
    kernel_size = kwargs.get('kernel_size')
    # Same explicit-padding strategy as Conv2D above.
    if self._use_explicit_padding and kernel_size > 1:
      kwargs['padding'] = 'valid'

      def padded_depthwise_conv(features):
        padded_features = self._FixedPaddingLayer(kernel_size)(features)
        return tf.keras.layers.DepthwiseConv2D(**kwargs)(padded_features)

      return padded_depthwise_conv
    else:
      return tf.keras.layers.DepthwiseConv2D(**kwargs)

  def BatchNormalization(self, **kwargs):
    """Builds a normalization layer.

    Overrides the Keras application batch norm with the norm specified by the
    Object Detection configuration.

    Args:
      **kwargs: Only the name is used, all other params ignored.
        Required for matching `layers.BatchNormalization` calls in the Keras
        application.

    Returns:
      A normalization layer specified by the Object Detection hyperparameter
      configurations.
    """
    name = kwargs.get('name')
    if self._conv_hyperparams:
      return self._conv_hyperparams.build_batch_norm(
          training=self._batchnorm_training,
          name=name)
    else:
      return freezable_batch_norm.FreezableBatchNorm(
          training=self._batchnorm_training,
          epsilon=1e-3,
          momentum=self._default_batchnorm_momentum,
          name=name)

  def Input(self, shape):
    """Builds an Input layer.

    Overrides the Keras application Input layer with one that uses a
    tf.placeholder_with_default instead of a tf.placeholder. This is necessary
    to ensure the application works when run on a TPU.

    Args:
      shape: The shape for the input layer to use. (Does not include a dimension
        for the batch size).
    Returns:
      An input layer for the specified shape that internally uses a
      placeholder_with_default.
    """
    default_size = 224
    default_batch_size = 1
    shape = list(shape)
    # Substitute a concrete default for any unknown spatial dimensions.
    default_shape = [default_size if dim is None else dim for dim in shape]

    input_tensor = tf.constant(0.0, shape=[default_batch_size] + default_shape)

    placeholder_with_default = tf.placeholder_with_default(
        input=input_tensor, shape=[None] + shape)
    return model_utils.input_layer(shape, placeholder_with_default)

  # pylint: disable=unused-argument
  def ReLU(self, *args, **kwargs):
    """Builds an activation layer.

    Overrides the Keras application ReLU with the activation specified by the
    Object Detection configuration.

    Args:
      *args: Ignored, required to match the `tf.keras.ReLU` interface
      **kwargs: Only the name is used,
        required to match `tf.keras.ReLU` interface

    Returns:
      An activation layer specified by the Object Detection hyperparameter
      configurations.
    """
    name = kwargs.get('name')
    if self._conv_hyperparams:
      return self._conv_hyperparams.build_activation_layer(name=name)
    else:
      # Default MobileNetV2 activation when no hyperparams are configured.
      return tf.keras.layers.Lambda(tf.nn.relu6, name=name)
  # pylint: enable=unused-argument

  # pylint: disable=unused-argument
  def ZeroPadding2D(self, **kwargs):
    """Replaces explicit padding in the Keras application with a no-op.

    Padding is instead handled by _FixedPaddingLayer when
    use_explicit_padding is enabled.

    Args:
      **kwargs: Ignored, required to match the Keras applications usage.

    Returns:
      A no-op identity lambda.
    """
    return lambda x: x
  # pylint: enable=unused-argument

  # Forward all non-overridden methods to the keras layers
  def __getattr__(self, item):
    return getattr(tf.keras.layers, item)
def mobilenet_v2(batchnorm_training,
                 default_batchnorm_momentum=0.9997,
                 conv_hyperparams=None,
                 use_explicit_padding=False,
                 alpha=1.0,
                 min_depth=None,
                 conv_defs=None,
                 **kwargs):
  """Instantiates the MobileNetV2 architecture, modified for object detection.

  Wraps `tf.keras.applications.MobileNetV2`, injecting a `_LayersOverride`
  through the Keras applications' kwargs-based monkey-patching API so the
  network: uses the Object Detection hyperparameter configuration, supports
  FreezableBatchNorms and a minimum per-layer filter count, applies `alpha`
  to the final convolution block even when it is below 1.0, supports
  explicit convolution padding, and builds its Input layer from a
  `tf.placeholder_with_default` so it runs on TPUs.

  Args:
    batchnorm_training: Bool. Assigned to the batch norm layers' `training`
      param when constructing `freezable_batch_norm.FreezableBatchNorm`s.
    default_batchnorm_momentum: Float. Momentum used for batch norm layers
      when 'conv_hyperparams' is None.
    conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
      with hyperparameters for convolution ops, or `None` for the default
      mobilenet_v2 layer builders.
    use_explicit_padding: If True, run convolutions with 'valid' padding but
      pre-pad inputs so output dimensions match 'same' padding.
    alpha: The MobileNetV2 width multiplier; scales each layer's filters.
    min_depth: Minimum number of filters in the convolutional layers.
    conv_defs: Network layout for the mobilenet_v2 body, or `None` for the
      default layout.
    **kwargs: Forwarded to `tf.keras.applications.MobileNetV2`.

  Returns:
    A Keras model instance.
  """
  # Route all layer construction through our override object.
  kwargs['layers'] = _LayersOverride(
      batchnorm_training,
      default_batchnorm_momentum=default_batchnorm_momentum,
      conv_hyperparams=conv_hyperparams,
      use_explicit_padding=use_explicit_padding,
      min_depth=min_depth,
      alpha=alpha,
      conv_defs=conv_defs)
  return tf.keras.applications.MobileNetV2(alpha=alpha, **kwargs)
# pylint: enable=invalid-name
|
|
#!/bin/python
#NOTE: This script only runs in Linux and requires pdflatex in order to create pdfs
import argparse
import logging
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import *
#The default ports to scan if none are specified (common well-known services)
default_ports = [5,8,20,21,22,23,25,53,67,68,69,79,80,88,110,123,135,137,138,139,143,161,162,179,360,389,427,443,445,464,515,546,593,631,636,989,990,1022,1023,1025,1026,1039,1070,1234,2222,3268,3389,8000,8080,8081,8888]
#Maps port numbers to services
#(scapy's TCP_SERVICES maps service name -> port; invert it to port -> name)
PORT_LOOKUP = dict((TCP_SERVICES[k], k) for k in TCP_SERVICES.keys())
#ANSI color codes for colored terminal output
NORMAL = "\033[0m";
BOLD = "\033[1m";
RED = "\033[31m";
GREEN = "\033[32m";
YELLOW = "\033[33m";
BLUE = "\033[34m";
PURPLE = "\033[35m";
CYAN = "\033[36m";
WHITE = "\033[39m";
#Verbose mode (extra progress messages)
verboseMessages = True
#Whether or not to make a pdf of the output (requires pdflatex)
create_pdf = False
def printMessage(text, formatting=WHITE):
    """Print *text* to the terminal wrapped in the ANSI *formatting* codes.

    When PDF output is enabled the message is also appended to temp.tex:
    bold messages become LaTeX section titles, anything else a plain line.
    """
    print(formatting + text + WHITE + NORMAL)
    if not create_pdf:
        return
    # Underscores are special characters in LaTeX and must be escaped.
    escaped = text.replace("_", "\\_")
    if BOLD in formatting:
        # Turn bold messages into section titles
        os.system("echo \"\\section{" + escaped + "}\" >> temp.tex")
    else:
        os.system("echo -n '" + escaped + "\\' >> temp.tex")
        os.system("echo '\\' >> temp.tex")
def verboseMessage(output, formatting=""):
    """Forward *output* to printMessage only when verbose mode is enabled;
    otherwise do nothing."""
    if verboseMessages:
        printMessage(output, formatting)
#Represents a machine and has an ip address and a list of ports
class Target:
    """A single scan target: one host plus the list of ports to probe.

    Also used to describe the *sender* side of a scan: the scan methods
    read the source port from sender.ports[-1].
    """
    def __init__(self, host, ports):
        self.host = host
        self.ports = ports

    def toString(self):
        """Return one "host:port" line per port, newline separated."""
        output = ""
        for port in self.ports:
            output = output + self.host + ":" + str(port) + "\n"
        return output[:-1]

    #Run ICMP Scan on this machine
    def runICMP(self, sender):
        """Ping the host once and report whether it answered."""
        verboseMessage("Scanning " + self.host)
        #Make sure you have the correct permissions to run the scan
        try:
            reply = sr1(IP(dst=self.host)/ICMP(), timeout=1, verbose=0)
        except PermissionError:
            printMessage("ERROR: You do not have the needed permissions to run this scan, try running the script as root", RED)
            return
        if reply:
            printMessage(self.host + " is up")
        else:
            verboseMessage(self.host + " could not be reached", RED)

    #Run TCP Scan on this machine
    def runTCP(self, sender):
        """SYN-scan each port; a SYN/ACK reply means the port is open."""
        send_port = int(sender.ports[-1])
        #Run scan on each port
        for port in self.ports:
            #Make sure you have the correct permissions to run the scan
            try:
                reply = sr1(IP(dst=self.host)/TCP(sport=send_port, dport=int(port)), timeout=1, verbose=0)
            except PermissionError:
                printMessage("ERROR: You do not have the needed permissions to run this scan, try running the script as root", RED)
                return
            if reply:
                flags = reply.getlayer(TCP).flags
                if flags == 18: #SYNACK = 18
                    try:
                        printMessage(self.host + ":" + port + " is open [" + PORT_LOOKUP[int(port)] + "]")
                    except KeyError:
                        # No known service name for this port number
                        printMessage(self.host + ":" + port + " is open")
                else:
                    verboseMessage(self.host + ":" + port + " is closed", RED)
            else:
                verboseMessage(self.host + ":" + port + " is closed", RED)

    #Run UDP Scan on this machine
    def runUDP(self, sender):
        """UDP-scan each port; silence is treated as open, any reply as closed."""
        send_port = int(sender.ports[-1])
        #Run scan on each port
        for port in self.ports:
            #Make sure you have the correct permissions to run the scan
            try:
                reply = sr1(IP(dst=self.host)/UDP(sport=send_port, dport=int(port)), timeout=1, verbose=0, retry=3)
            except PermissionError:
                printMessage("ERROR: You do not have the needed permissions to run this scan, try running the script as root", RED)
                return
            if reply:
                verboseMessage(self.host + ":" + port + " is closed", RED)
            else:
                try:
                    printMessage(self.host + ":" + port + " is open [" + PORT_LOOKUP[int(port)] + "]")
                except KeyError:
                    printMessage(self.host + ":" + port + " is open")

    #Run Christmas Tree Scan on this machine
    def runChristmas(self, sender):
        """FIN/PSH/URG scan: no reply means open|filtered, an RST/ACK reply
        means closed and selected ICMP unreachables mean filtered."""
        send_port = int(sender.ports[-1])
        #Run scan on each port
        for port in self.ports:
            #Make sure you have the correct permissions to run the scan
            try:
                reply = sr1(IP(dst=self.host)/TCP(sport=send_port, dport=int(port), flags="FPU"), timeout=1, verbose=0)
            except PermissionError:
                printMessage("ERROR: You do not have the needed permissions to run this scan, try running the script as root", RED)
                return
            if reply is None:
                #Port is open|filtered
                try:
                    printMessage(self.host + ":" + port + " is open|filtered [" + PORT_LOOKUP[int(port)] + "]")
                except KeyError:
                    # Fixed: this fallback used to say "is open", disagreeing
                    # with the known-service branch above.
                    printMessage(self.host + ":" + port + " is open|filtered")
            elif reply.haslayer(TCP):
                if reply.getlayer(TCP).flags == 20:
                    #Port is closed (RST/ACK = 20)
                    verboseMessage(self.host + ":" + port + " is closed", RED)
            elif reply.haslayer(ICMP):
                if int(reply.getlayer(ICMP).type) == 3 and int(reply.getlayer(ICMP).code) in [1, 2, 3, 9, 10, 13]:
                    #Port is filtered
                    try:
                        printMessage(self.host + ":" + port + " is filtered [" + PORT_LOOKUP[int(port)] + "]")
                    except KeyError:
                        # Fixed: this fallback used to say "is open" although
                        # the port was detected as filtered.
                        printMessage(self.host + ":" + port + " is filtered")

    #Run traceroute on this machine
    def runTraceroute(self, sender):
        """Trace the route to the host with increasing ICMP TTLs (max 29 hops)."""
        verboseMessage("Performing traceroute on " + self.host)
        for hops in range(1, 30):
            #Make sure you have the correct permissions to run the scan
            try:
                reply = sr1(IP(dst=self.host, ttl=hops)/ICMP(), timeout=1, verbose=0)
            except PermissionError:
                printMessage("ERROR: You do not have the needed permissions to run this scan, try running the script as root", RED)
                return
            if reply is None:
                break
            elif reply.src == self.host:
                #Reached destination
                if hops == 1:
                    printMessage("1 hop away: " + reply.src + " Done!")
                else:
                    printMessage(str(hops) + " hops away: " + reply.src + " Done!")
                break
            else:
                #Hasn't reached destination yet
                if hops == 1:
                    printMessage("1 hop away: " + reply.src)
                else:
                    printMessage(str(hops) + " hops away: " + reply.src)
class Main(object):
    """Parses the command line options and drives the requested scans."""
    def __init__(self):
        # Initialise attributes before run() so they exist while it executes
        # (previously filename was assigned only *after* run() returned).
        self.filename = None
        self.parse_options()
        self.run()

    def parse_options(self):
        """Build the argparse parser and copy the parsed values onto self."""
        parser = argparse.ArgumentParser()
        #Create command line arguments
        parser.add_argument("ip_address", nargs="+", type=str, default="",
                            help="IP address(es) to scan")
        parser.add_argument("-O", "--create-pdf", type=str, dest="create_pdf", default="",
                            help="Create a pdf with the given output")
        parser.add_argument("-g", "--source-port", type=int, dest="source_port", default=443,
                            help="Source port")
        parser.add_argument("-S", "--source-address", type=str, dest="source_address", default="",
                            help="Source address (for spoofing)")
        parser.add_argument("-sn", "--icmp", dest="use_icmp", action="store_true", help="Run ICMP scan (Default if no type is specified)")
        parser.add_argument("-sT", "--tcp", dest="use_tcp", action="store_true", help="Run TCP syn scan")
        parser.add_argument("-sX", "--christmas", dest="use_christmas", action="store_true", help="Run christmas tree scan")
        parser.add_argument("-sU", "--udp", dest="use_udp", action="store_true", help="Run UDP scan")
        parser.add_argument("-T", "--traceroute", dest="traceroute", action="store_true", help="Perform a traceroute")
        parser.add_argument("-v", "--verbose", dest="verbose", action="store_true", help="Verbose output")
        # store_true already defaults each flag to False; the explicit
        # defaults are kept for clarity (a duplicated traceroute line was
        # removed here).
        parser.set_defaults(use_icmp=False, use_tcp=False, use_christmas=False,
                            use_udp=False, traceroute=False, verbose=False)
        args = parser.parse_args()
        #Copy over command line arguments
        self.ip_addresses = args.ip_address
        self.create_pdf = args.create_pdf
        self.source_port = args.source_port
        self.source_address = args.source_address
        self.use_icmp = args.use_icmp
        self.use_tcp = args.use_tcp
        self.use_christmas = args.use_christmas
        self.use_udp = args.use_udp
        self.traceroute = args.traceroute
        self.verbose = args.verbose
        global verboseMessages
        verboseMessages = args.verbose

    def parseIP(self, full_address):
        """Expand one "ip[:ports]" argument into a list of Target objects.

        Each octet may use commas and dashes (e.g. 10.0.1-3,5.1) and the
        address may carry an optional /mask suffix; the same comma/dash
        syntax applies to the port list. Returns [] (after printing an
        error message) for any invalid input.
        """
        targets = []
        ip_address = full_address.split(":")[0]
        #Parse the mask if there is one
        if "/" in ip_address:
            try:
                mask = int(ip_address.split("/")[1])
                if mask < 0 or mask > 32:
                    printMessage("Error: Invalid mask (" + ip_address + ")", RED)
                    return []
            except ValueError:
                printMessage("Error: Invalid mask (" + ip_address + ")", RED)
                return []
        else:
            mask = 32
        #Parse the ip address
        ip_address_parts = ip_address.split("/")[0].split(".")
        if len(ip_address_parts) != 4:
            printMessage("Error: Invalid IP address (" + ip_address + ")", RED)
            return []
        try:
            current_ip_addresses = []
            i = -1
            for strpart in ip_address_parts:
                temp_ip_addresses = []
                number_of_addresses = len(current_ip_addresses)
                i = i + 1
                for comma_part in strpart.split(","):
                    #Expand dash ranges ("2-5") into individual octet values
                    if "-" in comma_part:
                        dash_parts = comma_part.split("-")
                        if len(dash_parts) != 2:
                            printMessage("Error: Invalid IP address (" + ip_address + ")", RED)
                            return []
                        start = int(dash_parts[0])
                        end = int(dash_parts[1]) + 1
                        # Consistent with the port parsing below: reject a
                        # reversed range like "5-4" instead of silently
                        # producing no addresses.
                        if start >= end:
                            printMessage("Error: Invalid IP address (" + ip_address + ")", RED)
                            return []
                        octet_values = range(start, end)
                    else:
                        octet_values = [int(comma_part)]
                    # Combine each octet value with every address built so
                    # far (a duplicated range check was removed here).
                    for part in octet_values:
                        if part < 1 or part > 255:
                            # NOTE(review): 0 is rejected as an octet value,
                            # mirroring the original validation.
                            printMessage("Error: Invalid IP address (" + ip_address + ")", RED)
                            return []
                        #Check if it is the first octet
                        if i == 0:
                            temp_ip_addresses.append(part)
                        else:
                            for j in range(number_of_addresses):
                                temp_ip_addresses.append((current_ip_addresses[j] << 8) + part)
                current_ip_addresses = temp_ip_addresses
            #Expand each address over the network mask, then sort and
            #remove duplicates
            temp = []
            for current_ip_address in current_ip_addresses:
                bitmask = (0xFFFFFFFF >> mask) & 0xFFFFFFFF
                for ip in range(current_ip_address & (~bitmask), (current_ip_address | bitmask) + 1):
                    ip_address = "." + str(ip & 255)
                    ip = ip >> 8
                    ip_address = "." + str(ip & 255) + ip_address
                    ip = ip >> 8
                    ip_address = "." + str(ip & 255) + ip_address
                    ip = ip >> 8
                    ip_address = str(ip & 255) + ip_address
                    temp.append(ip_address)
            current_ip_addresses = sorted(set(temp))
        except ValueError:
            printMessage("Error: Invalid IP address (" + ip_address + ")", RED)
            return []
        #Parse the ports
        full_ports = full_address.split(":")[1:]
        ports = []
        try:
            if ":" in full_address:
                for full_port in full_ports:
                    for comma_port in full_port.split(","):
                        if "-" in comma_port:
                            dash_ports = comma_port.split("-")
                            if len(dash_ports) != 2:
                                printMessage("Error: Invalid port (" + ip_address + ")", RED)
                                return []
                            start = int(dash_ports[0])
                            end = int(dash_ports[1]) + 1
                            if start >= end:
                                printMessage("Error: Invalid port (" + ip_address + ")", RED)
                                return []
                            for j in range(start, end):
                                if j < 1 or j > 65535:
                                    printMessage("Error: Invalid port (" + ip_address + ")", RED)
                                    return []
                                ports.append(j)
                        elif comma_port != "":
                            int_port = int(comma_port)
                            if int_port < 1 or int_port > 65535:
                                printMessage("Error: Invalid port (" + ip_address + ")", RED)
                                return []
                            ports.append(int_port)
                        else:
                            printMessage("Error: Invalid port (" + ip_address + ")", RED)
                            return []
            else:
                #No ports given: fall back to the default port list
                for port in default_ports:
                    ports.append(port)
        except ValueError:
            printMessage("Error: Invalid port (" + ip_address + ")", RED)
            return []
        #Sort, deduplicate and stringify the ports
        ports = [str(port) for port in sorted(set(ports))]
        for current_ip_address in current_ip_addresses:
            targets.append(Target(current_ip_address, ports))
        return targets

    def run(self):
        """Run every selected scan type against every parsed target."""
        #Default to icmp if no type of scan is specified
        if not (self.use_icmp or self.use_tcp or self.use_udp or self.use_christmas or self.traceroute):
            self.use_icmp = True
        if self.create_pdf != "":
            verboseMessage("Creating was selected (" + self.create_pdf + ")")
            #Make sure pdflatex is installed to generate the pdf
            # NOTE(review): self.create_pdf is interpolated into shell
            # commands unquoted; a malicious filename could inject commands.
            if os.system("which pdflatex &>/dev/null") == 0:
                global create_pdf
                create_pdf = True
                #Delete any file that may be where the destination pdf is
                os.system("rm -f " + self.create_pdf)
                # Only write the LaTeX preamble when a pdf will actually be
                # generated (previously temp.tex was created even when
                # pdflatex was missing and then never cleaned up).
                os.system("echo \"\documentclass{article}\" > temp.tex")
                os.system("echo \"\\begin{document}\" >> temp.tex")
            else:
                printMessage("Error: pdflatex not found, not generating pdf of output", RED)
                self.create_pdf = ""
        if self.source_port != "":
            verboseMessage("Using port " + str(self.source_port))
        #Set source port
        if self.source_address != "":
            verboseMessage("Using address " + self.source_address)
        else:
            self.source_address = "BOGUS" #TODO: Replace with actual ip address
        if self.use_icmp:
            verboseMessage("Using ICMP")
        if self.use_tcp:
            verboseMessage("Using TCP")
        if self.use_udp:
            verboseMessage("Using UDP")
        if self.use_christmas:
            verboseMessage("Using christmas tree scan")
        if self.traceroute:
            verboseMessage("Performing a traceroute")
        #Process ip addresses from input
        targets = []
        for ip_address in self.ip_addresses:
            new_targets = self.parseIP(ip_address)
            for target in new_targets:
                targets.append(target)
        if self.source_address == "BOGUS":
            source_address = IP().src
        else:
            # Fixed: this previously read self._source_address, which does
            # not exist and raised AttributeError whenever -S was given.
            source_address = self.source_address
        source_port = []
        source_port.append(str(self.source_port))
        source = Target(source_address, source_port)
        verboseMessage("Targets:", BOLD + BLUE)
        for target in targets:
            verboseMessage(target.toString())
        if self.use_icmp:
            printMessage("[Starting ICMP scans]", BOLD + BLUE)
            for target in targets:
                target.runICMP(source)
        if self.traceroute:
            printMessage("[Starting Traceroute]", BOLD + BLUE)
            for target in targets:
                target.runTraceroute(source)
        if self.use_tcp:
            printMessage("[Starting TCP scans]", BOLD + BLUE)
            for target in targets:
                target.runTCP(source)
        if self.use_udp:
            printMessage("[Starting UDP scans]", BOLD + BLUE)
            for target in targets:
                target.runUDP(source)
        if self.use_christmas:
            printMessage("[Starting Christmas Tree scan]", BOLD + BLUE)
            for target in targets:
                target.runChristmas(source)
        #Add the footer to the tex file and generate the pdf
        if self.create_pdf != "":
            os.system("echo \"\\end{document}\" >> temp.tex")
            os.system("pdflatex temp.tex &>/dev/null")
            os.system("mv temp.pdf " + self.create_pdf)
            os.system("rm temp.tex")
            os.system("rm temp.aux")
            os.system("rm temp.log")
# Entry point: parse the command line arguments and run the requested scans.
if __name__ == '__main__':
    m = Main()
|
|
import tornado.web
import tornado.auth
import json
import requests
import os
import sys
from datetime import datetime, timedelta
from dateutil import parser
#########################
# Useful misc handlers #
#########################
# HTTP status codes mapped to the human-readable reason shown on error
# pages (looked up by BaseHandler.write_error below).
ERROR_CODES = {
    400: 'Bad Request',
    401: 'Unauthorized',
    403: 'Forbidden',
    404: 'Page Not Found',
    405: 'Method Not Allowed',
    406: 'Not Acceptable',
    407: 'Proxy Authentication Required',
    408: 'Request Timeout',
    414: 'Request-URI Too Long',
    500: 'Internal Server Error',
    501: 'Not Implemented',
    502: 'Bad Gateway',
    503: 'Service Unavailable',
    504: 'Gateway Timeout',
    511: 'Network Authentication Required'
}
class User(object):
    """Minimal representation of a logged-in user and their roles."""

    def __init__(self, name, email, roles):
        self.name = name
        self.email = email
        self.roles = roles

    @property
    def is_admin(self):
        """True when the user carries the 'admin' role."""
        return 'admin' in self.roles

    @property
    def is_pricing_admin(self):
        """True when the user carries the 'pricing_admin' role."""
        return 'pricing_admin' in self.roles

    @property
    def is_sample_requirements_admin(self):
        """True when the user carries the 'sample_requirements_admin' role."""
        return 'sample_requirements_admin' in self.roles

    @property
    def is_any_admin(self):
        """True when the user holds at least one admin-type role."""
        return any((self.is_admin,
                    self.is_pricing_admin,
                    self.is_sample_requirements_admin))
class BaseHandler(tornado.web.RequestHandler):
    """Base Handler. Handlers should not inherit from this
    class directly but from either SafeHandler or UnsafeHandler
    to make security status explicit.
    """
    def get(self):
        """ The GET method on this handler will be overwritten by all other handler.
        As it is the default handler used to match any request that is not mapped
        in the main app, a 404 error will be raised in that case (because the get method
        won't be overwritten in that case)
        """
        raise tornado.web.HTTPError(404, reason='Page not found')

    def get_current_user(self):
        """Return a User built from the secure cookies, or None when no
        user name cookie is present. In test mode a fully privileged stub
        user is returned instead.
        """
        # Disables authentication if test mode to ease integration testing
        if self.application.test_mode:
            name = 'Testing User!'
            roles = ['admin', 'pricing_admin', 'sample_requirements_admin']
            email = 'Testing User!'
        else:
            # Fetch each secure cookie once instead of once per use.
            user_cookie = self.get_secure_cookie("user")
            roles_cookie = self.get_secure_cookie("roles")
            email_cookie = self.get_secure_cookie("email")
            name = str(user_cookie, 'utf-8') if user_cookie else None
            # Fix ridiculous bug with quotation marks showing on the web
            if name and (name[0] == '"') and (name[-1] == '"'):
                name = name[1:-1]
            roles = json.loads(str(roles_cookie, 'utf-8')) if roles_cookie else ['user']
            email = str(email_cookie, 'utf-8') if email_cookie else None
        user = User(name, email, roles)
        if user.name:
            return user
        else:
            return None

    def write_error(self, status_code, **kwargs):
        """ Overwrites write_error method to have custom error pages.
        http://tornado.readthedocs.org/en/latest/web.html#tornado.web.RequestHandler.write_error
        """
        reason = 'Unknown Error'
        # Get information about the triggered exception
        self.application.gs_globals['exception_fulltext'] = repr(sys.exc_info())
        # Get the status code and error reason (membership test directly on
        # the dict; building a list first was redundant)
        if status_code in ERROR_CODES:
            reason = ERROR_CODES[status_code]
        try:
            if 'exc_info' in kwargs:
                _, error, _ = kwargs['exc_info']
                reason = error.reason
        except AttributeError:
            # Not every exception carries a 'reason'; keep the lookup value.
            pass
        # Return JSON if this is an API call
        if '/api/v1/' in self.request.uri:
            jsondict = {
                'page_title': "Error {}: {}".format(status_code, reason),
                'error_status': status_code,
                'error_reason': reason,
                'error_exception': self.application.gs_globals['exception_fulltext']
            }
            self.set_header("Content-type", "application/json")
            self.write(json.dumps(jsondict))
        # Render the error template
        else:
            t = self.application.loader.load("error_page.html")
            self.write(t.generate(gs_globals=self.application.gs_globals, status=status_code, reason=reason, user=self.get_current_user()))

    def get_multiqc(self, project_id):
        """
        Getting multiqc reports for requested project from the filesystem
        Returns a string containing html if report exists, otherwise None
        """
        view = self.application.projects_db.view('project/id_name_dates')
        rows = view[project_id].rows
        project_name = ''
        # get only the first one
        for row in rows:
            project_name = row.value.get('project_name', '')
            break
        if project_name:
            multiqc_name = '{}_multiqc_report.html'.format(project_name)
            multiqc_path = self.application.multiqc_path or ''
            multiqc_path = os.path.join(multiqc_path, multiqc_name)
            if os.path.exists(multiqc_path):
                with open(multiqc_path, 'r', encoding='utf-8') as multiqc_file:
                    html = multiqc_file.read()
                    return html
        return None
class SafeHandler(BaseHandler):
    """ All handlers that need authentication and authorization should inherit
    from this class.
    """
    @tornado.web.authenticated
    def prepare(self):
        """This method is called before any other method.
        Having the decorator @tornado.web.authenticated here implies that all
        the Handlers that inherit from this one are going to require
        authentication in all their methods.
        """
        # The decorator does all the enforcement; no extra work is needed.
        pass
class UnsafeHandler(BaseHandler):
    """Handlers that stay reachable without authentication inherit from
    this class (e.g. MainHandler and DataHandler below)."""
    pass
class MainHandler(UnsafeHandler):
    """ Serves the html front page upon request.
    """
    def get(self):
        t = self.application.loader.load("index.html")
        user = self.get_current_user()
        # Avoids pulling all historic data by assuming we have less than 30 NAS:es
        view = self.application.server_status_db.view('nases/by_timestamp', descending=True, limit=30)
        latest = max([parser.parse(row.key) for row in view.rows])
        # assuming that status db is not being updated more often than every 5 minutes
        reduced_rows = [row for row in view.rows if latest - parser.parse(row.key) <= timedelta(minutes=5)]
        instruments = self.application.server_status['instruments']
        server_status = {}
        for row in reduced_rows:
            server = row.value.get('name')
            if server is None:
                continue
            if server not in server_status:
                server_status[server] = row.value
                server_status[server]['instrument'] = instruments.get(server, '-')
            used_percentage = float(row.value.get('used_percentage', '0').replace('%', ''))
            # Check the higher threshold first: with the previous ordering
            # (>60 before >80) the 'q-danger' branch was unreachable and
            # disks above 80% only ever showed a warning.
            if used_percentage > 80:
                server_status[server]['css_class'] = 'q-danger'
            elif used_percentage > 60:
                server_status[server]['css_class'] = 'q-warning'
            else:
                server_status[server]['css_class'] = ''
        # sort by used space; compare numerically, since lexicographic
        # comparison of strings like '9%' and '85%' orders them wrongly
        server_status = sorted(server_status.items(),
                               key=lambda item: float(item[1].get('used_percentage', '0').replace('%', '')),
                               reverse=True)
        self.write(t.generate(gs_globals=self.application.gs_globals,
                              user=user, server_status=server_status))
def dthandler(obj):
    """ISO formatting for datetime to be used in JSON.

    Raises TypeError for objects without an isoformat() method.
    """
    # Guard clause: anything without isoformat() cannot be serialized here.
    if not hasattr(obj, 'isoformat'):
        raise TypeError("Object can not be isoformatted.")
    return obj.isoformat()
################################
# Useful data-serving handlers #
################################
class DataHandler(UnsafeHandler):
    """ Serves a listing of all available URL's in the web service.
    """
    def get(self):
        self.set_header("Content-type", "application/json")
        handlers = []
        for spec in self.application.declared_handlers:
            try:
                handlers.append(spec[0])
            except TypeError:  # 'URLSpec' object does not support indexing
                handlers.append(spec.regex.pattern)
        # Split the declared routes into API endpoints, utility routes and
        # regular pages.
        api = [h for h in handlers if h.startswith("/api")]
        utils = [h for h in handlers if h in ("/login", "/logout", ".*")]
        pages = [h for h in set(handlers).difference(api).difference(utils)
                 if not (h.endswith("?") or h.endswith("$"))]
        pages.sort(reverse=True)
        api.sort(reverse=True)
        self.write(json.dumps({"api": api, "pages": pages}))
class UpdatedDocumentsDatahandler(SafeHandler):
    """ Serves a list of references to the last updated documents in the
    databases Status gets data from.
    Specify to get the <n> latest items by ?items=<n>.
    Loaded through /api/v1/last_updated
    """
    def get(self):
        num_items = int(self.get_argument("items", 25))
        self.set_header("Content-type", "application/json")
        self.write(json.dumps(self.list_updated(num_items)))

    def list_updated(self, num_items=25):
        """Merge the most recently updated project and flowcell documents,
        newest first, capped at num_items entries."""
        sources = ((self.application.projects_db, 'Project information'),
                   (self.application.flowcells_db, 'Flowcell information'))
        last = []
        for database, label in sources:
            view = database.view("time/last_updated",
                                 limit=num_items, descending=True)
            last.extend((doc.key, doc.value, label) for doc in view)
        last.sort(key=lambda tr: tr[0], reverse=True)
        return last[:num_items]
class PagedQCDataHandler(SafeHandler):
    """ Serves a list of 50 sample names following a given string
    in alphabetical order.
    loaded through /api/v1/samples/start/([^/]*)$
    """
    def get(self, startkey):
        self.set_header("Content-type", "application/json")
        self.write(json.dumps(self.list_samples(startkey)))

    def list_samples(self, startkey):
        """Return up to 50 sample names starting at *startkey*."""
        view = self.application.samples_db.view("names/samplename_run",
                                                group_level=1,
                                                limit=50,
                                                startkey=startkey)
        return [row.key for row in view]
class NoCacheStaticFileHandler(tornado.web.StaticFileHandler):
    """ Serves up static files without any tornado caching.
    https://gist.github.com/omarish/5499385
    """
    def set_extra_headers(self, path):
        # Ask clients to revalidate on every request instead of caching.
        self.set_header("Cache-control", "no-cache")
class LastPSULRunHandler(SafeHandler):
    """Gives the date of the last PSUL run, assuming the logfile is where we expect it"""
    def get(self):
        logfile = self.application.psul_log
        response = {}
        try:
            text_timestamp = os.stat(logfile).st_mtime
            delta = datetime.now() - datetime.fromtimestamp(int(text_timestamp))
        except (OSError, KeyError, TypeError):
            response['status'] = "Log File '{}' not found.".format(logfile)
        else:
            # Use total_seconds() so runs older than one day are reported
            # correctly: timedelta.seconds alone silently discards the
            # .days component.
            total_seconds = int(delta.total_seconds())
            response['status'] = "Success"
            response['hours'] = total_seconds // 3600
            response['minutes'] = (total_seconds % 3600) // 60
            response['seconds'] = total_seconds % 60
        self.set_header("Content-type", "application/json")
        self.write(json.dumps(response))
########################
# Other useful classes #
########################
class GoogleUser(object):
    """Stores the information that Google returns for a user through its
    secured API."""

    def __init__(self, user_token):
        assert 'access_token' in user_token
        self.user_token = user_token
        self._google_plus_api = "https://www.googleapis.com/plus/v1/people/me"
        # Fetch the actual profile information from the Google API.
        params = {'access_token': self.user_token.get('access_token')}
        r = requests.get(self._google_plus_api, params=params)
        self.authenticated = (r.status_code == requests.status_codes.codes.OK)
        if self.authenticated:
            info = json.loads(r.text)
            self.display_name = info.get('displayName', '')
            self.emails = [email['value'] for email in info.get('emails')]

    def is_authorized(self, user_view):
        """Checks that the user is actually authorised to use
        genomics-status; remembers the (last) matching address in
        self.valid_email."""
        authorized = False
        for email in self.emails:
            if user_view[email]:
                self.valid_email = email
                authorized = True
        return authorized
|
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import mock
from mox3 import mox
import netaddr
from oslo_db import exception as db_exc
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from nova.cells import rpcapi as cells_rpcapi
from nova.compute import flavors
from nova import db
from nova import exception
from nova.network import model as network_model
from nova import notifications
from nova import objects
from nova.objects import base
from nova.objects import fields
from nova.objects import instance
from nova.objects import instance_info_cache
from nova.objects import pci_device
from nova.objects import security_group
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit.objects import test_instance_fault
from nova.tests.unit.objects import test_instance_info_cache
from nova.tests.unit.objects import test_instance_numa_topology
from nova.tests.unit.objects import test_instance_pci_requests
from nova.tests.unit.objects import test_migration_context as test_mig_ctxt
from nova.tests.unit.objects import test_objects
from nova.tests.unit.objects import test_security_group
from nova.tests.unit.objects import test_vcpu_model
from nova import utils
class _TestInstanceObject(object):
    @property
    def fake_instance(self):
        """DB-style instance dict used as the baseline fixture.

        NOTE: rebuilt on every access, so each test works on a fresh,
        independently mutable copy.
        """
        db_inst = fake_instance.fake_db_instance(id=2,
                                            access_ip_v4='1.2.3.4',
                                            access_ip_v6='::1')
        db_inst['uuid'] = '34fd7606-2ed5-42c7-ad46-76240c088801'
        db_inst['cell_name'] = 'api!child'
        db_inst['terminated_at'] = None
        db_inst['deleted_at'] = None
        db_inst['created_at'] = None
        db_inst['updated_at'] = None
        db_inst['launched_at'] = datetime.datetime(1955, 11, 12,
                                                   22, 4, 0)
        db_inst['deleted'] = False
        db_inst['security_groups'] = []
        db_inst['pci_devices'] = []
        db_inst['user_id'] = self.context.user_id
        db_inst['project_id'] = self.context.project_id
        db_inst['tags'] = []
        db_inst['info_cache'] = dict(test_instance_info_cache.fake_info_cache,
                                     instance_uuid=db_inst['uuid'])
        return db_inst
    def test_datetime_deserialization(self):
        """Datetime fields round-trip through obj_to_primitive as ISO strings."""
        red_letter_date = timeutils.parse_isotime(
            utils.isotime(datetime.datetime(1955, 11, 5)))
        inst = objects.Instance(uuid='fake-uuid', launched_at=red_letter_date)
        primitive = inst.obj_to_primitive()
        expected = {'nova_object.name': 'Instance',
                    'nova_object.namespace': 'nova',
                    'nova_object.version': inst.VERSION,
                    'nova_object.data':
                        {'uuid': 'fake-uuid',
                         'launched_at': '1955-11-05T00:00:00Z'},
                    'nova_object.changes': ['launched_at', 'uuid']}
        self.assertJsonEqual(primitive, expected)
        inst2 = objects.Instance.obj_from_primitive(primitive)
        self.assertIsInstance(inst2.launched_at, datetime.datetime)
        self.assertEqual(red_letter_date, inst2.launched_at)
    def test_ip_deserialization(self):
        """IP fields deserialize from strings into netaddr.IPAddress objects."""
        inst = objects.Instance(uuid='fake-uuid', access_ip_v4='1.2.3.4',
                                access_ip_v6='::1')
        primitive = inst.obj_to_primitive()
        expected = {'nova_object.name': 'Instance',
                    'nova_object.namespace': 'nova',
                    'nova_object.version': inst.VERSION,
                    'nova_object.data':
                        {'uuid': 'fake-uuid',
                         'access_ip_v4': '1.2.3.4',
                         'access_ip_v6': '::1'},
                    'nova_object.changes': ['uuid', 'access_ip_v6',
                                            'access_ip_v4']}
        self.assertJsonEqual(primitive, expected)
        inst2 = objects.Instance.obj_from_primitive(primitive)
        self.assertIsInstance(inst2.access_ip_v4, netaddr.IPAddress)
        self.assertIsInstance(inst2.access_ip_v6, netaddr.IPAddress)
        self.assertEqual(netaddr.IPAddress('1.2.3.4'), inst2.access_ip_v4)
        self.assertEqual(netaddr.IPAddress('::1'), inst2.access_ip_v6)
    def test_get_without_expected(self):
        """get_by_uuid with expected_attrs=[] leaves all optional attrs unset."""
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        db.instance_get_by_uuid(self.context, 'uuid',
                                columns_to_join=[],
                                use_slave=False
                                ).AndReturn(self.fake_instance)
        self.mox.ReplayAll()
        inst = objects.Instance.get_by_uuid(self.context, 'uuid',
                                            expected_attrs=[])
        for attr in instance.INSTANCE_OPTIONAL_ATTRS:
            self.assertFalse(inst.obj_attr_is_set(attr))
    def test_get_with_expected(self):
        """Requesting every optional attribute loads and sets them all."""
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
        self.mox.StubOutWithMock(
            db, 'instance_extra_get_by_instance_uuid')
        exp_cols = instance.INSTANCE_OPTIONAL_ATTRS[:]
        # These attributes are stored in the instance_extra table, so they
        # are joined as 'extra.<name>' columns rather than by plain name.
        exp_cols.remove('fault')
        exp_cols.remove('numa_topology')
        exp_cols.remove('pci_requests')
        exp_cols.remove('vcpu_model')
        exp_cols.remove('ec2_ids')
        exp_cols.remove('migration_context')
        exp_cols = list(filter(lambda x: 'flavor' not in x, exp_cols))
        exp_cols.extend(['extra', 'extra.numa_topology', 'extra.pci_requests',
                         'extra.flavor', 'extra.vcpu_model',
                         'extra.migration_context'])
        fake_topology = (test_instance_numa_topology.
                         fake_db_topology['numa_topology'])
        fake_requests = jsonutils.dumps(test_instance_pci_requests.
                                        fake_pci_requests)
        fake_flavor = jsonutils.dumps(
            {'cur': objects.Flavor().obj_to_primitive(),
             'old': None, 'new': None})
        fake_vcpu_model = jsonutils.dumps(
            test_vcpu_model.fake_vcpumodel.obj_to_primitive())
        fake_mig_context = jsonutils.dumps(
            test_mig_ctxt.fake_migration_context_obj.obj_to_primitive())
        fake_instance = dict(self.fake_instance,
                             extra={
                                 'numa_topology': fake_topology,
                                 'pci_requests': fake_requests,
                                 'flavor': fake_flavor,
                                 'vcpu_model': fake_vcpu_model,
                                 'migration_context': fake_mig_context,
                                 })
        db.instance_get_by_uuid(
            self.context, 'uuid',
            columns_to_join=exp_cols,
            use_slave=False
            ).AndReturn(fake_instance)
        fake_faults = test_instance_fault.fake_faults
        db.instance_fault_get_by_instance_uuids(
            self.context, [fake_instance['uuid']]
            ).AndReturn(fake_faults)
        self.mox.ReplayAll()
        inst = objects.Instance.get_by_uuid(
            self.context, 'uuid',
            expected_attrs=instance.INSTANCE_OPTIONAL_ATTRS)
        for attr in instance.INSTANCE_OPTIONAL_ATTRS:
            self.assertTrue(inst.obj_attr_is_set(attr))
    def test_get_by_id(self):
        """get_by_id joins info_cache and security_groups by default."""
        self.mox.StubOutWithMock(db, 'instance_get')
        db.instance_get(self.context, 'instid',
                        columns_to_join=['info_cache',
                                         'security_groups']
                        ).AndReturn(self.fake_instance)
        self.mox.ReplayAll()
        inst = objects.Instance.get_by_id(self.context, 'instid')
        self.assertEqual(self.fake_instance['uuid'], inst.uuid)
    def test_load(self):
        """Lazy-loading 'metadata' hits the DB once and caches the result."""
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        fake_uuid = self.fake_instance['uuid']
        db.instance_get_by_uuid(self.context, fake_uuid,
                                columns_to_join=['info_cache',
                                                 'security_groups'],
                                use_slave=False
                                ).AndReturn(self.fake_instance)
        fake_inst2 = dict(self.fake_instance,
                          metadata=[{'key': 'foo', 'value': 'bar'}])
        db.instance_get_by_uuid(self.context, fake_uuid,
                                columns_to_join=['metadata'],
                                use_slave=False
                                ).AndReturn(fake_inst2)
        self.mox.ReplayAll()
        inst = objects.Instance.get_by_uuid(self.context, fake_uuid)
        self.assertFalse(hasattr(inst, '_obj_metadata'))
        meta = inst.metadata
        self.assertEqual({'foo': 'bar'}, meta)
        self.assertTrue(hasattr(inst, '_obj_metadata'))
        # Make sure we don't run load again
        meta2 = inst.metadata
        self.assertEqual({'foo': 'bar'}, meta2)
    def test_load_invalid(self):
        """Lazy-loading an unknown attribute raises ObjectActionError."""
        inst = objects.Instance(context=self.context, uuid='fake-uuid')
        self.assertRaises(exception.ObjectActionError,
                          inst.obj_load_attr, 'foo')
    def test_get_remote(self):
        """Field values survive a round trip through get_by_uuid."""
        # isotime doesn't have microseconds and is always UTC
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        fake_instance = self.fake_instance  # NOTE: local shadows the module import
        db.instance_get_by_uuid(self.context, 'fake-uuid',
                                columns_to_join=['info_cache',
                                                 'security_groups'],
                                use_slave=False
                                ).AndReturn(fake_instance)
        self.mox.ReplayAll()
        inst = objects.Instance.get_by_uuid(self.context, 'fake-uuid')
        self.assertEqual(fake_instance['id'], inst.id)
        self.assertEqual(fake_instance['launched_at'],
                         inst.launched_at.replace(tzinfo=None))
        self.assertEqual(fake_instance['access_ip_v4'],
                         str(inst.access_ip_v4))
        self.assertEqual(fake_instance['access_ip_v6'],
                         str(inst.access_ip_v6))
def test_refresh(self):
    """refresh() re-reads the DB record and refreshes the info cache."""
    self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
    fake_uuid = self.fake_instance['uuid']
    # First fetch returns the original host, second one the updated host.
    db.instance_get_by_uuid(self.context, fake_uuid,
                            columns_to_join=['info_cache',
                                             'security_groups'],
                            use_slave=False
                            ).AndReturn(dict(self.fake_instance,
                                             host='orig-host'))
    db.instance_get_by_uuid(self.context, fake_uuid,
                            columns_to_join=['info_cache',
                                             'security_groups'],
                            use_slave=False
                            ).AndReturn(dict(self.fake_instance,
                                             host='new-host'))
    self.mox.StubOutWithMock(instance_info_cache.InstanceInfoCache,
                             'refresh')
    instance_info_cache.InstanceInfoCache.refresh()
    self.mox.ReplayAll()
    inst = objects.Instance.get_by_uuid(self.context, fake_uuid)
    self.assertEqual('orig-host', inst.host)
    inst.refresh()
    self.assertEqual('new-host', inst.host)
    # refresh() must leave the object with no pending changes.
    self.assertEqual(set([]), inst.obj_what_changed())
def test_refresh_does_not_recurse(self):
    """refresh() raises OrphanedObjectError for a context-less copy."""
    inst = objects.Instance(context=self.context, uuid='fake-uuid',
                            metadata={})
    # The refreshed copy deliberately has no context set.
    inst_copy = objects.Instance()
    inst_copy.uuid = inst.uuid
    self.mox.StubOutWithMock(objects.Instance, 'get_by_uuid')
    objects.Instance.get_by_uuid(self.context, uuid=inst.uuid,
                                 expected_attrs=['metadata'],
                                 use_slave=False
                                 ).AndReturn(inst_copy)
    self.mox.ReplayAll()
    self.assertRaises(exception.OrphanedObjectError, inst.refresh)
def _save_test_helper(self, cell_type, save_kwargs):
    """Common code for testing save() for cells/non-cells.

    :param cell_type: 'api', 'compute', or None to disable cells.
    :param save_kwargs: kwargs forwarded to Instance.save(); special
        keys 'expected_vm_state', 'expected_task_state',
        'admin_state_reset' and 'instance_version' also alter the
        expected DB update arguments.
    """
    if cell_type:
        self.flags(enable=True, cell_type=cell_type, group='cells')
    else:
        self.flags(enable=False, group='cells')
    old_ref = dict(self.fake_instance, host='oldhost', user_data='old',
                   vm_state='old', task_state='old')
    fake_uuid = old_ref['uuid']
    expected_updates = dict(vm_state='meow', task_state='wuff',
                            user_data='new')
    new_ref = dict(old_ref, host='newhost', **expected_updates)
    exp_vm_state = save_kwargs.get('expected_vm_state')
    exp_task_state = save_kwargs.get('expected_task_state')
    admin_reset = save_kwargs.get('admin_state_reset', False)
    if exp_vm_state:
        expected_updates['expected_vm_state'] = exp_vm_state
    if exp_task_state:
        # Version 1.9 objects expand 'image_snapshot' to also accept
        # the pending state in the expected task states.
        if (exp_task_state == 'image_snapshot' and
                'instance_version' in save_kwargs and
                save_kwargs['instance_version'] == '1.9'):
            expected_updates['expected_task_state'] = [
                'image_snapshot', 'image_snapshot_pending']
        else:
            expected_updates['expected_task_state'] = exp_task_state
    self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
    self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
    self.mox.StubOutWithMock(db, 'instance_info_cache_update')
    cells_api_mock = self.mox.CreateMock(cells_rpcapi.CellsAPI)
    self.mox.StubOutWithMock(cells_api_mock,
                             'instance_update_at_top')
    self.mox.StubOutWithMock(cells_api_mock,
                             'instance_update_from_api')
    self.mox.StubOutWithMock(cells_rpcapi, 'CellsAPI',
                             use_mock_anything=True)
    self.mox.StubOutWithMock(notifications, 'send_update')
    db.instance_get_by_uuid(self.context, fake_uuid,
                            columns_to_join=['info_cache',
                                             'security_groups'],
                            use_slave=False
                            ).AndReturn(old_ref)
    db.instance_update_and_get_original(
        self.context, fake_uuid, expected_updates,
        columns_to_join=['info_cache', 'security_groups',
                         'system_metadata', 'extra', 'extra.flavor']
        ).AndReturn((old_ref, new_ref))
    # Which cells RPC is expected depends on the cell we are in.
    if cell_type == 'api':
        cells_rpcapi.CellsAPI().AndReturn(cells_api_mock)
        cells_api_mock.instance_update_from_api(
            self.context, mox.IsA(objects.Instance),
            exp_vm_state, exp_task_state, admin_reset)
    elif cell_type == 'compute':
        cells_rpcapi.CellsAPI().AndReturn(cells_api_mock)
        cells_api_mock.instance_update_at_top(self.context,
                                              mox.IsA(objects.Instance))
    notifications.send_update(self.context, mox.IgnoreArg(),
                              mox.IgnoreArg())
    self.mox.ReplayAll()
    inst = objects.Instance.get_by_uuid(self.context, old_ref['uuid'])
    if 'instance_version' in save_kwargs:
        inst.VERSION = save_kwargs.pop('instance_version')
    self.assertEqual('old', inst.task_state)
    self.assertEqual('old', inst.vm_state)
    self.assertEqual('old', inst.user_data)
    inst.vm_state = 'meow'
    inst.task_state = 'wuff'
    inst.user_data = 'new'
    save_kwargs.pop('context', None)
    inst.save(**save_kwargs)
    self.assertEqual('newhost', inst.host)
    self.assertEqual('meow', inst.vm_state)
    self.assertEqual('wuff', inst.task_state)
    self.assertEqual('new', inst.user_data)
    # NOTE(danms): Ignore flavor migrations for the moment
    self.assertEqual(set([]), inst.obj_what_changed() - set(['flavor']))
def test_save(self):
    """Plain save() with cells disabled."""
    self._save_test_helper(cell_type=None, save_kwargs={})
def test_save_in_api_cell(self):
    """save() while configured as an API cell."""
    self._save_test_helper(cell_type='api', save_kwargs={})
def test_save_in_compute_cell(self):
    """save() while configured as a compute cell."""
    self._save_test_helper(cell_type='compute', save_kwargs={})
def test_save_exp_vm_state(self):
    """save() forwards expected_vm_state to the DB update."""
    self._save_test_helper(
        cell_type=None, save_kwargs={'expected_vm_state': ['meow']})
def test_save_exp_task_state(self):
    """save() forwards expected_task_state to the DB update."""
    self._save_test_helper(
        cell_type=None, save_kwargs={'expected_task_state': ['meow']})
def test_save_exp_vm_state_api_cell(self):
    """save() forwards expected_vm_state in an API cell."""
    self._save_test_helper(
        cell_type='api', save_kwargs={'expected_vm_state': ['meow']})
def test_save_exp_task_state_api_cell(self):
    """save() forwards expected_task_state in an API cell."""
    self._save_test_helper(
        cell_type='api', save_kwargs={'expected_task_state': ['meow']})
def test_save_exp_task_state_api_cell_admin_reset(self):
    """save() forwards admin_state_reset in an API cell."""
    self._save_test_helper(
        cell_type='api', save_kwargs={'admin_state_reset': True})
def test_save_rename_sends_notification(self):
    """Changing only display_name still triggers an update notification."""
    # Tests that simply changing the 'display_name' on the instance
    # will send a notification.
    self.flags(enable=False, group='cells')
    old_ref = dict(self.fake_instance, display_name='hello')
    fake_uuid = old_ref['uuid']
    expected_updates = dict(display_name='goodbye')
    new_ref = dict(old_ref, **expected_updates)
    self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
    self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
    self.mox.StubOutWithMock(notifications, 'send_update')
    db.instance_get_by_uuid(self.context, fake_uuid,
                            columns_to_join=['info_cache',
                                             'security_groups'],
                            use_slave=False
                            ).AndReturn(old_ref)
    db.instance_update_and_get_original(
        self.context, fake_uuid, expected_updates,
        columns_to_join=['info_cache', 'security_groups',
                         'system_metadata', 'extra', 'extra.flavor']
        ).AndReturn((old_ref, new_ref))
    notifications.send_update(self.context, mox.IgnoreArg(),
                              mox.IgnoreArg())
    self.mox.ReplayAll()
    inst = objects.Instance.get_by_uuid(self.context, old_ref['uuid'],
                                        use_slave=False)
    self.assertEqual('hello', inst.display_name)
    inst.display_name = 'goodbye'
    inst.save()
    self.assertEqual('goodbye', inst.display_name)
    # NOTE(danms): Ignore flavor migrations for the moment
    self.assertEqual(set([]), inst.obj_what_changed() - set(['flavor']))
def test_save_related_object_if_none(self):
    """Setting a related field to None still triggers its save handler."""
    with mock.patch.object(objects.Instance,
                           '_save_pci_requests') as mock_save_pci:
        inst = objects.Instance._from_db_object(
            self.context, objects.Instance(), self.fake_instance)
        inst.pci_requests = None
        inst.save()
        self.assertTrue(mock_save_pci.called)
@mock.patch('nova.db.instance_update_and_get_original')
@mock.patch.object(instance.Instance, '_from_db_object')
def test_save_does_not_refresh_pci_devices(self, mock_fdo, mock_update):
    # NOTE(danms): This tests that we don't update the pci_devices
    # field from the contents of the database. This is not because we
    # don't necessarily want to, but because the way pci_devices is
    # currently implemented it causes versioning issues. When that is
    # resolved, this test should go away.
    mock_update.return_value = (None, None)
    inst = objects.Instance(context=self.context, id=123)
    inst.uuid = 'foo'
    inst.pci_devices = pci_device.PciDeviceList()
    inst.save()
    refreshed_attrs = mock_fdo.call_args_list[0][1]['expected_attrs']
    self.assertNotIn('pci_devices', refreshed_attrs)
@mock.patch('nova.db.instance_extra_update_by_uuid')
@mock.patch('nova.db.instance_update_and_get_original')
@mock.patch.object(instance.Instance, '_from_db_object')
def test_save_updates_numa_topology(self, mock_fdo, mock_update,
                                    mock_extra_update):
    """Saving numa_topology writes JSON to instance_extra; None clears."""
    fake_obj_numa_topology = objects.InstanceNUMATopology(cells=[
        objects.InstanceNUMACell(id=0, cpuset=set([0]), memory=128),
        objects.InstanceNUMACell(id=1, cpuset=set([1]), memory=128)])
    fake_obj_numa_topology.instance_uuid = 'fake-uuid'
    jsonified = fake_obj_numa_topology._to_json()
    mock_update.return_value = None, None
    inst = objects.Instance(
        context=self.context, id=123, uuid='fake-uuid')
    inst.numa_topology = fake_obj_numa_topology
    inst.save()
    # NOTE(sdague): the json representation of nova object for
    # NUMA isn't stable from a string comparison
    # perspective. There are sets which get converted to lists,
    # and based on platform differences may show up in different
    # orders. So we can't have mock do the comparison. Instead
    # manually compare the final parameter using our json equality
    # operator which does the right thing here.
    mock_extra_update.assert_called_once_with(
        self.context, inst.uuid, mock.ANY)
    called_arg = mock_extra_update.call_args_list[0][0][2]['numa_topology']
    self.assertJsonEqual(called_arg, jsonified)
    mock_extra_update.reset_mock()
    # Clearing the field must write an explicit None to the DB.
    inst.numa_topology = None
    inst.save()
    mock_extra_update.assert_called_once_with(
        self.context, inst.uuid, {'numa_topology': None})
@mock.patch('nova.db.instance_extra_update_by_uuid')
def test_save_vcpu_model(self, mock_update):
    """Saving vcpu_model serializes it into instance_extra; None clears."""
    inst = fake_instance.fake_instance_obj(self.context)
    inst.vcpu_model = test_vcpu_model.fake_vcpumodel
    inst.save()
    self.assertTrue(mock_update.called)
    self.assertEqual(1, mock_update.call_count)
    actual_args = mock_update.call_args
    self.assertEqual(self.context, actual_args[0][0])
    self.assertEqual(inst.uuid, actual_args[0][1])
    self.assertEqual(['vcpu_model'], list(actual_args[0][2].keys()))
    # Compare via JSON equality: primitive serialization order is not
    # stable enough for a plain string comparison.
    self.assertJsonEqual(jsonutils.dumps(
        test_vcpu_model.fake_vcpumodel.obj_to_primitive()),
        actual_args[0][2]['vcpu_model'])
    mock_update.reset_mock()
    inst.vcpu_model = None
    inst.save()
    mock_update.assert_called_once_with(
        self.context, inst.uuid, {'vcpu_model': None})
@mock.patch('nova.db.instance_extra_update_by_uuid')
def test_save_migration_context_model(self, mock_update):
    """Saving migration_context stores it in instance_extra; None clears."""
    inst = fake_instance.fake_instance_obj(self.context)
    inst.migration_context = test_mig_ctxt.get_fake_migration_context_obj(
        self.context)
    inst.save()
    self.assertTrue(mock_update.called)
    self.assertEqual(1, mock_update.call_count)
    actual_args = mock_update.call_args
    self.assertEqual(self.context, actual_args[0][0])
    self.assertEqual(inst.uuid, actual_args[0][1])
    self.assertEqual(['migration_context'], list(actual_args[0][2].keys()))
    # The stored value must deserialize back into a MigrationContext.
    self.assertIsInstance(
        objects.MigrationContext.obj_from_db_obj(
            actual_args[0][2]['migration_context']),
        objects.MigrationContext)
    mock_update.reset_mock()
    inst.migration_context = None
    inst.save()
    mock_update.assert_called_once_with(
        self.context, inst.uuid, {'migration_context': None})
def test_save_flavor_skips_unchanged_flavors(self):
    """save() must not touch instance_extra when flavor is unchanged."""
    inst = objects.Instance(context=self.context, flavor=objects.Flavor())
    inst.obj_reset_changes()
    with mock.patch('nova.db.instance_extra_update_by_uuid') as mock_upd:
        inst.save()
    self.assertFalse(mock_upd.called)
@mock.patch.object(notifications, 'send_update')
@mock.patch.object(cells_rpcapi.CellsAPI, 'instance_update_from_api')
@mock.patch.object(cells_rpcapi.CellsAPI, 'instance_update_at_top')
@mock.patch.object(db, 'instance_update_and_get_original')
def _test_skip_cells_sync_helper(self, mock_db_update, mock_update_at_top,
                                 mock_update_from_api, mock_notif_update,
                                 cell_type):
    """save() inside skip_cells_sync() sends no cell sync/notification.

    :param cell_type: 'api' or 'compute'; selects which cells RPC call
        is expected once syncing resumes.
    """
    self.flags(enable=True, cell_type=cell_type, group='cells')
    inst = fake_instance.fake_instance_obj(self.context, cell_name='fake')
    inst.vm_state = 'foo'
    inst.task_state = 'bar'
    inst.cell_name = 'foo!bar@baz'
    old_ref = dict(base.obj_to_primitive(inst), vm_state='old',
                   task_state='old')
    new_ref = dict(old_ref, vm_state='foo', task_state='bar')
    newer_ref = dict(new_ref, vm_state='bar', task_state='foo')
    mock_db_update.side_effect = [(old_ref, new_ref), (new_ref, newer_ref)]
    with inst.skip_cells_sync():
        inst.save()
    # While inside the context manager no cell update or notification
    # may have been sent.
    mock_update_at_top.assert_has_calls([])
    mock_update_from_api.assert_has_calls([])
    self.assertFalse(mock_notif_update.called)
    inst.vm_state = 'bar'
    inst.task_state = 'foo'

    def fake_update_from_api(context, instance, expected_vm_state,
                             expected_task_state, admin_state_reset):
        self.assertEqual('foo!bar@baz', instance.cell_name)

    # This is re-mocked so that cell_name can be checked above. Since
    # instance objects have no equality testing assert_called_once_with
    # doesn't work.
    with mock.patch.object(cells_rpcapi.CellsAPI,
            'instance_update_from_api',
            side_effect=fake_update_from_api) as fake_update_from_api:
        inst.save()
    self.assertEqual('foo!bar@baz', inst.cell_name)
    self.assertTrue(mock_notif_update.called)
    if cell_type == 'compute':
        mock_update_at_top.assert_called_once_with(self.context, mock.ANY)
        # Compare primitives since we can't check instance object equality
        expected_inst_p = base.obj_to_primitive(inst)
        actual_inst = mock_update_at_top.call_args[0][1]
        actual_inst_p = base.obj_to_primitive(actual_inst)
        self.assertEqual(expected_inst_p, actual_inst_p)
        self.assertFalse(fake_update_from_api.called)
    elif cell_type == 'api':
        self.assertFalse(mock_update_at_top.called)
        fake_update_from_api.assert_called_once_with(self.context,
                                                     mock.ANY, None, None,
                                                     False)
    # Two DB updates: one inside skip_cells_sync (with cell_name), one
    # after syncing resumed.
    expected_calls = [
        mock.call(self.context, inst.uuid,
                  {'vm_state': 'foo', 'task_state': 'bar',
                   'cell_name': 'foo!bar@baz'},
                  columns_to_join=['system_metadata', 'extra',
                                   'extra.flavor']),
        mock.call(self.context, inst.uuid,
                  {'vm_state': 'bar', 'task_state': 'foo'},
                  columns_to_join=['system_metadata'])]
    mock_db_update.assert_has_calls(expected_calls)
def test_skip_cells_api(self):
    """Exercise skip_cells_sync() in an API cell."""
    self._test_skip_cells_sync_helper(cell_type='api')
def test_skip_cells_compute(self):
    """Exercise skip_cells_sync() in a compute cell."""
    self._test_skip_cells_sync_helper(cell_type='compute')
def test_get_deleted(self):
    """The DB's integer 'deleted' column coerces to a boolean field."""
    fake_inst = dict(self.fake_instance, id=123, deleted=123)
    fake_uuid = fake_inst['uuid']
    self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
    db.instance_get_by_uuid(self.context, fake_uuid,
                            columns_to_join=['info_cache',
                                             'security_groups'],
                            use_slave=False
                            ).AndReturn(fake_inst)
    self.mox.ReplayAll()
    inst = objects.Instance.get_by_uuid(self.context, fake_uuid)
    # NOTE(danms): Make sure it's actually a bool
    self.assertTrue(inst.deleted)
def test_get_not_cleaned(self):
    """A NULL 'cleaned' column coerces to boolean False."""
    fake_inst = dict(self.fake_instance, id=123, cleaned=None)
    fake_uuid = fake_inst['uuid']
    self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
    db.instance_get_by_uuid(self.context, fake_uuid,
                            columns_to_join=['info_cache',
                                             'security_groups'],
                            use_slave=False
                            ).AndReturn(fake_inst)
    self.mox.ReplayAll()
    inst = objects.Instance.get_by_uuid(self.context, fake_uuid)
    # NOTE(mikal): Make sure it's actually a bool
    self.assertFalse(inst.cleaned)
def test_get_cleaned(self):
    """An integer 1 in the 'cleaned' column coerces to boolean True."""
    fake_inst = dict(self.fake_instance, id=123, cleaned=1)
    fake_uuid = fake_inst['uuid']
    self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
    db.instance_get_by_uuid(self.context, fake_uuid,
                            columns_to_join=['info_cache',
                                             'security_groups'],
                            use_slave=False
                            ).AndReturn(fake_inst)
    self.mox.ReplayAll()
    inst = objects.Instance.get_by_uuid(self.context, fake_uuid)
    # NOTE(mikal): Make sure it's actually a bool
    self.assertTrue(inst.cleaned)
def test_with_info_cache(self):
    """Saving a changed info_cache updates the info cache table."""
    fake_inst = dict(self.fake_instance)
    fake_uuid = fake_inst['uuid']
    nwinfo1 = network_model.NetworkInfo.hydrate([{'address': 'foo'}])
    nwinfo2 = network_model.NetworkInfo.hydrate([{'address': 'bar'}])
    nwinfo1_json = nwinfo1.json()
    nwinfo2_json = nwinfo2.json()
    fake_info_cache = test_instance_info_cache.fake_info_cache
    fake_inst['info_cache'] = dict(
        fake_info_cache,
        network_info=nwinfo1_json,
        instance_uuid=fake_uuid)
    self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
    self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
    self.mox.StubOutWithMock(db, 'instance_info_cache_update')
    db.instance_get_by_uuid(self.context, fake_uuid,
                            columns_to_join=['info_cache',
                                             'security_groups'],
                            use_slave=False
                            ).AndReturn(fake_inst)
    # Only the info cache update is expected; no full instance update.
    db.instance_info_cache_update(self.context, fake_uuid,
                                  {'network_info': nwinfo2_json}
                                  ).AndReturn(fake_info_cache)
    self.mox.ReplayAll()
    inst = objects.Instance.get_by_uuid(self.context, fake_uuid)
    self.assertEqual(nwinfo1, inst.info_cache.network_info)
    self.assertEqual(fake_uuid, inst.info_cache.instance_uuid)
    inst.info_cache.network_info = nwinfo2
    inst.save()
def test_with_info_cache_none(self):
    """A NULL info_cache in the DB maps to None on the object."""
    fake_inst = dict(self.fake_instance, info_cache=None)
    fake_uuid = fake_inst['uuid']
    self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
    db.instance_get_by_uuid(self.context, fake_uuid,
                            columns_to_join=['info_cache'],
                            use_slave=False
                            ).AndReturn(fake_inst)
    self.mox.ReplayAll()
    inst = objects.Instance.get_by_uuid(self.context, fake_uuid,
                                        ['info_cache'])
    self.assertIsNone(inst.info_cache)
def test_with_security_groups(self):
    """Saving a changed security group issues a targeted group update."""
    fake_inst = dict(self.fake_instance)
    fake_uuid = fake_inst['uuid']
    fake_inst['security_groups'] = [
        {'id': 1, 'name': 'secgroup1', 'description': 'fake-desc',
         'user_id': 'fake-user', 'project_id': 'fake_project',
         'created_at': None, 'updated_at': None, 'deleted_at': None,
         'deleted': False},
        {'id': 2, 'name': 'secgroup2', 'description': 'fake-desc',
         'user_id': 'fake-user', 'project_id': 'fake_project',
         'created_at': None, 'updated_at': None, 'deleted_at': None,
         'deleted': False},
    ]
    self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
    self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
    self.mox.StubOutWithMock(db, 'security_group_update')
    db.instance_get_by_uuid(self.context, fake_uuid,
                            columns_to_join=['info_cache',
                                             'security_groups'],
                            use_slave=False
                            ).AndReturn(fake_inst)
    # Only group id 1 is expected to be updated, with just its change.
    db.security_group_update(self.context, 1, {'description': 'changed'}
                             ).AndReturn(fake_inst['security_groups'][0])
    self.mox.ReplayAll()
    inst = objects.Instance.get_by_uuid(self.context, fake_uuid)
    self.assertEqual(2, len(inst.security_groups))
    for index, group in enumerate(fake_inst['security_groups']):
        for key in group:
            self.assertEqual(group[key],
                             inst.security_groups[index][key])
        self.assertIsInstance(inst.security_groups[index],
                              security_group.SecurityGroup)
    self.assertEqual(set(), inst.security_groups.obj_what_changed())
    inst.security_groups[0].description = 'changed'
    inst.save()
    # save() must leave the group list clean again.
    self.assertEqual(set(), inst.security_groups.obj_what_changed())
def test_with_empty_security_groups(self):
    """An instance with no security groups loads an empty list."""
    fake_inst = dict(self.fake_instance, security_groups=[])
    fake_uuid = fake_inst['uuid']
    self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
    db.instance_get_by_uuid(self.context, fake_uuid,
                            columns_to_join=['info_cache',
                                             'security_groups'],
                            use_slave=False
                            ).AndReturn(fake_inst)
    self.mox.ReplayAll()
    inst = objects.Instance.get_by_uuid(self.context, fake_uuid)
    self.assertEqual(0, len(inst.security_groups))
def test_with_empty_pci_devices(self):
    """An instance with no PCI devices loads an empty device list."""
    fake_inst = dict(self.fake_instance, pci_devices=[])
    fake_uuid = fake_inst['uuid']
    self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
    db.instance_get_by_uuid(self.context, fake_uuid,
                            columns_to_join=['pci_devices'],
                            use_slave=False
                            ).AndReturn(fake_inst)
    self.mox.ReplayAll()
    inst = objects.Instance.get_by_uuid(self.context, fake_uuid,
                                        ['pci_devices'])
    self.assertEqual(0, len(inst.pci_devices))
def test_with_pci_devices(self):
    """PCI device rows hydrate into objects on the pci_devices field."""
    fake_inst = dict(self.fake_instance)
    fake_uuid = fake_inst['uuid']
    fake_inst['pci_devices'] = [
        {'created_at': None,
         'updated_at': None,
         'deleted_at': None,
         'deleted': None,
         'id': 2,
         'compute_node_id': 1,
         'address': 'a1',
         'vendor_id': 'v1',
         'numa_node': 0,
         'product_id': 'p1',
         'dev_type': fields.PciDeviceType.STANDARD,
         'status': fields.PciDeviceStatus.ALLOCATED,
         'dev_id': 'i',
         'label': 'l',
         'instance_uuid': fake_uuid,
         'request_id': None,
         'extra_info': '{}'},
        {
         'created_at': None,
         'updated_at': None,
         'deleted_at': None,
         'deleted': None,
         'id': 1,
         'compute_node_id': 1,
         'address': 'a',
         'vendor_id': 'v',
         'numa_node': 1,
         'product_id': 'p',
         'dev_type': fields.PciDeviceType.STANDARD,
         'status': fields.PciDeviceStatus.ALLOCATED,
         'dev_id': 'i',
         'label': 'l',
         'instance_uuid': fake_uuid,
         'request_id': None,
         'extra_info': '{}'},
        ]
    self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
    db.instance_get_by_uuid(self.context, fake_uuid,
                            columns_to_join=['pci_devices'],
                            use_slave=False
                            ).AndReturn(fake_inst)
    self.mox.ReplayAll()
    inst = objects.Instance.get_by_uuid(self.context, fake_uuid,
                                        ['pci_devices'])
    self.assertEqual(2, len(inst.pci_devices))
    self.assertEqual(fake_uuid, inst.pci_devices[0].instance_uuid)
    self.assertEqual(fake_uuid, inst.pci_devices[1].instance_uuid)
def test_with_fault(self):
    """expected_attrs=['fault'] attaches an instance fault object."""
    fake_inst = dict(self.fake_instance)
    fake_uuid = fake_inst['uuid']
    fake_faults = [dict(x, instance_uuid=fake_uuid)
                   for x in test_instance_fault.fake_faults['fake-uuid']]
    self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
    self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
    db.instance_get_by_uuid(self.context, fake_uuid,
                            columns_to_join=[],
                            use_slave=False
                            ).AndReturn(self.fake_instance)
    db.instance_fault_get_by_instance_uuids(
        self.context, [fake_uuid]).AndReturn({fake_uuid: fake_faults})
    self.mox.ReplayAll()
    inst = objects.Instance.get_by_uuid(self.context, fake_uuid,
                                        expected_attrs=['fault'])
    # inst.fault is populated from the first fault in the returned list.
    self.assertEqual(fake_faults[0], dict(inst.fault.items()))
@mock.patch('nova.objects.EC2Ids.get_by_instance')
@mock.patch('nova.db.instance_get_by_uuid')
def test_with_ec2_ids(self, mock_get, mock_ec2):
    """expected_attrs=['ec2_ids'] populates the ec2_ids field."""
    fake_inst = dict(self.fake_instance)
    mock_get.return_value = fake_inst
    mock_ec2.return_value = objects.EC2Ids(instance_id='fake-inst',
                                           ami_id='fake-ami')
    inst = objects.Instance.get_by_uuid(self.context, fake_inst['uuid'],
                                        expected_attrs=['ec2_ids'])
    mock_ec2.assert_called_once_with(self.context, mock.ANY)
    self.assertEqual('fake-inst', inst.ec2_ids.instance_id)
def test_iteritems_with_extra_attrs(self):
    """items() includes extra attributes such as name with the fields."""
    self.stubs.Set(objects.Instance, 'name', 'foo')
    inst = objects.Instance(uuid='fake-uuid')
    expected = {'uuid': 'fake-uuid', 'name': 'foo'}
    self.assertEqual(sorted(expected.items()), sorted(inst.items()))
def _test_metadata_change_tracking(self, which):
    """In-place mutation of the named metadata dict marks it changed."""
    inst = objects.Instance(uuid='fake-uuid')
    setattr(inst, which, {})
    inst.obj_reset_changes()
    # Mutation (not reassignment) must still be tracked as a change.
    getattr(inst, which)['foo'] = 'bar'
    self.assertEqual({which}, inst.obj_what_changed())
    inst.obj_reset_changes()
    self.assertEqual(set(), inst.obj_what_changed())
def test_create_skip_scheduled_at(self):
    """create() drops scheduled_at=None from the DB create values."""
    self.mox.StubOutWithMock(db, 'instance_create')
    # 'scheduled_at' is deliberately absent from the expected values.
    vals = {'host': 'foo-host',
            'memory_mb': 128,
            'system_metadata': {'foo': 'bar'},
            'extra': {}}
    fake_inst = fake_instance.fake_db_instance(**vals)
    db.instance_create(self.context, vals).AndReturn(fake_inst)
    self.mox.ReplayAll()
    inst = objects.Instance(context=self.context,
                            host='foo-host', memory_mb=128,
                            scheduled_at=None,
                            system_metadata={'foo': 'bar'})
    inst.create()
    self.assertEqual('foo-host', inst.host)
def test_metadata_change_tracking(self):
    """Change tracking for the 'metadata' dict."""
    self._test_metadata_change_tracking(which='metadata')
def test_system_metadata_change_tracking(self):
    """Change tracking for the 'system_metadata' dict."""
    self._test_metadata_change_tracking(which='system_metadata')
def test_create_stubbed(self):
    """create() passes the set fields through to db.instance_create."""
    self.mox.StubOutWithMock(db, 'instance_create')
    vals = {'host': 'foo-host',
            'memory_mb': 128,
            'system_metadata': {'foo': 'bar'},
            'extra': {}}
    fake_inst = fake_instance.fake_db_instance(**vals)
    db.instance_create(self.context, vals).AndReturn(fake_inst)
    self.mox.ReplayAll()
    inst = objects.Instance(context=self.context,
                            host='foo-host', memory_mb=128,
                            system_metadata={'foo': 'bar'})
    inst.create()
def test_create(self):
    """create() with no fields set sends only the empty 'extra' dict."""
    self.mox.StubOutWithMock(db, 'instance_create')
    db.instance_create(self.context, {'extra': {}}).AndReturn(
        self.fake_instance)
    self.mox.ReplayAll()
    inst = objects.Instance(context=self.context)
    inst.create()
    self.assertEqual(self.fake_instance['id'], inst.id)
def test_create_with_values(self):
    """create() persists values so a fresh lookup sees them."""
    created = objects.Instance(context=self.context,
                               user_id=self.context.user_id,
                               project_id=self.context.project_id,
                               host='foo-host')
    created.create()
    self.assertEqual('foo-host', created.host)
    fetched = objects.Instance.get_by_uuid(self.context, created.uuid)
    self.assertEqual('foo-host', fetched.host)
def test_create_with_extras(self):
    """create() persists NUMA topology, PCI requests and vCPU model."""
    inst = objects.Instance(
        context=self.context,
        uuid=self.fake_instance['uuid'],
        numa_topology=test_instance_numa_topology.fake_obj_numa_topology,
        pci_requests=objects.InstancePCIRequests(
            requests=[objects.InstancePCIRequest(count=123, spec=[])]),
        vcpu_model=test_vcpu_model.fake_vcpumodel,
    )
    inst.create()
    self.assertIsNotNone(inst.numa_topology)
    self.assertIsNotNone(inst.pci_requests)
    self.assertEqual(1, len(inst.pci_requests.requests))
    self.assertIsNotNone(inst.vcpu_model)
    # Each extra must be independently retrievable after the create.
    numa = objects.InstanceNUMATopology.get_by_instance_uuid(
        self.context, inst.uuid)
    self.assertEqual(inst.numa_topology.instance_uuid,
                     numa.instance_uuid)
    pci_reqs = objects.InstancePCIRequests.get_by_instance_uuid(
        self.context, inst.uuid)
    self.assertEqual(123, pci_reqs.requests[0].count)
    cpu_model = objects.VirtCPUModel.get_by_instance_uuid(
        self.context, inst.uuid)
    self.assertEqual('fake-model', cpu_model.model)
def test_recreate_fails(self):
    """Calling create() twice on the same object is an error."""
    inst = objects.Instance(context=self.context,
                            host='foo-host',
                            user_id=self.context.user_id,
                            project_id=self.context.project_id)
    inst.create()
    self.assertRaises(exception.ObjectActionError, inst.create)
def test_create_with_special_things(self):
    """create() flattens security groups and info cache for the DB call."""
    self.mox.StubOutWithMock(db, 'instance_create')
    fake_inst = fake_instance.fake_db_instance()
    # Security group objects become a list of names; the info cache
    # becomes a dict with serialized network_info.
    db.instance_create(self.context,
                       {'host': 'foo-host',
                        'security_groups': ['foo', 'bar'],
                        'info_cache': {'network_info': '[]'},
                        'extra': {},
                        }
                       ).AndReturn(fake_inst)
    self.mox.ReplayAll()
    secgroups = security_group.SecurityGroupList()
    secgroups.objects = []
    for name in ('foo', 'bar'):
        secgroup = security_group.SecurityGroup()
        secgroup.name = name
        secgroups.objects.append(secgroup)
    info_cache = instance_info_cache.InstanceInfoCache()
    info_cache.network_info = network_model.NetworkInfo()
    inst = objects.Instance(context=self.context,
                            host='foo-host', security_groups=secgroups,
                            info_cache=info_cache)
    inst.create()
def test_destroy_stubbed(self):
    """destroy() sets deleted/deleted_at from the DB result."""
    self.mox.StubOutWithMock(db, 'instance_destroy')
    deleted_at = datetime.datetime(1955, 11, 6)
    fake_inst = fake_instance.fake_db_instance(deleted_at=deleted_at,
                                               deleted=True)
    db.instance_destroy(self.context, 'fake-uuid',
                        constraint=None).AndReturn(fake_inst)
    self.mox.ReplayAll()
    inst = objects.Instance(context=self.context, id=1, uuid='fake-uuid',
                            host='foo')
    inst.destroy()
    # Normalize both sides: the object's datetime is tz-aware.
    self.assertEqual(timeutils.normalize_time(deleted_at),
                     timeutils.normalize_time(inst.deleted_at))
    self.assertTrue(inst.deleted)
def test_destroy(self):
    """destroy() removes the instance's DB record."""
    db_inst = db.instance_create(
        self.context, {'user_id': self.context.user_id,
                       'project_id': self.context.project_id})
    inst = objects.Instance(context=self.context, id=db_inst['id'],
                            uuid=db_inst['uuid'])
    inst.destroy()
    self.assertRaises(exception.InstanceNotFound,
                      db.instance_get_by_uuid,
                      self.context, db_inst['uuid'])
def test_destroy_host_constraint(self):
    """destroy() on a host-less object violates the host constraint."""
    db_inst = db.instance_create(
        self.context, {'user_id': self.context.user_id,
                       'project_id': self.context.project_id,
                       'host': 'foo'})
    inst = objects.Instance.get_by_uuid(self.context, db_inst['uuid'])
    inst.host = None
    self.assertRaises(exception.ObjectActionError, inst.destroy)
@mock.patch.object(cells_rpcapi.CellsAPI, 'instance_destroy_at_top')
@mock.patch.object(db, 'instance_destroy')
def test_destroy_cell_sync_to_top(self, mock_destroy, mock_destroy_at_top):
    """In a compute cell, destroy() syncs the deletion to the top."""
    self.flags(enable=True, cell_type='compute', group='cells')
    mock_destroy.return_value = fake_instance.fake_db_instance(
        deleted=True)
    inst = objects.Instance(context=self.context, id=1, uuid='fake-uuid')
    inst.destroy()
    mock_destroy_at_top.assert_called_once_with(self.context, mock.ANY)
    synced_inst = mock_destroy_at_top.call_args[0][1]
    self.assertIsInstance(synced_inst, instance.Instance)
@mock.patch.object(cells_rpcapi.CellsAPI, 'instance_destroy_at_top')
@mock.patch.object(db, 'instance_destroy')
def test_destroy_no_cell_sync_to_top(self, mock_destroy,
                                     mock_destroy_at_top):
    """With cells disabled, destroy() does not sync to the top."""
    mock_destroy.return_value = fake_instance.fake_db_instance(
        deleted=True)
    inst = objects.Instance(context=self.context, id=1, uuid='fake-uuid')
    inst.destroy()
    self.assertFalse(mock_destroy_at_top.called)
def test_name_does_not_trigger_lazy_loads(self):
    """Rendering .name must not lazy-load unrelated fields."""
    db_inst = db.instance_create(
        self.context, {'user_id': self.context.user_id,
                       'project_id': self.context.project_id,
                       'host': 'foo'})
    inst = objects.Instance.get_by_uuid(self.context, db_inst['uuid'])
    self.assertFalse(inst.obj_attr_is_set('fault'))
    self.flags(instance_name_template='foo-%(uuid)s')
    self.assertEqual('foo-%s' % db_inst['uuid'], inst.name)
    # Building the name above must not have pulled in 'fault'.
    self.assertFalse(inst.obj_attr_is_set('fault'))
def test_from_db_object_not_overwrite_info_cache(self):
    """An already-set info_cache object is kept by _from_db_object."""
    existing_cache = instance_info_cache.InstanceInfoCache()
    inst = objects.Instance(context=self.context,
                            info_cache=existing_cache)
    db_inst = fake_instance.fake_db_instance()
    db_inst['info_cache'] = dict(test_instance_info_cache.fake_info_cache)
    inst._from_db_object(self.context, inst, db_inst,
                         expected_attrs=['info_cache'])
    # Identity check: the original cache object must not be replaced.
    self.assertIs(existing_cache, inst.info_cache)
def test_from_db_object_info_cache_not_set(self):
    """No info_cache in the DB record leaves the field as None."""
    inst = instance.Instance(context=self.context, info_cache=None)
    db_inst = fake_instance.fake_db_instance()
    del db_inst['info_cache']
    inst._from_db_object(self.context, inst, db_inst,
                         expected_attrs=['info_cache'])
    self.assertIsNone(inst.info_cache)
def test_from_db_object_security_groups_net_set(self):
    """No security_groups in the DB record yields an empty group list.

    NOTE(review): 'net_set' in the test name looks like a typo for
    'not_set'; kept as-is to preserve the test id.
    """
    inst = instance.Instance(context=self.context, info_cache=None)
    db_inst = fake_instance.fake_db_instance()
    del db_inst['security_groups']
    inst._from_db_object(self.context, inst, db_inst,
                         expected_attrs=['security_groups'])
    self.assertEqual([], inst.security_groups.objects)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
def test_get_with_pci_requests(self, mock_get):
    """expected_attrs=['pci_requests'] loads the PCI requests object."""
    mock_get.return_value = objects.InstancePCIRequests()
    db_instance = db.instance_create(self.context, {
        'user_id': self.context.user_id,
        'project_id': self.context.project_id})
    # Local renamed from 'instance' to 'inst': the old name shadowed the
    # 'instance' module imported at the top of this file.
    inst = objects.Instance.get_by_uuid(
        self.context, db_instance['uuid'],
        expected_attrs=['pci_requests'])
    self.assertTrue(inst.obj_attr_is_set('pci_requests'))
    self.assertIsNotNone(inst.pci_requests)
def test_get_flavor(self):
    """get_flavor() with no namespace returns the 'flavor' field."""
    default_flavor = flavors.get_default_flavor()
    inst = objects.Instance(flavor=default_flavor)
    self.assertEqual(default_flavor['flavorid'],
                     inst.get_flavor().flavorid)
def test_get_flavor_namespace(self):
    """get_flavor('old') returns the namespaced 'old_flavor' field."""
    default_flavor = flavors.get_default_flavor()
    inst = objects.Instance(old_flavor=default_flavor)
    self.assertEqual(default_flavor['flavorid'],
                     inst.get_flavor('old').flavorid)
def _test_set_flavor(self, namespace):
    """set_flavor() stores under the optionally-namespaced field."""
    field = '%sflavor' % (('%s_' % namespace) if namespace else '')
    default_flavor = flavors.get_default_flavor()
    inst = objects.Instance()
    with mock.patch.object(inst, 'save'):
        inst.set_flavor(default_flavor, namespace)
    self.assertEqual(default_flavor['flavorid'],
                     getattr(inst, field).flavorid)
def test_set_flavor(self):
    """set_flavor() with no namespace."""
    self._test_set_flavor(namespace=None)
def test_set_flavor_namespace(self):
    """set_flavor() with the 'old' namespace."""
    self._test_set_flavor(namespace='old')
def test_delete_flavor(self):
    """delete_flavor('old') clears the old_flavor field."""
    inst = objects.Instance(old_flavor=flavors.get_default_flavor())
    with mock.patch.object(inst, 'save'):
        inst.delete_flavor('old')
    self.assertIsNone(inst.old_flavor)
def test_delete_flavor_no_namespace_fails(self):
    """delete_flavor() requires a non-empty namespace."""
    inst = objects.Instance(system_metadata={})
    for bad_namespace in (None, ''):
        self.assertRaises(ValueError, inst.delete_flavor, bad_namespace)
@mock.patch.object(db, 'instance_metadata_delete')
def test_delete_metadata_key(self, db_delete):
    """delete_metadata_key() updates the dict without dirtying it."""
    inst = objects.Instance(context=self.context, id=1, uuid='fake-uuid')
    inst.metadata = {'foo': '1', 'bar': '2'}
    inst.obj_reset_changes()
    inst.delete_metadata_key('foo')
    self.assertEqual({'bar': '2'}, inst.metadata)
    # The deletion goes straight to the DB; the object stays clean.
    self.assertEqual({}, inst.obj_get_changes())
    db_delete.assert_called_once_with(self.context, inst.uuid, 'foo')
def test_reset_changes(self):
    """obj_reset_changes() snapshots only the named metadata fields."""
    inst = objects.Instance()
    inst.metadata = {'1985': 'present'}
    inst.system_metadata = {'1955': 'past'}
    self.assertEqual({}, inst._orig_metadata)
    inst.obj_reset_changes(['metadata'])
    # Only 'metadata' was reset; system_metadata keeps no snapshot.
    self.assertEqual({'1985': 'present'}, inst._orig_metadata)
    self.assertEqual({}, inst._orig_system_metadata)
def test_load_generic_calls_handler(self):
    """Touching an unset field routes through _load_generic."""
    inst = objects.Instance(context=self.context, uuid='fake-uuid')

    def set_sysmeta(name):
        inst.system_metadata = {}

    with mock.patch.object(inst, '_load_generic') as mock_load:
        mock_load.side_effect = set_sysmeta
        inst.system_metadata
        mock_load.assert_called_once_with('system_metadata')
    def test_load_fault_calls_handler(self):
        """Touching an unset fault field dispatches to _load_fault."""
        inst = objects.Instance(context=self.context,
                                uuid='fake-uuid')
        with mock.patch.object(inst, '_load_fault') as mock_load:
            def fake_load():
                inst.fault = None
            mock_load.side_effect = fake_load
            inst.fault
            mock_load.assert_called_once_with()
    def test_load_ec2_ids_calls_handler(self):
        """Touching an unset ec2_ids field dispatches to _load_ec2_ids."""
        inst = objects.Instance(context=self.context,
                                uuid='fake-uuid')
        with mock.patch.object(inst, '_load_ec2_ids') as mock_load:
            def fake_load():
                inst.ec2_ids = objects.EC2Ids(instance_id='fake-inst',
                                              ami_id='fake-ami')
            mock_load.side_effect = fake_load
            inst.ec2_ids
            mock_load.assert_called_once_with()
    def test_load_migration_context(self):
        """Lazy-loading migration_context queries by the instance uuid."""
        inst = instance.Instance(context=self.context,
                                 uuid='fake-uuid')
        with mock.patch.object(
                objects.MigrationContext, 'get_by_instance_uuid',
                return_value=test_mig_ctxt.fake_migration_context_obj
        ) as mock_get:
            inst.migration_context
            mock_get.assert_called_once_with(self.context, inst.uuid)
    def test_load_migration_context_no_context(self):
        """A missing migration context loads as None instead of raising."""
        inst = instance.Instance(context=self.context,
                                 uuid='fake-uuid')
        with mock.patch.object(
                objects.MigrationContext, 'get_by_instance_uuid',
                side_effect=exception.MigrationContextNotFound(
                    instance_uuid=inst.uuid)
        ) as mock_get:
            mig_ctxt = inst.migration_context
            mock_get.assert_called_once_with(self.context, inst.uuid)
            self.assertIsNone(mig_ctxt)
    def test_load_migration_context_no_data(self):
        """_load_migration_context(db_context=None) short-circuits the DB."""
        inst = instance.Instance(context=self.context,
                                 uuid='fake-uuid')
        with mock.patch.object(
                objects.MigrationContext, 'get_by_instance_uuid') as mock_get:
            loaded_ctxt = inst._load_migration_context(db_context=None)
            self.assertFalse(mock_get.called)
            self.assertIsNone(loaded_ctxt)
    def test_apply_revert_migration_context(self):
        """apply/revert swap the instance numa_topology via the context."""
        inst = instance.Instance(context=self.context,
                                 uuid='fake-uuid', numa_topology=None)
        inst.migration_context = test_mig_ctxt.get_fake_migration_context_obj(
            self.context)
        inst.apply_migration_context()
        self.assertIsInstance(inst.numa_topology, objects.InstanceNUMATopology)
        # Reverting restores the original (None) topology.
        inst.revert_migration_context()
        self.assertIsNone(inst.numa_topology)
    def test_drop_migration_context(self):
        """drop_migration_context clears the field and nulls it in the DB."""
        inst = instance.Instance(context=self.context,
                                 uuid='fake-uuid')
        inst.migration_context = test_mig_ctxt.get_fake_migration_context_obj(
            self.context)
        inst.migration_context.instance_uuid = inst.uuid
        inst.migration_context.id = 7
        with mock.patch(
                'nova.db.instance_extra_update_by_uuid') as update_extra:
            inst.drop_migration_context()
            self.assertIsNone(inst.migration_context)
            update_extra.assert_called_once_with(self.context, inst.uuid,
                                                 {"migration_context": None})
    def test_mutated_migration_context(self):
        """mutated_migration_context temporarily applies the new topology."""
        numa_topology = (test_instance_numa_topology.
                         fake_obj_numa_topology.obj_clone())
        numa_topology.cells[0].memory = 1024
        numa_topology.cells[1].memory = 1024
        inst = instance.Instance(context=self.context,
                                 uuid='fake-uuid', numa_topology=numa_topology)
        inst.migration_context = test_mig_ctxt.get_fake_migration_context_obj(
            self.context)
        with inst.mutated_migration_context():
            # Inside the context manager the new topology is in effect.
            self.assertIs(inst.numa_topology,
                          inst.migration_context.new_numa_topology)
        # On exit the original topology is restored.
        self.assertIs(numa_topology, inst.numa_topology)
@mock.patch.object(objects.Instance, 'get_by_uuid')
def test_load_generic(self, mock_get):
inst2 = instance.Instance(metadata={'foo': 'bar'})
mock_get.return_value = inst2
inst = instance.Instance(context=self.context,
uuid='fake-uuid')
inst.metadata
    @mock.patch('nova.db.instance_fault_get_by_instance_uuids')
    def test_load_fault(self, mock_get):
        """Lazy fault load fetches by uuid without dirtying other fields."""
        fake_fault = test_instance_fault.fake_faults['fake-uuid'][0]
        mock_get.return_value = {'fake': [fake_fault]}
        inst = objects.Instance(context=self.context, uuid='fake')
        fault = inst.fault
        mock_get.assert_called_once_with(self.context, ['fake'])
        self.assertEqual(fake_fault['id'], fault.id)
        # Loading fault must not mark unrelated fields as changed.
        self.assertNotIn('metadata', inst.obj_what_changed())
    @mock.patch('nova.objects.EC2Ids.get_by_instance')
    def test_load_ec2_ids(self, mock_get):
        """Lazy ec2_ids load delegates to EC2Ids.get_by_instance."""
        fake_ec2_ids = objects.EC2Ids(instance_id='fake-inst',
                                      ami_id='fake-ami')
        mock_get.return_value = fake_ec2_ids
        inst = objects.Instance(context=self.context, uuid='fake')
        ec2_ids = inst.ec2_ids
        mock_get.assert_called_once_with(self.context, inst)
        self.assertEqual(fake_ec2_ids, ec2_ids)
    def test_get_with_extras(self):
        """pci_requests is only loaded when listed in expected_attrs."""
        pci_requests = objects.InstancePCIRequests(requests=[
            objects.InstancePCIRequest(count=123, spec=[])])
        inst = objects.Instance(context=self.context,
                                user_id=self.context.user_id,
                                project_id=self.context.project_id,
                                pci_requests=pci_requests)
        inst.create()
        uuid = inst.uuid
        # Without expected_attrs the extra field stays unset...
        inst = objects.Instance.get_by_uuid(self.context, uuid)
        self.assertFalse(inst.obj_attr_is_set('pci_requests'))
        # ...and with it, the field comes back populated.
        inst = objects.Instance.get_by_uuid(
            self.context, uuid, expected_attrs=['pci_requests'])
        self.assertTrue(inst.obj_attr_is_set('pci_requests'))
class TestInstanceObject(test_objects._LocalTest,
                         _TestInstanceObject):
    """Runs _TestInstanceObject tests against the local object backend."""
    def _test_save_objectfield_fk_constraint_fails(self, foreign_key,
                                                   expected_exception):
        """Force a DBReferenceError out of every _save_<field> method and
        check it is translated to *expected_exception* by Instance.save().
        """
        # NOTE(danms): Do this here and not in the remote test because
        # we're mocking out obj_attr_is_set() without the thing actually
        # being set, which confuses the heck out of the serialization
        # stuff.
        error = db_exc.DBReferenceError('table', 'constraint', foreign_key,
                                        'key_table')
        # Prevent lazy-loading any fields, results in InstanceNotFound
        attrs = objects.instance.INSTANCE_OPTIONAL_ATTRS
        instance = fake_instance.fake_instance_obj(self.context,
                                                   expected_attrs=attrs)
        fields_with_save_methods = [field for field in instance.fields
                                    if hasattr(instance, '_save_%s' % field)]
        for field in fields_with_save_methods:
            # The decorators are applied per-iteration, so each _test()
            # patches only the current field's save method.
            @mock.patch.object(instance, '_save_%s' % field)
            @mock.patch.object(instance, 'obj_attr_is_set')
            def _test(mock_is_set, mock_save_field):
                mock_is_set.return_value = True
                mock_save_field.side_effect = error
                instance.obj_reset_changes(fields=[field])
                instance._changed_fields.add(field)
                self.assertRaises(expected_exception, instance.save)
                instance.obj_reset_changes(fields=[field])
            _test()
    def test_save_objectfield_missing_instance_row(self):
        """FK failures on instance_uuid surface as InstanceNotFound."""
        self._test_save_objectfield_fk_constraint_fails(
            'instance_uuid', exception.InstanceNotFound)
    def test_save_objectfield_reraises_if_not_instance_related(self):
        """Unrelated FK failures are re-raised untranslated."""
        self._test_save_objectfield_fk_constraint_fails(
            'other_foreign_key', db_exc.DBReferenceError)
class TestRemoteInstanceObject(test_objects._RemoteTest,
                               _TestInstanceObject):
    """Runs _TestInstanceObject tests through the remote (RPC) path."""
    pass
class _TestInstanceListObject(object):
def fake_instance(self, id, updates=None):
db_inst = fake_instance.fake_db_instance(id=2,
access_ip_v4='1.2.3.4',
access_ip_v6='::1')
db_inst['terminated_at'] = None
db_inst['deleted_at'] = None
db_inst['created_at'] = None
db_inst['updated_at'] = None
db_inst['launched_at'] = datetime.datetime(1955, 11, 12,
22, 4, 0)
db_inst['security_groups'] = []
db_inst['deleted'] = 0
db_inst['info_cache'] = dict(test_instance_info_cache.fake_info_cache,
instance_uuid=db_inst['uuid'])
if updates:
db_inst.update(updates)
return db_inst
    def test_get_all_by_filters(self):
        """A single sort key/dir routes to the non-sorted DB API."""
        fakes = [self.fake_instance(1), self.fake_instance(2)]
        self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
        db.instance_get_all_by_filters(self.context, {'foo': 'bar'}, 'uuid',
                                       'asc', limit=None, marker=None,
                                       columns_to_join=['metadata'],
                                       use_slave=False).AndReturn(fakes)
        self.mox.ReplayAll()
        inst_list = objects.InstanceList.get_by_filters(
            self.context, {'foo': 'bar'}, 'uuid', 'asc',
            expected_attrs=['metadata'], use_slave=False)
        for i in range(0, len(fakes)):
            self.assertIsInstance(inst_list.objects[i], instance.Instance)
            self.assertEqual(fakes[i]['uuid'], inst_list.objects[i].uuid)
    def test_get_all_by_filters_sorted(self):
        """Multiple sort keys route to the sorted DB API variant."""
        fakes = [self.fake_instance(1), self.fake_instance(2)]
        self.mox.StubOutWithMock(db, 'instance_get_all_by_filters_sort')
        db.instance_get_all_by_filters_sort(self.context, {'foo': 'bar'},
                                            limit=None, marker=None,
                                            columns_to_join=['metadata'],
                                            use_slave=False,
                                            sort_keys=['uuid'],
                                            sort_dirs=['asc']).AndReturn(fakes)
        self.mox.ReplayAll()
        inst_list = objects.InstanceList.get_by_filters(
            self.context, {'foo': 'bar'}, expected_attrs=['metadata'],
            use_slave=False, sort_keys=['uuid'], sort_dirs=['asc'])
        for i in range(0, len(fakes)):
            self.assertIsInstance(inst_list.objects[i], instance.Instance)
            self.assertEqual(fakes[i]['uuid'], inst_list.objects[i].uuid)
    @mock.patch.object(db, 'instance_get_all_by_filters_sort')
    @mock.patch.object(db, 'instance_get_all_by_filters')
    def test_get_all_by_filters_calls_non_sort(self,
                                               mock_get_by_filters,
                                               mock_get_by_filters_sort):
        '''Verifies InstanceList.get_by_filters calls correct DB function.'''
        # Single sort key/direction is set, call non-sorted DB function
        objects.InstanceList.get_by_filters(
            self.context, {'foo': 'bar'}, sort_key='key', sort_dir='dir',
            limit=100, marker='uuid', use_slave=True)
        mock_get_by_filters.assert_called_once_with(
            self.context, {'foo': 'bar'}, 'key', 'dir', limit=100,
            marker='uuid', columns_to_join=None, use_slave=True)
        # The sorted variant must not have been touched at all.
        self.assertEqual(0, mock_get_by_filters_sort.call_count)
    @mock.patch.object(db, 'instance_get_all_by_filters_sort')
    @mock.patch.object(db, 'instance_get_all_by_filters')
    def test_get_all_by_filters_calls_sort(self,
                                           mock_get_by_filters,
                                           mock_get_by_filters_sort):
        '''Verifies InstanceList.get_by_filters calls correct DB function.'''
        # Multiple sort keys/directions are set, call sorted DB function
        objects.InstanceList.get_by_filters(
            self.context, {'foo': 'bar'}, limit=100, marker='uuid',
            use_slave=True, sort_keys=['key1', 'key2'],
            sort_dirs=['dir1', 'dir2'])
        mock_get_by_filters_sort.assert_called_once_with(
            self.context, {'foo': 'bar'}, limit=100,
            marker='uuid', columns_to_join=None, use_slave=True,
            sort_keys=['key1', 'key2'], sort_dirs=['dir1', 'dir2'])
        # The non-sorted variant must not have been touched at all.
        self.assertEqual(0, mock_get_by_filters.call_count)
    def test_get_all_by_filters_works_for_cleaned(self):
        """deleted/cleaned filters pass through and select matching rows."""
        fakes = [self.fake_instance(1),
                 self.fake_instance(2, updates={'deleted': 2,
                                                'cleaned': None})]
        self.context.read_deleted = 'yes'
        self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
        db.instance_get_all_by_filters(self.context,
                                       {'deleted': True, 'cleaned': False},
                                       'uuid', 'asc', limit=None, marker=None,
                                       columns_to_join=['metadata'],
                                       use_slave=False).AndReturn(
                                           [fakes[1]])
        self.mox.ReplayAll()
        inst_list = objects.InstanceList.get_by_filters(
            self.context, {'deleted': True, 'cleaned': False}, 'uuid', 'asc',
            expected_attrs=['metadata'], use_slave=False)
        self.assertEqual(1, len(inst_list))
        self.assertIsInstance(inst_list.objects[0], instance.Instance)
        self.assertEqual(fakes[1]['uuid'], inst_list.objects[0].uuid)
    def test_get_by_host(self):
        """get_by_host returns clean Instance objects bound to the context."""
        fakes = [self.fake_instance(1),
                 self.fake_instance(2)]
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
        db.instance_get_all_by_host(self.context, 'foo',
                                    columns_to_join=None,
                                    use_slave=False).AndReturn(fakes)
        self.mox.ReplayAll()
        inst_list = objects.InstanceList.get_by_host(self.context, 'foo')
        for i in range(0, len(fakes)):
            self.assertIsInstance(inst_list.objects[i], instance.Instance)
            self.assertEqual(fakes[i]['uuid'], inst_list.objects[i].uuid)
            self.assertEqual(self.context, inst_list.objects[i]._context)
        # Freshly hydrated lists should carry no pending changes.
        self.assertEqual(set(), inst_list.obj_what_changed())
    def test_get_by_host_and_node(self):
        """get_by_host_and_node delegates with columns_to_join=None."""
        fakes = [self.fake_instance(1),
                 self.fake_instance(2)]
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_node')
        db.instance_get_all_by_host_and_node(self.context, 'foo', 'bar',
                                             columns_to_join=None).AndReturn(
                                                 fakes)
        self.mox.ReplayAll()
        inst_list = objects.InstanceList.get_by_host_and_node(self.context,
                                                              'foo', 'bar')
        for i in range(0, len(fakes)):
            self.assertIsInstance(inst_list.objects[i], instance.Instance)
            self.assertEqual(fakes[i]['uuid'], inst_list.objects[i].uuid)
    def test_get_by_host_and_not_type(self):
        """get_by_host_and_not_type passes the type_id straight through."""
        fakes = [self.fake_instance(1),
                 self.fake_instance(2)]
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_not_type')
        db.instance_get_all_by_host_and_not_type(self.context, 'foo',
                                                 type_id='bar').AndReturn(
                                                     fakes)
        self.mox.ReplayAll()
        inst_list = objects.InstanceList.get_by_host_and_not_type(
            self.context, 'foo', 'bar')
        for i in range(0, len(fakes)):
            self.assertIsInstance(inst_list.objects[i], instance.Instance)
            self.assertEqual(fakes[i]['uuid'], inst_list.objects[i].uuid)
    @mock.patch('nova.objects.instance._expected_cols')
    @mock.patch('nova.db.instance_get_all')
    def test_get_all(self, mock_get_all, mock_exp):
        """get_all expands expected_attrs via _expected_cols for the join."""
        fakes = [self.fake_instance(1), self.fake_instance(2)]
        mock_get_all.return_value = fakes
        mock_exp.return_value = mock.sentinel.exp_att
        inst_list = objects.InstanceList.get_all(
            self.context, expected_attrs='fake')
        mock_get_all.assert_called_once_with(
            self.context, columns_to_join=mock.sentinel.exp_att)
        for i in range(0, len(fakes)):
            self.assertIsInstance(inst_list.objects[i], instance.Instance)
            self.assertEqual(fakes[i]['uuid'], inst_list.objects[i].uuid)
    def test_get_hung_in_rebooting(self):
        """get_hung_in_rebooting wraps the matching DB rows as objects."""
        fakes = [self.fake_instance(1),
                 self.fake_instance(2)]
        dt = utils.isotime()
        self.mox.StubOutWithMock(db, 'instance_get_all_hung_in_rebooting')
        db.instance_get_all_hung_in_rebooting(self.context, dt).AndReturn(
            fakes)
        self.mox.ReplayAll()
        inst_list = objects.InstanceList.get_hung_in_rebooting(self.context,
                                                               dt)
        for i in range(0, len(fakes)):
            self.assertIsInstance(inst_list.objects[i], instance.Instance)
            self.assertEqual(fakes[i]['uuid'], inst_list.objects[i].uuid)
    def test_get_active_by_window_joined(self):
        """A naive begin datetime is made tz-aware before hitting the DB."""
        fakes = [self.fake_instance(1), self.fake_instance(2)]
        # NOTE(mriedem): Send in a timezone-naive datetime since the
        # InstanceList.get_active_by_window_joined method should convert it
        # to tz-aware for the DB API call, which we'll assert with our stub.
        dt = timeutils.utcnow()
        def fake_instance_get_active_by_window_joined(context, begin, end,
                                                      project_id, host,
                                                      columns_to_join):
            # make sure begin is tz-aware
            self.assertIsNotNone(begin.utcoffset())
            self.assertIsNone(end)
            self.assertEqual(['metadata'], columns_to_join)
            return fakes
        with mock.patch.object(db, 'instance_get_active_by_window_joined',
                               fake_instance_get_active_by_window_joined):
            inst_list = objects.InstanceList.get_active_by_window_joined(
                self.context, dt, expected_attrs=['metadata'])
            for fake, obj in zip(fakes, inst_list.objects):
                self.assertIsInstance(obj, instance.Instance)
                self.assertEqual(fake['uuid'], obj.uuid)
    def test_with_fault(self):
        """expected_attrs=['fault'] joins faults onto returned instances."""
        fake_insts = [
            fake_instance.fake_db_instance(uuid='fake-uuid', host='host'),
            fake_instance.fake_db_instance(uuid='fake-inst2', host='host'),
            ]
        fake_faults = test_instance_fault.fake_faults
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
        self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
        db.instance_get_all_by_host(self.context, 'host',
                                    columns_to_join=[],
                                    use_slave=False
                                    ).AndReturn(fake_insts)
        db.instance_fault_get_by_instance_uuids(
            self.context, [x['uuid'] for x in fake_insts]
            ).AndReturn(fake_faults)
        self.mox.ReplayAll()
        instances = objects.InstanceList.get_by_host(self.context, 'host',
                                                     expected_attrs=['fault'],
                                                     use_slave=False)
        self.assertEqual(2, len(instances))
        # Only the first instance has a fault on record.
        self.assertEqual(fake_faults['fake-uuid'][0],
                         dict(instances[0].fault))
        self.assertIsNone(instances[1].fault)
    def test_fill_faults(self):
        """fill_faults attaches each instance's first fault and reports
        which uuids actually had one, without dirtying the objects.
        """
        self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
        inst1 = objects.Instance(uuid='uuid1')
        inst2 = objects.Instance(uuid='uuid2')
        insts = [inst1, inst2]
        for inst in insts:
            inst.obj_reset_changes()
        db_faults = {
            'uuid1': [{'id': 123,
                       'instance_uuid': 'uuid1',
                       'code': 456,
                       'message': 'Fake message',
                       'details': 'No details',
                       'host': 'foo',
                       'deleted': False,
                       'deleted_at': None,
                       'updated_at': None,
                       'created_at': None,
                       }
                      ]}
        db.instance_fault_get_by_instance_uuids(self.context,
                                                [x.uuid for x in insts],
                                                ).AndReturn(db_faults)
        self.mox.ReplayAll()
        inst_list = objects.InstanceList()
        inst_list._context = self.context
        inst_list.objects = insts
        faulty = inst_list.fill_faults()
        self.assertEqual(['uuid1'], list(faulty))
        self.assertEqual(db_faults['uuid1'][0]['message'],
                         inst_list[0].fault.message)
        self.assertIsNone(inst_list[1].fault)
        for inst in inst_list:
            self.assertEqual(set(), inst.obj_what_changed())
    @mock.patch('nova.objects.instance.Instance.obj_make_compatible')
    def test_get_by_security_group(self, mock_compat):
        """get_by_security_group inflates instances from the secgroup row."""
        fake_secgroup = dict(test_security_group.fake_secgroup)
        fake_secgroup['instances'] = [
            fake_instance.fake_db_instance(id=1,
                                           system_metadata={'foo': 'bar'}),
            fake_instance.fake_db_instance(id=2),
            ]
        with mock.patch.object(db, 'security_group_get') as sgg:
            sgg.return_value = fake_secgroup
            secgroup = security_group.SecurityGroup()
            secgroup.id = fake_secgroup['id']
            instances = instance.InstanceList.get_by_security_group(
                self.context, secgroup)
        self.assertEqual(2, len(instances))
        self.assertEqual([1, 2], [x.id for x in instances])
        # system_metadata from the DB row must come through as set.
        self.assertTrue(instances[0].obj_attr_is_set('system_metadata'))
        self.assertEqual({'foo': 'bar'}, instances[0].system_metadata)
    def test_get_by_grantee_security_group_ids(self):
        """get_by_grantee_security_group_ids delegates to the DB API."""
        fake_instances = [
            fake_instance.fake_db_instance(id=1),
            fake_instance.fake_db_instance(id=2)
            ]
        with mock.patch.object(
            db, 'instance_get_all_by_grantee_security_groups') as igabgsg:
            igabgsg.return_value = fake_instances
            secgroup_ids = [1]
            instances = objects.InstanceList.get_by_grantee_security_group_ids(
                self.context, secgroup_ids)
            igabgsg.assert_called_once_with(self.context, secgroup_ids)
        self.assertEqual(2, len(instances))
        self.assertEqual([1, 2], [x.id for x in instances])
class TestInstanceListObject(test_objects._LocalTest,
                             _TestInstanceListObject):
    """Runs the list tests against the local (in-process) backend."""
    pass
class TestRemoteInstanceListObject(test_objects._RemoteTest,
                                   _TestInstanceListObject):
    """Runs the list tests through the remote (RPC) object path."""
    pass
class TestInstanceObjectMisc(test.TestCase):
    """Tests for module-level helpers in nova.objects.instance."""
    def test_expected_cols(self):
        """_expected_cols keeps only joinable fields; None passes through."""
        self.stubs.Set(instance, '_INSTANCE_OPTIONAL_JOINED_FIELDS', ['bar'])
        self.assertEqual(['bar'], instance._expected_cols(['foo', 'bar']))
        self.assertIsNone(instance._expected_cols(None))
    def test_expected_cols_extra(self):
        """Fields stored in 'extra' expand to extra.<field> join columns."""
        self.assertEqual(['metadata', 'extra', 'extra.numa_topology'],
                         instance._expected_cols(['metadata',
                                                  'numa_topology']))
|
|
# Copyright 2016 Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
from deploy_board.webapp.helpers.rodimus_client import RodimusClient
# Shared client used by every helper in this module.
rodimus_client = RodimusClient()
# Instance-termination policies accepted for auto-scaling groups.
TerminationPolicy = ["Default", "OldestInstance", "NewestInstance", "OldestLaunchConfiguration",
                     "ClosestToNextInstanceHour"]
# Alarm comparison operators accepted for scaling alarms.
Comparator = ["GreaterThanOrEqualToThreshold", "GreaterThanThreshold", "LessThanOrEqualToThreshold",
              "LessThanThreshold"]
#
# Groups resource
#
def get_env_group_names(request, start, size):
    """List group names, paginated by *start*/*size*."""
    token = request.teletraan_user_id.token
    query = {"start": start, "size": size}
    return rodimus_client.get("/groups/names", token, params=query)
def create_launch_config(request, group_name, asg_info):
    """Create a launch config for *group_name*."""
    token = request.teletraan_user_id.token
    return rodimus_client.post("/groups/%s" % group_name, token, data=asg_info)
def update_launch_config(request, group_name, asg_info):
    """Update the launch config for *group_name*."""
    token = request.teletraan_user_id.token
    return rodimus_client.put("/groups/%s" % group_name, token, data=asg_info)
def update_group_info(request, group_name, group_info):
    """Update the group-level configuration for *group_name*."""
    token = request.teletraan_user_id.token
    endpoint = "/groups/%s/config" % group_name
    return rodimus_client.put(endpoint, token, data=group_info)
def get_group_info(request, group_name):
    """Fetch the group record for *group_name*."""
    token = request.teletraan_user_id.token
    return rodimus_client.get("/groups/%s" % group_name, token)
def launch_hosts(request, group_name, host_count, subnet):
    """Launch *host_count* hosts in *subnet* for *group_name*."""
    token = request.teletraan_user_id.token
    query = {"hostCount": host_count, "subnet": subnet}
    return rodimus_client.put("/groups/%s/hosts" % group_name, token,
                              params=query)
def launch_hosts_with_placement_group(request, group_name, host_count, subnet, placement_group):
    """Launch hosts into a placement group.

    Note: this sends a POST, unlike launch_hosts which sends a PUT.
    """
    token = request.teletraan_user_id.token
    payload = {"hostCount": host_count,
               "cloudLaunchConfig": {"subnet": subnet, "placementGroup": placement_group}}
    return rodimus_client.post("/groups/%s/hosts" % group_name, token,
                               data=payload)
def terminate_all_hosts(request, group_name):
    """Terminate every host in *group_name*."""
    token = request.teletraan_user_id.token
    endpoint = "/groups/%s/terminate/all" % group_name
    return rodimus_client.delete(endpoint, token)
# Health Checks
def create_health_check(request, group_name, health_check_info):
    """Create a health check for *group_name*."""
    token = request.teletraan_user_id.token
    endpoint = "/groups/%s/healthcheck" % group_name
    return rodimus_client.post(endpoint, token, data=health_check_info)
def enable_health_check(request, group_name):
    """Turn health checking on for *group_name*."""
    token = request.teletraan_user_id.token
    endpoint = "/groups/%s/healthcheck/action" % group_name
    return rodimus_client.post(endpoint, token,
                               params=[('actionType', 'ENABLE')])
def disable_health_check(request, group_name):
    """Turn health checking off for *group_name*."""
    token = request.teletraan_user_id.token
    endpoint = "/groups/%s/healthcheck/action" % group_name
    return rodimus_client.post(endpoint, token,
                               params=[('actionType', 'DISABLE')])
def get_health_check_activities(request, group_name, index, size):
    """List health-check runs for *group_name*, paginated."""
    token = request.teletraan_user_id.token
    paging = [('pageIndex', index), ('pageSize', size)]
    return rodimus_client.get("/groups/%s/healthchecks/" % group_name, token,
                              params=paging)
def get_health_check(request, id):
    """Fetch a single health check by its id."""
    token = request.teletraan_user_id.token
    return rodimus_client.get("/groups/healthchecks/%s" % id, token)
def get_health_check_error(request, id):
    """Fetch the error details for a health check by its id."""
    token = request.teletraan_user_id.token
    return rodimus_client.get("/groups/healthchecks/errors/%s" % id, token)
# Config history
def get_config_history(request, group_name, index, size):
    """List configuration-change history for *group_name*, paginated."""
    token = request.teletraan_user_id.token
    paging = [('pageIndex', index), ('pageSize', size)]
    endpoint = "/groups/%s/configs/history/" % group_name
    return rodimus_client.get(endpoint, token, params=paging)
#
# AutoScalingGroups resource
#
def create_autoscaling(request, cluster_name, asg_info):
    """Create an auto-scaling group for *cluster_name*."""
    token = request.teletraan_user_id.token
    endpoint = "/clusters/%s/autoscaling" % cluster_name
    return rodimus_client.post(endpoint, token, data=asg_info)
def update_autoscaling(request, cluster_name, asg_info):
    """Update the auto-scaling group for *cluster_name*."""
    token = request.teletraan_user_id.token
    endpoint = "/clusters/%s/autoscaling" % cluster_name
    return rodimus_client.put(endpoint, token, data=asg_info)
def delete_autoscaling(request, cluster_name, detach_host):
    """Delete the auto-scaling group, optionally detaching its hosts."""
    token = request.teletraan_user_id.token
    endpoint = "/clusters/%s/autoscaling" % cluster_name
    return rodimus_client.delete(endpoint, token,
                                 params=[('detachHosts', detach_host)])
def get_autoscaling(request, cluster_name):
    """Fetch the auto-scaling group for *cluster_name*."""
    token = request.teletraan_user_id.token
    return rodimus_client.get("/clusters/%s/autoscaling" % cluster_name, token)
def get_autoscaling_summary(request, cluster_name):
    """Fetch the auto-scaling summary for *cluster_name*."""
    token = request.teletraan_user_id.token
    endpoint = "/clusters/%s/autoscaling/summary" % cluster_name
    return rodimus_client.get(endpoint, token)
# Asg Actions
def get_autoscaling_status(request, cluster_name):
    """Fetch the auto-scaling status for *cluster_name*."""
    token = request.teletraan_user_id.token
    endpoint = "/clusters/%s/autoscaling/status" % cluster_name
    return rodimus_client.get(endpoint, token)
def enable_autoscaling(request, cluster_name):
    """Enable auto-scaling for *cluster_name*."""
    token = request.teletraan_user_id.token
    endpoint = "/clusters/%s/autoscaling/action" % cluster_name
    return rodimus_client.post(endpoint, token,
                               params=[('actionType', 'ENABLE')])
def disable_autoscaling(request, cluster_name):
    """Disable auto-scaling for *cluster_name*."""
    token = request.teletraan_user_id.token
    endpoint = "/clusters/%s/autoscaling/action" % cluster_name
    return rodimus_client.post(endpoint, token,
                               params=[('actionType', 'DISABLE')])
def disable_scaling_down_event(request, cluster_name):
    """Disable only the scale-down (terminate) side of auto-scaling."""
    token = request.teletraan_user_id.token
    endpoint = "/clusters/%s/autoscaling/action" % cluster_name
    return rodimus_client.post(endpoint, token,
                               params=[('actionType', 'DISABLE_TERMINATE')])
def get_disabled_asg_actions(request, cluster_name):
    """List the currently disabled ASG actions for *cluster_name*."""
    token = request.teletraan_user_id.token
    endpoint = "/clusters/%s/autoscaling/action" % cluster_name
    return rodimus_client.get(endpoint, token)
# Asg Alarms
def put_scaling_policies(request, cluster_name, policies_info):
    """Create/replace the scaling policies for *cluster_name*."""
    token = request.teletraan_user_id.token
    endpoint = "/clusters/%s/autoscaling/policies" % cluster_name
    return rodimus_client.post(endpoint, token, data=policies_info)
def get_policies(request, cluster_name):
    """Fetch the scaling policies for *cluster_name*."""
    token = request.teletraan_user_id.token
    endpoint = "/clusters/%s/autoscaling/policies" % cluster_name
    return rodimus_client.get(endpoint, token)
def add_alarm(request, cluster_name, alarm_infos):
    """Add scaling alarms to *cluster_name*."""
    token = request.teletraan_user_id.token
    endpoint = "/clusters/%s/autoscaling/alarms" % cluster_name
    return rodimus_client.post(endpoint, token, data=alarm_infos)
def update_alarms(request, cluster_name, alarm_infos):
    """Update existing scaling alarms on *cluster_name*."""
    token = request.teletraan_user_id.token
    endpoint = "/clusters/%s/autoscaling/alarms" % cluster_name
    return rodimus_client.put(endpoint, token, data=alarm_infos)
def get_alarms(request, cluster_name):
    """Fetch the scaling alarms configured on *cluster_name*."""
    token = request.teletraan_user_id.token
    endpoint = "/clusters/%s/autoscaling/alarms" % cluster_name
    return rodimus_client.get(endpoint, token)
def delete_alarm(request, cluster_name, alarm_id):
    """Delete a single scaling alarm by id."""
    token = request.teletraan_user_id.token
    endpoint = "/clusters/%s/autoscaling/alarms/%s" % (cluster_name, alarm_id)
    return rodimus_client.delete(endpoint, token)
def get_alarm_state(request, cluster_name):
    """Fetch the current alarm state for *cluster_name*."""
    token = request.teletraan_user_id.token
    endpoint = "/clusters/%s/autoscaling/alarmstate" % cluster_name
    return rodimus_client.get(endpoint, token)
def get_system_metrics(request, cluster_name):
    """Fetch the system metrics available for scaling *cluster_name*."""
    token = request.teletraan_user_id.token
    endpoint = "/clusters/%s/autoscaling/metrics/system" % cluster_name
    return rodimus_client.get(endpoint, token)
# Asg Schedules
def add_scheduled_actions(request, cluster_name, schedule_actions):
    """Add scheduled scaling actions to *cluster_name*."""
    token = request.teletraan_user_id.token
    endpoint = "/clusters/%s/autoscaling/schedules" % cluster_name
    return rodimus_client.post(endpoint, token, data=schedule_actions)
def delete_scheduled_action(request, cluster_name, action_id):
    """Delete one scheduled scaling action by id."""
    token = request.teletraan_user_id.token
    endpoint = "/clusters/%s/autoscaling/schedules/%s" % (cluster_name,
                                                          action_id)
    return rodimus_client.delete(endpoint, token)
def get_scheduled_actions(request, cluster_name):
    """Fetch the scheduled scaling actions for *cluster_name*."""
    token = request.teletraan_user_id.token
    endpoint = "/clusters/%s/autoscaling/schedules" % cluster_name
    return rodimus_client.get(endpoint, token)
# Scaling activities
def get_scaling_activities(request, cluster_name, page_size, token):
    """List scaling activities; *token* here is the pagination cursor."""
    auth_token = request.teletraan_user_id.token
    query = {"size": page_size, "token": token}
    endpoint = "/clusters/%s/autoscaling/activities" % cluster_name
    return rodimus_client.get(endpoint, auth_token, params=query)
# pas
def update_pas_config(request, cluster_name, pas_config):
    """Update the predictive auto-scaling (PAS) config."""
    token = request.teletraan_user_id.token
    endpoint = "/clusters/%s/autoscaling/pas" % cluster_name
    return rodimus_client.put(endpoint, token, data=pas_config)
def get_pas_config(request, cluster_name):
    """Fetch the predictive auto-scaling (PAS) config."""
    token = request.teletraan_user_id.token
    endpoint = "/clusters/%s/autoscaling/pas" % cluster_name
    return rodimus_client.get(endpoint, token)
# hosts
# TODO no usage
def get_autoscaling_group_hosts(request, cluster_name):
    """List the hosts currently in the cluster's auto-scaling group."""
    token = request.teletraan_user_id.token
    endpoint = "/clusters/%s/autoscaling/hosts" % cluster_name
    return rodimus_client.get(endpoint, token)
def hosts_action_in_group(request, cluster_name, host_ids, action):
    """Apply *action* to the given hosts inside the ASG."""
    token = request.teletraan_user_id.token
    endpoint = "/clusters/%s/autoscaling/hosts/action" % cluster_name
    return rodimus_client.post(endpoint, token,
                               params={"actionType": action}, data=host_ids)
def is_hosts_protected(request, cluster_name, host_ids):
    """Query scale-in protection status for the given hosts."""
    token = request.teletraan_user_id.token
    endpoint = "/clusters/%s/autoscaling/host/protection" % cluster_name
    # NOTE: this GET carries a request body (host_ids), matching the
    # Rodimus API's existing contract.
    return rodimus_client.get(endpoint, token, data=host_ids)
|
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
compat_str,
)
from ..utils import (
ExtractorError,
find_xpath_attr,
fix_xml_ampersands,
HEADRequest,
sanitized_Request,
unescapeHTML,
url_basename,
RegexNotFoundError,
)
def _media_xml_tag(tag):
return '{http://search.yahoo.com/mrss/}%s' % tag
class MTVServicesInfoExtractor(InfoExtractor):
_MOBILE_TEMPLATE = None
_LANG = None
@staticmethod
def _id_from_uri(uri):
return uri.split(':')[-1]
# This was originally implemented for ComedyCentral, but it also works here
@staticmethod
def _transform_rtmp_url(rtmp_video_url):
m = re.match(r'^rtmpe?://.*?/(?P<finalid>gsp\..+?/.*)$', rtmp_video_url)
if not m:
return rtmp_video_url
base = 'http://viacommtvstrmfs.fplive.net/'
return base + m.group('finalid')
    def _get_feed_url(self, uri):
        # Hook point: subclasses may derive the feed URL from the mgid URI.
        return self._FEED_URL
def _get_thumbnail_url(self, uri, itemdoc):
search_path = '%s/%s' % (_media_xml_tag('group'), _media_xml_tag('thumbnail'))
thumb_node = itemdoc.find(search_path)
if thumb_node is None:
return None
else:
return thumb_node.attrib['url']
    def _extract_mobile_video_formats(self, mtvn_id):
        """Fetch the mobile page for *mtvn_id* and derive an mp4 format."""
        webpage_url = self._MOBILE_TEMPLATE % mtvn_id
        req = sanitized_Request(webpage_url)
        # Otherwise we get a webpage that would execute some javascript
        req.add_header('User-Agent', 'curl/7')
        webpage = self._download_webpage(req, mtvn_id,
                                         'Downloading mobile page')
        metrics_url = unescapeHTML(self._search_regex(r'<a href="(http://metrics.+?)"', webpage, 'url'))
        # HEAD the metrics link only to follow its redirect chain.
        req = HEADRequest(metrics_url)
        response = self._request_webpage(req, mtvn_id, 'Resolving url')
        url = response.geturl()
        # Transform the url to get the best quality:
        url = re.sub(r'.+pxE=mp4', 'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=0+_pxK=18639+_pxE=mp4', url, 1)
        return [{'url': url, 'ext': 'mp4'}]
    def _extract_video_formats(self, mdoc, mtvn_id):
        """Build the formats list from the mediagen doc *mdoc*.

        Falls back to the mobile formats when the first source is a
        geo-block placeholder and a mobile template is available.
        """
        if re.match(r'.*/(error_country_block\.swf|geoblock\.mp4|copyright_error\.flv(?:\?geo\b.+?)?)$', mdoc.find('.//src').text) is not None:
            if mtvn_id is not None and self._MOBILE_TEMPLATE is not None:
                self.to_screen('The normal version is not available from your '
                               'country, trying with the mobile version')
                return self._extract_mobile_video_formats(mtvn_id)
            raise ExtractorError('This video is not available from your country.',
                                 expected=True)
        formats = []
        for rendition in mdoc.findall('.//rendition'):
            try:
                _, _, ext = rendition.attrib['type'].partition('/')
                rtmp_video_url = rendition.find('./src').text
                # Skip placeholder renditions for unavailable streams.
                if rtmp_video_url.endswith('siteunavail.png'):
                    continue
                formats.append({
                    'ext': ext,
                    'url': self._transform_rtmp_url(rtmp_video_url),
                    'format_id': rendition.get('bitrate'),
                    'width': int(rendition.get('width')),
                    'height': int(rendition.get('height')),
                })
            except (KeyError, TypeError):
                raise ExtractorError('Invalid rendition field.')
        self._sort_formats(formats)
        return formats
def _extract_subtitles(self, mdoc, mtvn_id):
subtitles = {}
for transcript in mdoc.findall('.//transcript'):
if transcript.get('kind') != 'captions':
continue
lang = transcript.get('srclang')
subtitles[lang] = [{
'url': compat_str(typographic.get('src')),
'ext': typographic.get('format')
} for typographic in transcript.findall('./typographic')]
return subtitles
def _get_video_info(self, itemdoc):
uri = itemdoc.find('guid').text
video_id = self._id_from_uri(uri)
self.report_extraction(video_id)
mediagen_url = itemdoc.find('%s/%s' % (_media_xml_tag('group'), _media_xml_tag('content'))).attrib['url']
# Remove the templates, like &device={device}
mediagen_url = re.sub(r'&[^=]*?={.*?}(?=(&|$))', '', mediagen_url)
if 'acceptMethods' not in mediagen_url:
mediagen_url += '&' if '?' in mediagen_url else '?'
mediagen_url += 'acceptMethods=fms'
mediagen_doc = self._download_xml(mediagen_url, video_id,
'Downloading video urls')
item = mediagen_doc.find('./video/item')
if item is not None and item.get('type') == 'text':
message = '%s returned error: ' % self.IE_NAME
if item.get('code') is not None:
message += '%s - ' % item.get('code')
message += item.text
raise ExtractorError(message, expected=True)
description_node = itemdoc.find('description')
if description_node is not None:
description = description_node.text.strip()
else:
description = None
title_el = None
if title_el is None:
title_el = find_xpath_attr(
itemdoc, './/{http://search.yahoo.com/mrss/}category',
'scheme', 'urn:mtvn:video_title')
if title_el is None:
title_el = itemdoc.find('.//{http://search.yahoo.com/mrss/}title')
if title_el is None:
title_el = itemdoc.find('.//title') or itemdoc.find('./title')
if title_el.text is None:
title_el = None
title = title_el.text
if title is None:
raise ExtractorError('Could not find video title')
title = title.strip()
# This a short id that's used in the webpage urls
mtvn_id = None
mtvn_id_node = find_xpath_attr(itemdoc, './/{http://search.yahoo.com/mrss/}category',
'scheme', 'urn:mtvn:id')
if mtvn_id_node is not None:
mtvn_id = mtvn_id_node.text
return {
'title': title,
'formats': self._extract_video_formats(mediagen_doc, mtvn_id),
'subtitles': self._extract_subtitles(mediagen_doc, mtvn_id),
'id': video_id,
'thumbnail': self._get_thumbnail_url(uri, itemdoc),
'description': description,
}
def _get_videos_info(self, uri):
    """Resolve the feed URL for *uri* and return its playlist info."""
    video_id = self._id_from_uri(uri)
    feed_url = self._get_feed_url(uri)
    query = compat_urllib_parse.urlencode({'uri': uri})
    # Assemble "<feed>?[lang=XX&]uri=..." in one pass.
    pieces = [feed_url, '?']
    if self._LANG:
        pieces.append('lang=%s&' % self._LANG)
    pieces.append(query)
    return self._get_videos_info_from_url(''.join(pieces), video_id)
def _get_videos_info_from_url(self, url, video_id):
    """Download the RSS feed at *url* and wrap each <item> as an entry."""
    idoc = self._download_xml(
        url, video_id,
        'Downloading info', transform_source=fix_xml_ampersands)
    entries = []
    for item in idoc.findall('.//item'):
        entries.append(self._get_video_info(item))
    return self.playlist_result(entries)
def _real_extract(self, url):
    """Find the mgid embedded in the page at *url* and extract its videos."""
    title = url_basename(url)
    webpage = self._download_webpage(url, title)
    try:
        # the url can be http://media.mtvnservices.com/fb/{mgid}.swf
        # or http://media.mtvnservices.com/{mgid}
        og_url = self._og_search_video_url(webpage)
        mgid = url_basename(og_url)
        if mgid.endswith('.swf'):
            mgid = mgid[:-4]
    except RegexNotFoundError:
        mgid = None

    # Fall back to page markup when OpenGraph data is missing or does
    # not look like an "mgid:..." identifier.
    if mgid is None or ':' not in mgid:
        mgid = self._search_regex(
            [r'data-mgid="(.*?)"', r'swfobject.embedSWF\(".*?(mgid:.*?)"'],
            webpage, 'mgid', default=None)
        if not mgid:
            # Last resort: the sm4:video:embed meta tag.
            sm4_embed = self._html_search_meta(
                'sm4:video:embed', webpage, 'sm4 embed', default='')
            mgid = self._search_regex(
                r'embed/(mgid:.+?)["\'&?/]', sm4_embed, 'mgid')

    videos_info = self._get_videos_info(mgid)
    return videos_info
class MTVServicesEmbeddedIE(MTVServicesInfoExtractor):
    """Extractor for videos embedded via media.mtvnservices.com iframes."""
    IE_NAME = 'mtvservices:embedded'
    _VALID_URL = r'https?://media\.mtvnservices\.com/embed/(?P<mgid>.+?)(\?|/|$)'

    _TEST = {
        # From http://www.thewrap.com/peter-dinklage-sums-up-game-of-thrones-in-45-seconds-video/
        'url': 'http://media.mtvnservices.com/embed/mgid:uma:video:mtv.com:1043906/cp~vid%3D1043906%26uri%3Dmgid%3Auma%3Avideo%3Amtv.com%3A1043906',
        'md5': 'cb349b21a7897164cede95bd7bf3fbb9',
        'info_dict': {
            'id': '1043906',
            'ext': 'mp4',
            'title': 'Peter Dinklage Sums Up \'Game Of Thrones\' In 45 Seconds',
            'description': '"Sexy sexy sexy, stabby stabby stabby, beautiful language," says Peter Dinklage as he tries summarizing "Game of Thrones" in under a minute.',
        },
    }

    @staticmethod
    def _extract_url(webpage):
        # Return the src of the first embedded mtvnservices iframe,
        # or None when the page contains no such embed.
        mobj = re.search(
            r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//media.mtvnservices.com/embed/.+?)\1', webpage)
        if mobj:
            return mobj.group('url')

    def _get_feed_url(self, uri):
        # The feed URL is advertised in the player's config XML, keyed
        # by the site portion of the mgid (the uri minus the video id).
        video_id = self._id_from_uri(uri)
        site_id = uri.replace(video_id, '')
        config_url = ('http://media.mtvnservices.com/pmt/e1/players/{0}/'
                      'context4/context5/config.xml'.format(site_id))
        config_doc = self._download_xml(config_url, video_id)
        feed_node = config_doc.find('.//feed')
        feed_url = feed_node.text.strip().split('?')[0]
        return feed_url

    def _real_extract(self, url):
        # The mgid is embedded directly in the URL.
        mobj = re.match(self._VALID_URL, url)
        mgid = mobj.group('mgid')
        return self._get_videos_info(mgid)
class MTVIE(MTVServicesInfoExtractor):
    """Extractor for www.mtv.com and m.mtv.com video pages."""
    _VALID_URL = r'''(?x)^https?://
        (?:(?:www\.)?mtv\.com/videos/.+?/(?P<videoid>[0-9]+)/[^/]+$|
           m\.mtv\.com/videos/video\.rbml\?.*?id=(?P<mgid>[^&]+))'''

    _FEED_URL = 'http://www.mtv.com/player/embed/AS3/rss/'

    _TESTS = [
        {
            'url': 'http://www.mtv.com/videos/misc/853555/ours-vh1-storytellers.jhtml',
            'md5': '850f3f143316b1e71fa56a4edfd6e0f8',
            'info_dict': {
                'id': '853555',
                'ext': 'mp4',
                'title': 'Taylor Swift - "Ours (VH1 Storytellers)"',
                'description': 'Album: Taylor Swift performs "Ours" for VH1 Storytellers at Harvey Mudd College.',
            },
        },
    ]

    def _get_thumbnail_url(self, uri, itemdoc):
        # Thumbnails are served straight off the mgid URI.
        return 'http://mtv.mtvnimages.com/uri/' + uri

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('videoid')
        # m.mtv.com URLs carry the mgid directly; www URLs need a
        # webpage scrape to find it.
        uri = mobj.groupdict().get('mgid')
        if uri is None:
            webpage = self._download_webpage(url, video_id)

            # Some videos come from Vevo.com
            m_vevo = re.search(
                r'(?s)isVevoVideo = true;.*?vevoVideoId = "(.*?)";', webpage)
            if m_vevo:
                vevo_id = m_vevo.group(1)
                self.to_screen('Vevo video detected: %s' % vevo_id)
                return self.url_result('vevo:%s' % vevo_id, ie='Vevo')

            uri = self._html_search_regex(r'/uri/(.*?)\?', webpage, 'uri')
        return self._get_videos_info(uri)
class MTVIggyIE(MTVServicesInfoExtractor):
    """Extractor for www.mtviggy.com; only overrides the feed URL."""
    IE_NAME = 'mtviggy.com'
    _VALID_URL = r'https?://www\.mtviggy\.com/videos/.+'
    _TEST = {
        'url': 'http://www.mtviggy.com/videos/arcade-fire-behind-the-scenes-at-the-biggest-music-experiment-yet/',
        'info_dict': {
            'id': '984696',
            'ext': 'mp4',
            'title': 'Arcade Fire: Behind the Scenes at the Biggest Music Experiment Yet',
        }
    }
    _FEED_URL = 'http://all.mtvworldverticals.com/feed-xml/'
class MTVDEIE(MTVServicesInfoExtractor):
    """Extractor for www.mtv.de artist/show/news pages."""
    IE_NAME = 'mtv.de'
    _VALID_URL = r'https?://(?:www\.)?mtv\.de/(?:artists|shows|news)/(?:[^/]+/)*(?P<id>\d+)-[^/#?]+/*(?:[#?].*)?$'
    _TESTS = [{
        'url': 'http://www.mtv.de/artists/10571-cro/videos/61131-traum',
        'info_dict': {
            'id': 'music_video-a50bc5f0b3aa4b3190aa',
            'ext': 'mp4',
            'title': 'MusicVideo_cro-traum',
            'description': 'Cro - Traum',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        },
    }, {
        # mediagen URL without query (e.g. http://videos.mtvnn.com/mediagen/e865da714c166d18d6f80893195fcb97)
        'url': 'http://www.mtv.de/shows/933-teen-mom-2/staffeln/5353/folgen/63565-enthullungen',
        'info_dict': {
            'id': 'local_playlist-f5ae778b9832cc837189',
            'ext': 'mp4',
            'title': 'Episode_teen-mom-2_shows_season-5_episode-1_full-episode_part1',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        },
    }, {
        # single video in pagePlaylist with different id
        'url': 'http://www.mtv.de/news/77491-mtv-movies-spotlight-pixels-teil-3',
        'info_dict': {
            'id': 'local_playlist-4e760566473c4c8c5344',
            'ext': 'mp4',
            'title': 'Article_mtv-movies-spotlight-pixels-teil-3_short-clips_part1',
            'description': 'MTV Movies Supercut',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        },
    }]

    def _real_extract(self, url):
        # The page embeds a JSON playlist in window.pagePlaylist; pick
        # the entry matching the page id and extract its mrss feed.
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        playlist = self._parse_json(
            self._search_regex(
                r'window\.pagePlaylist\s*=\s*(\[.+?\]);\n', webpage, 'page playlist'),
            video_id)

        # news pages contain single video in playlist with different id
        if len(playlist) == 1:
            return self._get_videos_info_from_url(playlist[0]['mrss'], video_id)

        for item in playlist:
            item_id = item.get('id')
            if item_id and compat_str(item_id) == video_id:
                return self._get_videos_info_from_url(item['mrss'], video_id)
|
|
# Copyright (c) 2018 Foundries.io
#
# SPDX-License-Identifier: Apache-2.0
import abc
import argparse
import os
import shutil
import subprocess
from west import cmake
from west import log
from west.build import is_zephyr_build
from west.util import quote_sh_list
from runners.core import BuildConfiguration
from build_helpers import find_build_dir, \
BUILD_DIR_DESCRIPTION
from zephyr_ext_common import Forceable, \
cached_runner_config
# Long-form description shown by 'west sign --help'
# (RawDescriptionHelpFormatter preserves this layout verbatim).
SIGN_DESCRIPTION = '''\
This command automates some of the drudgery of creating signed Zephyr
binaries for chain-loading by a bootloader.
In the simplest usage, run this from your build directory:
west sign -t your_tool -- ARGS_FOR_YOUR_TOOL
Assuming your binary was properly built for processing and handling by
tool "your_tool", this creates zephyr.signed.bin and zephyr.signed.hex
files (if supported by "your_tool") which are ready for use by your
bootloader. The "ARGS_FOR_YOUR_TOOL" value can be any additional
arguments you want to pass to the tool, such as the location of a
signing key, a version identifier, etc.
See tool-specific help below for details.'''

# Tool-specific help appended after the option list in --help output.
SIGN_EPILOG = '''\
imgtool
-------
Currently, MCUboot's 'imgtool' tool is supported. To build a signed
binary you can load with MCUboot using imgtool, run this from your
build directory:
west sign -t imgtool -- --key YOUR_SIGNING_KEY.pem
For this to work, either imgtool must be installed (e.g. using pip3),
or you must pass the path to imgtool.py using the -p option.
The image header size, alignment, and slot sizes are determined from
the build directory using .config and the device tree. A default
version number of 0.0.0+0 is used (which can be overridden by passing
"--version x.y.z+w" after "--key"). As shown above, extra arguments
after a '--' are passed to imgtool directly.'''
class ToggleAction(argparse.Action):
    """argparse action for paired --foo/--no-foo toggle flags.

    Stores True on the destination when the positive form is given,
    and False when the option string begins with '--no-'.
    """

    def __call__(self, parser, args, ignored, option):
        enabled = not option.startswith('--no-')
        setattr(args, self.dest, enabled)
class Sign(Forceable):
    """'west sign' command: sign a built Zephyr binary for chain-loading."""

    def __init__(self):
        super(Sign, self).__init__(
            'sign',
            # Keep this in sync with the string in west-commands.yml.
            'sign a Zephyr binary for bootloader chain-loading',
            SIGN_DESCRIPTION,
            accepts_unknown_args=False)

    def do_add_parser(self, parser_adder):
        """Register the 'sign' subcommand parser and its options."""
        parser = parser_adder.add_parser(
            self.name,
            epilog=SIGN_EPILOG,
            help=self.help,
            formatter_class=argparse.RawDescriptionHelpFormatter,
            description=self.description)
        parser.add_argument('-d', '--build-dir', help=BUILD_DIR_DESCRIPTION)
        self.add_force_arg(parser)

        # general options
        group = parser.add_argument_group('tool control options')
        group.add_argument('-t', '--tool', choices=['imgtool'], required=True,
                           help='''image signing tool name; only imgtool is
                           currently supported''')
        group.add_argument('-p', '--tool-path', default=None,
                           help='''path to the tool itself, if needed''')
        group.add_argument('tool_args', nargs='*', metavar='tool_opt',
                           help='extra option(s) to pass to the signing tool')

        # bin file options
        group = parser.add_argument_group('binary (.bin) file options')
        group.add_argument('--bin', '--no-bin', dest='gen_bin', nargs=0,
                           action=ToggleAction,
                           help='''produce a signed .bin file?
                           (default: yes, if supported)''')
        group.add_argument('-B', '--sbin', metavar='BIN',
                           default='zephyr.signed.bin',
                           help='''signed .bin file name
                           (default: zephyr.signed.bin)''')

        # hex file options
        group = parser.add_argument_group('Intel HEX (.hex) file options')
        group.add_argument('--hex', '--no-hex', dest='gen_hex', nargs=0,
                           action=ToggleAction,
                           help='''produce a signed .hex file?
                           (default: yes, if supported)''')
        group.add_argument('-H', '--shex', metavar='HEX',
                           default='zephyr.signed.hex',
                           help='''signed .hex file name
                           (default: zephyr.signed.hex)''')

        # defaults for hex/bin generation
        parser.set_defaults(gen_bin=True, gen_hex=True)
        return parser

    def do_run(self, args, ignored):
        """Validate the build directory, then dispatch to the chosen signer."""
        if not (args.gen_bin or args.gen_hex):
            # Both outputs were disabled; nothing to do.
            return

        # Provide the build directory if not given, and defer to the signer.
        args.build_dir = find_build_dir(args.build_dir)
        self.args = args        # for check_force

        self.check_force(os.path.isdir(args.build_dir),
                         'no such build directory {}'.format(args.build_dir))
        self.check_force(is_zephyr_build(args.build_dir),
                         "build directory {} doesn't look like a Zephyr build "
                         'directory'.format(args.build_dir))

        if args.tool == 'imgtool':
            signer = ImgtoolSigner()
        # (Add support for other signers here in elif blocks)
        else:
            raise RuntimeError("can't happen")

        signer.sign(self)
class Signer(abc.ABC):
    """Abstract base class for binary-signing back ends.

    To support a new signing tool, subclass this and register the
    subclass in Sign.do_run().
    """

    @abc.abstractmethod
    def sign(self, command):
        """Sign the binaries produced by the build.

        :param command: the invoking Sign command instance
        """
class ImgtoolSigner(Signer):
    """Signer that invokes MCUboot's imgtool on the build outputs."""

    def sign(self, command):
        # Sign whichever of .bin/.hex was requested and exists in the
        # runner configuration.
        args = command.args
        cache = cmake.CMakeCache.from_build_dir(args.build_dir)
        runner_config = cached_runner_config(args.build_dir, cache)
        bcfg = BuildConfiguration(args.build_dir)

        # Build a signed .bin
        if args.gen_bin and runner_config.bin_file:
            sign_bin = self.sign_cmd(command, bcfg, runner_config.bin_file,
                                     args.sbin)
            log.dbg(quote_sh_list(sign_bin))
            subprocess.check_call(sign_bin)

        # Build a signed .hex
        if args.gen_hex and runner_config.hex_file:
            sign_hex = self.sign_cmd(command, bcfg, runner_config.hex_file,
                                     args.shex)
            log.dbg(quote_sh_list(sign_hex))
            subprocess.check_call(sign_hex)

    def sign_cmd(self, command, bcfg, infile, outfile):
        # Assemble the imgtool command line for signing *infile* into
        # *outfile*. Image geometry comes from the build's .config and
        # device tree; missing values simply omit the related flag
        # (get_cfg_str returns None under --force).
        align, vtoff, slot_size = [self.get_cfg_str(command, bcfg, x) for x in
                                   ('DT_FLASH_WRITE_BLOCK_SIZE',
                                    'CONFIG_TEXT_SECTION_OFFSET',
                                    'DT_FLASH_AREA_IMAGE_0_SIZE')]
        align_arg = ['--align', align] if align else []
        header_arg = ['--header-size', vtoff] if vtoff else []
        slot_arg = ['--slot-size', slot_size] if slot_size else []

        args = command.args
        if args.tool_path:
            tool_path = args.tool_path
        else:
            tool_path = shutil.which('imgtool')
            if not tool_path:
                log.die('imgtool not found; either install it',
                        '(e.g. "pip3 install imgtool") or provide --tool-path')

        sign_command = ([tool_path,
                         'sign'] +
                        align_arg +
                        header_arg +
                        slot_arg +
                        # We provide a default --version in case the
                        # user is just messing around and doesn't want
                        # to set one. It will be overridden if there is
                        # a --version in args.tool_args.
                        ['--version', '0.0.0+0',
                         infile,
                         outfile])
        sign_command.extend(args.tool_args)
        return sign_command

    def get_cfg_str(self, command, bcfg, item):
        # Look up *item* in the build configuration. On a miss, either
        # die or (with --force) warn and return None so the caller can
        # omit the corresponding imgtool flag.
        try:
            return str(bcfg[item])
        except KeyError:
            command.check_force(
                False,
                "imgtool parameter unknown, build directory has no {} {}".
                format('device tree define' if item.startswith('DT_') else
                       'Kconfig option',
                       item))
            return None
|
|
import unittest
import collections
from fontTools.misc.py23 import basestring
from fontParts.base import FontPartsError
class TestComponent(unittest.TestCase):
def getComponent_generic(self):
    """Return a component in glyph "B" referencing a 100x100 square "A"."""
    layer, _ = self.objectGenerator("layer")
    glyph = layer.newGlyph("A")
    pen = glyph.getPen()
    # Draw the square: move to the first corner, line to the rest.
    corners = [(0, 0), (0, 100), (100, 100), (100, 0)]
    pen.moveTo(corners[0])
    for corner in corners[1:]:
        pen.lineTo(corner)
    pen.closePath()
    # Name the points so decompose tests can verify they survive.
    for i, point in enumerate(glyph[0].points):
        point.name = "point %d" % i
    glyph = layer.newGlyph("B")
    component = glyph.appendComponent("A")
    component.transformation = (1, 0, 0, 1, 0, 0)
    return component
# ----
# repr
# ----
def test_reprContents(self):
    """_reprContents() must return a list of strings."""
    component = self.getComponent_generic()
    contents = component._reprContents()
    self.assertIsInstance(contents, list)
    for entry in contents:
        self.assertIsInstance(entry, basestring)
def test_reprContents_noGlyph(self):
component, _ = self.objectGenerator("component")
value = component._reprContents()
self.assertIsInstance(value, list)
for i in value:
self.assertIsInstance(i, basestring)
# -------
# Parents
# -------
def test_get_parent_font(self):
    """A component reached via font -> layer -> glyph exposes its font."""
    font, _ = self.objectGenerator("font")
    glyph = font.newLayer("L").newGlyph("X")
    component = glyph.appendComponent("A")
    self.assertIsNotNone(component.font)
    self.assertEqual(component.font, font)
def test_get_parent_noFont(self):
layer, _ = self.objectGenerator("layer")
glyph = layer.newGlyph("X")
component = glyph.appendComponent("A")
self.assertIsNone(component.font)
def test_get_parent_layer(self):
layer, _ = self.objectGenerator("layer")
glyph = layer.newGlyph("X")
component = glyph.appendComponent("A")
self.assertIsNotNone(component.layer)
self.assertEqual(
component.layer,
layer
)
def test_get_parent_noLayer(self):
glyph, _ = self.objectGenerator("glyph")
component = glyph.appendComponent("A")
self.assertIsNone(component.font)
self.assertIsNone(component.layer)
def test_get_parent_glyph(self):
glyph, _ = self.objectGenerator("glyph")
component = glyph.appendComponent("A")
self.assertIsNotNone(component.glyph)
self.assertEqual(
component.glyph,
glyph
)
def test_get_parent_noGlyph(self):
component, _ = self.objectGenerator("component")
self.assertIsNone(component.font)
self.assertIsNone(component.layer)
self.assertIsNone(component.glyph)
def test_set_parent_glyph(self):
glyph, _ = self.objectGenerator("glyph")
component, _ = self.objectGenerator("component")
component.glyph = glyph
self.assertIsNotNone(component.glyph)
self.assertEqual(
component.glyph,
glyph
)
def test_set_parent_glyph_none(self):
component, _ = self.objectGenerator("component")
component.glyph = None
self.assertIsNone(component.glyph)
def test_set_parent_glyph_exists(self):
    # The glyph parent may only be set once: reassigning a component
    # that already belongs to a glyph must raise AssertionError.
    glyph, _ = self.objectGenerator("glyph")
    otherGlyph, _ = self.objectGenerator("glyph")
    component = glyph.appendComponent("A")
    with self.assertRaises(AssertionError):
        component.glyph = otherGlyph
# ----------
# Attributes
# ----------
# baseGlyph
def test_baseGlyph_generic(self):
component = self.getComponent_generic()
self.assertEqual(
component.baseGlyph,
"A"
)
def test_baseGlyph_valid_set(self):
component = self.getComponent_generic()
component.baseGlyph = "B"
self.assertEqual(
component.baseGlyph,
"B"
)
def test_baseGlyph_invalid_set_none(self):
component = self.getComponent_generic()
with self.assertRaises(TypeError):
component.baseGlyph = None
def test_baseGlyph_invalid_set_empty_string(self):
component = self.getComponent_generic()
with self.assertRaises(ValueError):
component.baseGlyph = ""
def test_baseGlyph_invalid_set_int(self):
component = self.getComponent_generic()
with self.assertRaises(TypeError):
component.baseGlyph = 123
# transformation
def test_transformation_generic(self):
component = self.getComponent_generic()
self.assertEqual(
component.transformation,
(1, 0, 0, 1, 0, 0)
)
def test_transformation_valid_set_positive(self):
component = self.getComponent_generic()
component.transformation = (1, 2, 3, 4, 5, 6)
self.assertEqual(
component.transformation,
(1, 2, 3, 4, 5, 6)
)
def test_transformation_valid_set_negative(self):
component = self.getComponent_generic()
component.transformation = (-1, -2, -3, -4, -5, -6)
self.assertEqual(
component.transformation,
(-1, -2, -3, -4, -5, -6)
)
def test_transformation_invalid_set_member(self):
component = self.getComponent_generic()
with self.assertRaises(TypeError):
component.transformation = (1, 0, 0, 1, 0, "0")
def test_transformation_invalid_set_none(self):
component = self.getComponent_generic()
with self.assertRaises(TypeError):
component.transformation = None
# offset
def test_offset_generic(self):
component = self.getComponent_generic()
self.assertEqual(
component.offset,
(0, 0)
)
def test_offset_valid_set_zero(self):
component = self.getComponent_generic()
component.offset = (0, 0)
self.assertEqual(
component.offset,
(0, 0)
)
def test_offset_valid_set_positive_positive(self):
component = self.getComponent_generic()
component.offset = (1, 2)
self.assertEqual(
component.offset,
(1, 2)
)
def test_offset_valid_set_negative_positive(self):
component = self.getComponent_generic()
component.offset = (-1, 2)
self.assertEqual(
component.offset,
(-1, 2)
)
def test_offset_valid_set_positive_negative(self):
component = self.getComponent_generic()
component.offset = (1, -2)
self.assertEqual(
component.offset,
(1, -2)
)
def test_offset_valid_set_negative_negative(self):
component = self.getComponent_generic()
component.offset = (-1, -2)
self.assertEqual(
component.offset,
(-1, -2)
)
def test_offset_invalid_set_real_bogus(self):
component = self.getComponent_generic()
with self.assertRaises(TypeError):
component.offset = (1, "2")
def test_offset_invalid_set_bogus_real(self):
component = self.getComponent_generic()
with self.assertRaises(TypeError):
component.offset = ("1", 2)
def test_offset_invalid_set_int(self):
component = self.getComponent_generic()
with self.assertRaises(TypeError):
component.offset = 1
def test_offset_invalid_set_none(self):
component = self.getComponent_generic()
with self.assertRaises(TypeError):
component.offset = None
# scale
def test_scale_generic(self):
component = self.getComponent_generic()
self.assertEqual(
component.scale,
(1, 1)
)
def test_scale_valid_set_zero(self):
component = self.getComponent_generic()
component.scale = (0, 0)
self.assertEqual(
component.scale,
(0, 0)
)
def test_scale_valid_set_positive_positive(self):
component = self.getComponent_generic()
component.scale = (1, 2)
self.assertEqual(
component.scale,
(1, 2)
)
def test_scale_valid_set_negative_positive(self):
component = self.getComponent_generic()
component.scale = (-1, 2)
self.assertEqual(
component.scale,
(-1, 2)
)
def test_scale_valid_set_positive_negative(self):
component = self.getComponent_generic()
component.scale = (1, -2)
self.assertEqual(
component.scale,
(1, -2)
)
def test_scale_valid_set_negative_negative(self):
component = self.getComponent_generic()
component.scale = (-1, -2)
self.assertEqual(
component.scale,
(-1, -2)
)
def test_scale_invalid_set_real_bogus(self):
component = self.getComponent_generic()
with self.assertRaises(TypeError):
component.scale = (1, "2")
def test_scale_invalid_set_bogus_real(self):
component = self.getComponent_generic()
with self.assertRaises(TypeError):
component.scale = ("1", 2)
def test_scale_invalid_set_int(self):
component = self.getComponent_generic()
with self.assertRaises(TypeError):
component.scale = 1
def test_scale_invalid_set_none(self):
component = self.getComponent_generic()
with self.assertRaises(TypeError):
component.scale = None
# --------------
# Identification
# --------------
# index
def getComponent_index(self):
    """Return a glyph holding components "A", "B", "C" in that order."""
    glyph, _ = self.objectGenerator("glyph")
    for baseName in ("A", "B", "C"):
        glyph.appendComponent(baseName)
    return glyph
def test_get_index_noParent(self):
component, _ = self.objectGenerator("component")
self.assertIsNone(component.index)
def test_get_index(self):
    """Each component reports its position within the parent glyph."""
    glyph = self.getComponent_index()
    indexes = [component.index for component in glyph.components]
    self.assertEqual(indexes, list(range(len(glyph.components))))
def test_set_index_noParent(self):
component, _ = self.objectGenerator("component")
with self.assertRaises(FontPartsError):
component.index = 1
def test_set_index_positive(self):
glyph = self.getComponent_index()
component = glyph.components[0]
component.index = 2
self.assertEqual(
[c.baseGlyph for c in glyph.components],
["B", "A", "C"]
)
def test_set_index_pastLength(self):
glyph = self.getComponent_index()
component = glyph.components[0]
component.index = 20
self.assertEqual(
[c.baseGlyph for c in glyph.components],
["B", "C", "A"]
)
def test_set_index_negative(self):
glyph = self.getComponent_index()
component = glyph.components[1]
component.index = -1
self.assertEqual(
[c.baseGlyph for c in glyph.components],
["B", "A", "C"]
)
# identifier
def test_identifier_get_none(self):
component = self.getComponent_generic()
self.assertIsNone(component.identifier)
def test_identifier_generated_type(self):
component = self.getComponent_generic()
component.generateIdentifier()
self.assertIsInstance(component.identifier, basestring)
def test_identifier_consistency(self):
component = self.getComponent_generic()
component.generateIdentifier()
# get: twice to test consistency
self.assertEqual(component.identifier, component.identifier)
def test_identifier_cannot_set(self):
    # identifier is a read-only property; only generateIdentifier()
    # may assign one, so direct assignment must raise FontPartsError.
    component = self.getComponent_generic()
    with self.assertRaises(FontPartsError):
        component.identifier = "ABC"
# ----
# Copy
# ----
def getComponent_copy(self):
component = self.getComponent_generic()
component.transformation = (1, 2, 3, 4, 5, 6)
return component
def test_copy_seperate_objects(self):
component = self.getComponent_copy()
copied = component.copy()
self.assertIsNot(component, copied)
def test_copy_same_baseGlyph(self):
component = self.getComponent_copy()
copied = component.copy()
self.assertEqual(component.baseGlyph, copied.baseGlyph)
def test_copy_same_transformation(self):
component = self.getComponent_copy()
copied = component.copy()
self.assertEqual(component.transformation, copied.transformation)
def test_copy_same_offset(self):
component = self.getComponent_copy()
copied = component.copy()
self.assertEqual(component.offset, copied.offset)
def test_copy_same_scale(self):
component = self.getComponent_copy()
copied = component.copy()
self.assertEqual(component.scale, copied.scale)
def test_copy_not_identifier(self):
component = self.getComponent_copy()
component.generateIdentifier()
copied = component.copy()
self.assertNotEqual(component.identifier, copied.identifier)
def test_copy_generated_identifier_different(self):
component = self.getComponent_copy()
copied = component.copy()
component.generateIdentifier()
copied.generateIdentifier()
self.assertNotEqual(component.identifier, copied.identifier)
# ----
# Pens
# ----
# draw
def test_draw(self):
from fontTools.pens.recordingPen import RecordingPen
component = self.getComponent_generic()
component.transformation = (1, 2, 3, 4, 5, 6)
pen = RecordingPen()
component.draw(pen)
expected = [('addComponent', ('A', (1.0, 2.0, 3.0, 4.0, 5.0, 6.0)))]
self.assertEqual(
pen.value,
expected
)
# drawPoints
def test_drawPoints(self):
from fontPens.recordingPointPen import RecordingPointPen
component = self.getComponent_generic()
component.transformation = (1, 2, 3, 4, 5, 6)
identifier = component.getIdentifier()
pointPen = RecordingPointPen()
component.drawPoints(pointPen)
expected = [('addComponent',
(u'A', (1.0, 2.0, 3.0, 4.0, 5.0, 6.0)),
{'identifier': identifier})]
self.assertEqual(
pointPen.value,
expected
)
def test_drawPoints_legacy(self):
from .legacyPointPen import LegacyPointPen
component = self.getComponent_generic()
component.transformation = (1, 2, 3, 4, 5, 6)
component.getIdentifier()
pointPen = LegacyPointPen()
component.drawPoints(pointPen)
expected = [('addComponent', (u'A', (1.0, 2.0, 3.0, 4.0, 5.0, 6.0)), {})]
self.assertEqual(
pointPen.value,
expected
)
# --------------
# Transformation
# --------------
def getComponent_transform(self):
component = self.getComponent_generic()
component.transformation = (1, 2, 3, 4, 5, 6)
return component
# transformBy
def test_transformBy_valid_no_origin(self):
component = self.getComponent_transform()
component.transformBy((2, 0, 0, 3, -3, 2))
self.assertEqual(
component.transformation,
(2.0, 6.0, 6.0, 12.0, 7.0, 20.0)
)
def test_transformBy_valid_origin(self):
component = self.getComponent_transform()
component.transformBy((2, 0, 0, 2, 0, 0), origin=(1, 2))
self.assertEqual(
component.transformation,
(2.0, 4.0, 6.0, 8.0, 9.0, 10.0)
)
def test_transformBy_invalid_one_string_value(self):
component = self.getComponent_transform()
with self.assertRaises(TypeError):
component.transformBy((1, 0, 0, 1, 0, "0"))
def test_transformBy_invalid_all_string_values(self):
component = self.getComponent_transform()
with self.assertRaises(TypeError):
component.transformBy("1, 0, 0, 1, 0, 0")
def test_transformBy_invalid_int_value(self):
component = self.getComponent_transform()
with self.assertRaises(TypeError):
component.transformBy(123)
# moveBy
def test_moveBy_valid(self):
component = self.getComponent_transform()
component.moveBy((-1, 2))
self.assertEqual(
component.transformation,
(1.0, 2.0, 3.0, 4.0, 4.0, 8.0)
)
def test_moveBy_invalid_one_string_value(self):
component = self.getComponent_transform()
with self.assertRaises(TypeError):
component.moveBy((-1, "2"))
def test_moveBy_invalid_all_strings_value(self):
component = self.getComponent_transform()
with self.assertRaises(TypeError):
component.moveBy("-1, 2")
def test_moveBy_invalid_int_value(self):
component = self.getComponent_transform()
with self.assertRaises(TypeError):
component.moveBy(1)
# scaleBy
def test_scaleBy_valid_one_value_no_origin(self):
component = self.getComponent_transform()
component.scaleBy((-2))
self.assertEqual(
component.transformation,
(-2.0, -4.0, -6.0, -8.0, -10.0, -12.0)
)
def test_scaleBy_valid_two_values_no_origin(self):
component = self.getComponent_transform()
component.scaleBy((-2, 3))
self.assertEqual(
component.transformation,
(-2.0, 6.0, -6.0, 12.0, -10.0, 18.0)
)
def test_scaleBy_valid_two_values_origin(self):
component = self.getComponent_transform()
component.scaleBy((-2, 3), origin=(1, 2))
self.assertEqual(
component.transformation,
(-2.0, 6.0, -6.0, 12.0, -7.0, 14.0)
)
def test_scaleBy_invalid_one_string_value(self):
component = self.getComponent_transform()
with self.assertRaises(TypeError):
component.scaleBy((-1, "2"))
def test_scaleBy_invalid_two_string_values(self):
component = self.getComponent_transform()
with self.assertRaises(TypeError):
component.scaleBy("-1, 2")
def test_scaleBy_invalid_tuple_too_many_values(self):
component = self.getComponent_transform()
with self.assertRaises(ValueError):
component.scaleBy((-1, 2, -3))
# # rotateBy
def test_rotateBy_valid_no_origin(self):
component = self.getComponent_transform()
component.rotateBy(45)
self.assertEqual(
[round(i, 3) for i in component.transformation],
[-0.707, 2.121, -0.707, 4.95, -0.707, 7.778]
)
def test_rotateBy_valid_origin(self):
component = self.getComponent_transform()
component.rotateBy(45, origin=(1, 2))
self.assertEqual(
[round(i, 3) for i in component.transformation],
[-0.707, 2.121, -0.707, 4.95, 1.0, 7.657]
)
def test_rotateBy_invalid_string_value(self):
component = self.getComponent_transform()
with self.assertRaises(TypeError):
component.rotateBy("45")
def test_rotateBy_invalid_too_large_value_positive(self):
component = self.getComponent_transform()
with self.assertRaises(ValueError):
component.rotateBy(361)
def test_rotateBy_invalid_too_large_value_negative(self):
component = self.getComponent_transform()
with self.assertRaises(ValueError):
component.rotateBy(-361)
# skewBy
def test_skewBy_valid_no_origin_one_value(self):
component = self.getComponent_transform()
component.skewBy(100)
self.assertEqual(
[round(i, 3) for i in component.transformation],
[-10.343, 2.0, -19.685, 4.0, -29.028, 6.0]
)
def test_skewBy_valid_no_origin_two_values(self):
component = self.getComponent_transform()
component.skewBy((100, 200))
self.assertEqual(
[round(i, 3) for i in component.transformation],
[-10.343, 2.364, -19.685, 5.092, -29.028, 7.82]
)
def test_skewBy_valid_origin_one_value(self):
component = self.getComponent_transform()
component.skewBy(100, origin=(1, 2))
self.assertEqual(
[round(i, 3) for i in component.transformation],
[-10.343, 2.0, -19.685, 4.0, -17.685, 6.0]
)
def test_skewBy_valid_origin_two_values(self):
component = self.getComponent_transform()
component.skewBy((100, 200), origin=(1, 2))
self.assertEqual(
[round(i, 3) for i in component.transformation],
[-10.343, 2.364, -19.685, 5.092, -17.685, 7.456]
)
# -------------
# Normalization
# -------------
# round
def test_round(self):
component = self.getComponent_generic()
component.transformation = (1.2, 2.2, 3.3, 4.4, 5.1, 6.6)
component.round()
self.assertEqual(
component.transformation,
(1.2, 2.2, 3.3, 4.4, 5, 7)
)
# decompose
def test_decompose_noParent(self):
component, _ = self.objectGenerator("component")
with self.assertRaises(FontPartsError):
component.decompose()
def test_decompose_digest(self):
    # Decomposing must replace the component with the base glyph's
    # outline, preserving point order and the point names assigned in
    # getComponent_generic().
    from fontPens.digestPointPen import DigestPointPen
    component = self.getComponent_generic()
    glyph = component.glyph
    glyph.layer[component.baseGlyph]
    component.decompose()
    pointPen = DigestPointPen()
    glyph.drawPoints(pointPen)
    expected = (
        ('beginPath', None),
        ((0, 0), u'line', False, 'point 0'),
        ((0, 100), u'line', False, 'point 1'),
        ((100, 100), u'line', False, 'point 2'),
        ((100, 0), u'line', False, 'point 3'),
        'endPath'
    )
    self.assertEqual(
        pointPen.getDigest(),
        expected
    )
def test_decompose_identifiers(self):
component = self.getComponent_generic()
glyph = component.glyph
baseGlyph = glyph.layer[component.baseGlyph]
baseGlyph[0].getIdentifier()
for point in baseGlyph[0].points:
point.getIdentifier()
component.decompose()
self.assertEqual(
[point.identifier for point in glyph[0].points],
[point.identifier for point in baseGlyph[0].points]
)
self.assertEqual(
glyph[0].identifier,
baseGlyph[0].identifier
)
def test_decompose_transformation(self):
from fontPens.digestPointPen import DigestPointPen
component = self.getComponent_generic()
component.scale = (2, 2)
glyph = component.glyph
glyph.layer[component.baseGlyph]
component.decompose()
pointPen = DigestPointPen()
glyph.drawPoints(pointPen)
expected = (
('beginPath', None),
((0, 0), u'line', False, 'point 0'),
((0, 200), u'line', False, 'point 1'),
((200, 200), u'line', False, 'point 2'),
((200, 0), u'line', False, 'point 3'),
'endPath'
)
self.assertEqual(
pointPen.getDigest(),
expected
)
# ------------
# Data Queries
# ------------
# bounds
def test_bounds_get(self):
component = self.getComponent_generic()
self.assertEqual(
component.bounds,
(0, 0, 100, 100)
)
def test_bounds_none(self):
component = self.getComponent_generic()
layer = component.layer
baseGlyph = layer[component.baseGlyph]
baseGlyph.clear()
self.assertIsNone(component.bounds)
def test_bounds_on_move(self):
    """Bounds must track the component after a fractional move."""
    component = self.getComponent_generic()
    component.moveBy((0.1, -0.1))
    expected = (0.1, -0.1, 100.1, 99.9)
    self.assertEqual(component.bounds, expected)
def test_bounds_on_scale(self):
component = self.getComponent_generic()
component.scaleBy((2, 0.5))
self.assertEqual(
component.bounds,
(0, 0, 200, 50)
)
    def test_bounds_invalid_set(self):
        # bounds is a computed, read-only property; assigning must raise.
        component = self.getComponent_generic()
        with self.assertRaises(FontPartsError):
            component.bounds = (0, 0, 100, 100)
# pointInside
def test_pointInside_true(self):
component = self.getComponent_generic()
self.assertEqual(
component.pointInside((1, 1)),
True
)
def test_pointInside_false(self):
component = self.getComponent_generic()
self.assertEqual(
component.pointInside((-1, -1)),
False
)
# ----
# Hash
# ----
def test_hash_object_self(self):
component_one = self.getComponent_generic()
self.assertEqual(
hash(component_one),
hash(component_one)
)
def test_hash_object_other(self):
component_one = self.getComponent_generic()
component_two = self.getComponent_generic()
self.assertNotEqual(
hash(component_one),
hash(component_two)
)
def test_hash_object_self_variable_assignment(self):
component_one = self.getComponent_generic()
a = component_one
self.assertEqual(
hash(component_one),
hash(a)
)
def test_hash_object_other_variable_assignment(self):
component_one = self.getComponent_generic()
component_two = self.getComponent_generic()
a = component_one
self.assertNotEqual(
hash(component_two),
hash(a)
)
    def test_is_hashable(self):
        # Components must be hashable so they can live in sets / dict keys.
        # NOTE(review): ``collections.Hashable`` is a deprecated alias of
        # ``collections.abc.Hashable`` and was removed in Python 3.10 —
        # left as-is pending confirmation of the supported Python versions.
        component_one = self.getComponent_generic()
        self.assertTrue(
            isinstance(component_one, collections.Hashable)
        )
# --------
# Equality
# --------
def test_object_equal_self(self):
component_one = self.getComponent_generic()
self.assertEqual(
component_one,
component_one
)
def test_object_not_equal_other(self):
component_one = self.getComponent_generic()
component_two = self.getComponent_generic()
self.assertNotEqual(
component_one,
component_two
)
def test_object_equal_assigned_variable(self):
component_one = self.getComponent_generic()
a = component_one
a.baseGlyph = "C"
self.assertEqual(
component_one,
a
)
def test_object_not_equal_assigned_variable_other(self):
component_one = self.getComponent_generic()
component_two = self.getComponent_generic()
a = component_one
self.assertNotEqual(
component_two,
a
)
# ---------
# Selection
# ---------
    def test_selected_true(self):
        # Setting ``selected`` to True must round-trip through the property.
        component = self.getComponent_generic()
        try:
            component.selected = False
        except NotImplementedError:
            # Environment does not support selection; nothing to verify.
            return
        component.selected = True
        self.assertEqual(
            component.selected,
            True
        )
    def test_selected_false(self):
        # Setting ``selected`` to False must round-trip through the property.
        component = self.getComponent_generic()
        try:
            component.selected = False
        except NotImplementedError:
            # Environment does not support selection; nothing to verify.
            return
        self.assertEqual(
            component.selected,
            False
        )
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" BVT tests for Volumes
"""
#Import Local Modules
from marvin.cloudstackTestCase import cloudstackTestCase, unittest
#from marvin.cloudstackException import *
from marvin.cloudstackAPI import (deleteVolume,
extractVolume,
resizeVolume)
#from marvin.sshClient import SshClient
from marvin.lib.utils import (cleanup_resources,
format_volume_to_ext3,
wait_until)
from marvin.lib.base import (ServiceOffering,
VirtualMachine,
Account,
Volume,
Host,
DiskOffering,
StoragePool,)
from marvin.lib.common import (get_domain,
get_suitable_test_template,
get_zone,
find_storage_pool_type,
get_pod,
list_disk_offering)
from marvin.lib.utils import checkVolumeSize
from marvin.codes import SUCCESS, FAILED, XEN_SERVER
from nose.plugins.attrib import attr
#Import System modules
import os
import urllib
import time
import tempfile
# Tell nose's multiprocess plugin these tests may share a worker process.
_multiprocess_shared_ = True
class TestCreateVolume(cloudstackTestCase):

    @classmethod
    def setUpClass(cls):
        """One-time fixtures for the class: zone/domain/template lookup,
        three disk offerings, a test account, a service offering and a
        running VM that the attach tests use.

        Sets ``cls.invalidStoragePoolType`` so setUp() can skip every test
        when an LXC zone has no RBD storage pool.
        """
        testClient = super(TestCreateVolume, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.services = testClient.getParsedTestDataConfig()
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
        cls._cleanup = []
        cls.hypervisor = testClient.getHypervisorInfo()
        cls.services['mode'] = cls.zone.networktype
        cls.invalidStoragePoolType = False
        # For LXC, data volumes require an RBD (e.g. ceph) storage pool;
        # without one, every test in this class is skipped in setUp().
        if cls.hypervisor.lower() == 'lxc':
            if not find_storage_pool_type(cls.apiclient, storagetype='rbd'):
                # RBD storage type is required for data volumes for LXC
                cls.invalidStoragePoolType = True
                return
        cls.disk_offering = DiskOffering.create(
            cls.apiclient,
            cls.services["disk_offering"]
        )
        cls.sparse_disk_offering = DiskOffering.create(
            cls.apiclient,
            cls.services["sparse_disk_offering"]
        )
        cls.custom_disk_offering = DiskOffering.create(
            cls.apiclient,
            cls.services["disk_offering"],
            custom=True
        )
        template = get_suitable_test_template(
            cls.apiclient,
            cls.zone.id,
            cls.services["ostype"],
            cls.hypervisor
        )
        if template == FAILED:
            assert False, "get_suitable_test_template() failed to return template with description %s" % cls.services["ostype"]
        cls.services["domainid"] = cls.domain.id
        cls.services["zoneid"] = cls.zone.id
        cls.services["template"] = template.id
        cls.services["customdiskofferingid"] = cls.custom_disk_offering.id
        cls.services["diskname"] = cls.services["volume"]["diskname"]
        # Create VMs, NAT Rules etc
        cls.account = Account.create(
            cls.apiclient,
            cls.services["account"],
            domainid=cls.domain.id
        )
        cls.service_offering = ServiceOffering.create(
            cls.apiclient,
            cls.services["service_offerings"]["tiny"]
        )
        cls.virtual_machine = VirtualMachine.create(
            cls.apiclient,
            cls.services,
            accountid=cls.account.name,
            domainid=cls.account.domainid,
            serviceofferingid=cls.service_offering.id,
            mode=cls.services["mode"]
        )
        # BUG FIX: cls.sparse_disk_offering was created above but never
        # registered for cleanup, leaking one disk offering per run.
        cls._cleanup = [
            cls.service_offering,
            cls.disk_offering,
            cls.sparse_disk_offering,
            cls.custom_disk_offering,
            cls.account
        ]
    def setUp(self):
        # Per-test setup: fresh API/DB clients and an empty cleanup list;
        # skip outright when setUpClass found no usable storage pool (LXC).
        self.apiClient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.cleanup = []
        if self.invalidStoragePoolType:
            self.skipTest("Skipping test because of valid storage\
            pool not available")
    @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
    def test_01_create_volume(self):
        """Test Volume creation for all Disk Offerings (incl. custom)
        """
        # Validate the following
        # 1. Create volumes from the different sizes
        # 2. Verify the size of volume with actual size allocated
        self.volumes = []
        for k, v in self.services["volume_offerings"].items():
            volume = Volume.create(
                self.apiClient,
                v,
                zoneid=self.zone.id,
                account=self.account.name,
                domainid=self.account.domainid,
                diskofferingid=self.disk_offering.id
            )
            self.debug("Created a volume with ID: %s" % volume.id)
            self.volumes.append(volume)
        # KVM additionally exercises a sparse-provisioned data volume.
        if self.virtual_machine.hypervisor == "KVM":
            sparse_volume = Volume.create(
                self.apiClient,
                self.services,
                zoneid=self.zone.id,
                account=self.account.name,
                domainid=self.account.domainid,
                diskofferingid=self.sparse_disk_offering.id
            )
            self.debug("Created a sparse volume: %s" % sparse_volume.id)
            self.volumes.append(sparse_volume)
        volume = Volume.create_custom_disk(
            self.apiClient,
            self.services,
            account=self.account.name,
            domainid=self.account.domainid,
        )
        self.debug("Created a volume with custom offering: %s" % volume.id)
        self.volumes.append(volume)
        # Attach each volume in turn and check the size the guest OS
        # actually sees matches the size the offering promised.
        for volume in self.volumes:
            list_volume_response = Volume.list(
                self.apiClient,
                id=volume.id)
            self.assertEqual(
                isinstance(list_volume_response, list),
                True,
                "Check list response returns a valid list"
            )
            self.assertNotEqual(
                list_volume_response,
                None,
                "Check if volume exists in ListVolumes"
            )
            self.debug(
                "Attaching volume (ID: %s) to VM (ID: %s)" % (
                    volume.id,
                    self.virtual_machine.id
                ))
            self.virtual_machine.attach_volume(
                self.apiClient,
                volume
            )
            try:
                # Reboot so the guest picks up the newly attached disk.
                ssh = self.virtual_machine.get_ssh_client()
                self.debug("Rebooting VM %s" % self.virtual_machine.id)
                ssh.execute("reboot")
            except Exception as e:
                self.fail("SSH access failed for VM %s - %s" %
                          (self.virtual_machine.ipaddress, e))
            # Poll listVM to ensure VM is started properly
            timeout = self.services["timeout"]
            while True:
                time.sleep(self.services["sleep"])
                # Ensure that VM is in running state
                list_vm_response = VirtualMachine.list(
                    self.apiClient,
                    id=self.virtual_machine.id
                )
                if isinstance(list_vm_response, list):
                    vm = list_vm_response[0]
                    if vm.state == 'Running':
                        self.debug("VM state: %s" % vm.state)
                        break
                if timeout == 0:
                    raise Exception(
                        "Failed to start VM (ID: %s) " % vm.id)
                timeout = timeout - 1
            vol_sz = str(list_volume_response[0].size)
            ssh = self.virtual_machine.get_ssh_client(
                reconnect=True
            )
            # Get the updated volume information
            list_volume_response = Volume.list(
                self.apiClient,
                id=volume.id)
            # Choose the in-guest device path per hypervisor: XenServer
            # exposes /dev/xvdX, KVM /dev/vdX (X derived from deviceid),
            # Hyper-V a fixed /dev/sdb; otherwise let the helper decide.
            if list_volume_response[0].hypervisor.lower() == XEN_SERVER.lower():
                volume_name = "/dev/xvd" + chr(ord('a') + int(list_volume_response[0].deviceid))
                self.debug(" Using XenServer volume_name: %s" % (volume_name))
                ret = checkVolumeSize(ssh_handle=ssh,volume_name=volume_name,size_to_verify=vol_sz)
            elif list_volume_response[0].hypervisor.lower() == "kvm":
                volume_name = "/dev/vd" + chr(ord('a') + int(list_volume_response[0].deviceid))
                self.debug(" Using KVM volume_name: %s" % (volume_name))
                ret = checkVolumeSize(ssh_handle=ssh,volume_name=volume_name,size_to_verify=vol_sz)
            elif list_volume_response[0].hypervisor.lower() == "hyperv":
                ret = checkVolumeSize(ssh_handle=ssh,volume_name="/dev/sdb",size_to_verify=vol_sz)
            else:
                ret = checkVolumeSize(ssh_handle=ssh,size_to_verify=vol_sz)
            self.debug(" Volume Size Expected %s Actual :%s" %(vol_sz,ret[1]))
            self.virtual_machine.detach_volume(self.apiClient, volume)
            self.assertEqual(ret[0],SUCCESS,"Check if promised disk size actually available")
            time.sleep(self.services["sleep"])
    def tearDown(self):
        # Clean up, terminate the volumes/resources created by this test.
        cleanup_resources(self.apiClient, self.cleanup)
        return
    @classmethod
    def tearDownClass(cls):
        """Remove the class-level fixtures; surface cleanup failures loudly."""
        try:
            # Re-fetch the API client in case setUpClass bailed out early.
            cls.apiclient = super(TestCreateVolume, cls).getClsTestClient().getApiClient()
            cleanup_resources(cls.apiclient, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
class TestVolumes(cloudstackTestCase):

    @classmethod
    def setUpClass(cls):
        """One-time fixtures: zone/template lookup, disk offerings, a test
        account, a service offering, a running VM and the shared data
        volume (``cls.volume``) the tests attach/detach/resize.
        """
        testClient = super(TestVolumes, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()
        cls.services = testClient.getParsedTestDataConfig()
        cls._cleanup = []
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
        cls.services['mode'] = cls.zone.networktype
        cls.hypervisor = testClient.getHypervisorInfo()
        cls.invalidStoragePoolType = False
        # For LXC, data volumes require an RBD (e.g. ceph) storage pool;
        # without one, every test in this class is skipped in setUp().
        if cls.hypervisor.lower() == 'lxc':
            if not find_storage_pool_type(cls.apiclient, storagetype='rbd'):
                # RBD storage type is required for data volumes for LXC
                cls.invalidStoragePoolType = True
                return
        cls.disk_offering = DiskOffering.create(
            cls.apiclient,
            cls.services["disk_offering"]
        )
        cls.resized_disk_offering = DiskOffering.create(
            cls.apiclient,
            cls.services["resized_disk_offering"]
        )
        cls.custom_resized_disk_offering = DiskOffering.create(
            cls.apiclient,
            cls.services["resized_disk_offering"],
            custom=True
        )
        cls.template = get_suitable_test_template(
            cls.apiclient,
            cls.zone.id,
            cls.services["ostype"],
            cls.hypervisor
        )
        if cls.template == FAILED:
            assert False, "get_suitable_test_template() failed to return template with description %s" % cls.services["ostype"]
        cls.services["domainid"] = cls.domain.id
        cls.services["zoneid"] = cls.zone.id
        cls.services["template"] = cls.template.id
        cls.services["diskofferingid"] = cls.disk_offering.id
        cls.services['resizeddiskofferingid'] = cls.resized_disk_offering.id
        cls.services['customresizeddiskofferingid'] = cls.custom_resized_disk_offering.id
        # Create VMs, VMs etc
        cls.account = Account.create(
            cls.apiclient,
            cls.services["account"],
            domainid=cls.domain.id
        )
        cls.service_offering = ServiceOffering.create(
            cls.apiclient,
            cls.services["service_offerings"]["tiny"]
        )
        cls.virtual_machine = VirtualMachine.create(
            cls.apiclient,
            cls.services,
            accountid=cls.account.name,
            domainid=cls.account.domainid,
            serviceofferingid=cls.service_offering.id,
            mode=cls.services["mode"]
        )
        pools = StoragePool.list(cls.apiclient)
        # BUG FIX: this branch previously dereferenced the never-assigned
        # attribute ``cls.storage_pools`` (AttributeError whenever an LXC
        # run reached this point) and carried a skip message copy-pasted
        # from a snapshot test. Inspect the pools listed above instead.
        if cls.hypervisor.lower() == 'lxc' and not any(
                pool.type.lower() == 'rbd' for pool in (pools or [])):
            raise unittest.SkipTest("RBD storage pool required for volumes on LXC")
        cls.volume = Volume.create(
            cls.apiclient,
            cls.services,
            account=cls.account.name,
            domainid=cls.account.domainid
        )
        cls._cleanup = [
            cls.resized_disk_offering,
            cls.custom_resized_disk_offering,
            cls.service_offering,
            cls.disk_offering,
            cls.volume,
            cls.account
        ]
    @classmethod
    def tearDownClass(cls):
        """Remove the class-level fixtures (offerings, volume, account)."""
        try:
            cleanup_resources(cls.apiclient, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
    def setUp(self):
        # Per-test setup: fresh clients, attach-state bookkeeping for
        # tearDown(), and a skip when setUpClass found no usable pool (LXC).
        self.apiClient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        # Tracks whether the shared volume is attached; tearDown() detaches.
        self.attached = False
        self.cleanup = []
        if self.invalidStoragePoolType:
            self.skipTest("Skipping test because valid storage pool not\
            available")
    def tearDown(self):
        # Clean up: detach the shared volume if the test left it attached,
        # then terminate everything the test registered in self.cleanup.
        if self.attached:
            self.virtual_machine.detach_volume(self.apiClient, self.volume)
            # NOTE(review): indentation reconstructed — the KVM stop/start
            # cycle is assumed to apply only after a detach; confirm against
            # the original file.
            if self.virtual_machine.hypervisor == "KVM":
                self.virtual_machine.stop(self.apiClient)
                self.virtual_machine.start(self.apiClient)
        cleanup_resources(self.apiClient, self.cleanup)
        return
    @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
    def test_02_attach_volume(self):
        """Attach a created Volume to a Running VM
        """
        # Validate the following
        # 1. shows list of volumes
        # 2. "Attach Disk" pop-up box will display with list of instances
        # 3. disk should be attached to instance successfully
        self.debug(
            "Attaching volume (ID: %s) to VM (ID: %s)" % (
                self.volume.id,
                self.virtual_machine.id
            ))
        self.virtual_machine.attach_volume(self.apiClient, self.volume)
        # Mark attached so tearDown() detaches the shared volume again.
        self.attached = True
        list_volume_response = Volume.list(
            self.apiClient,
            id=self.volume.id
        )
        self.assertEqual(
            isinstance(list_volume_response, list),
            True,
            "Check list response returns a valid list"
        )
        self.assertNotEqual(
            list_volume_response,
            None,
            "Check if volume exists in ListVolumes"
        )
        volume = list_volume_response[0]
        # An attached volume reports its owning VM id.
        self.assertNotEqual(
            volume.virtualmachineid,
            None,
            "Check if volume state (attached) is reflected"
        )
        try:
            # Format the attached volume to a known fs to prove the guest
            # can actually use it.
            format_volume_to_ext3(self.virtual_machine.get_ssh_client())
        except Exception as e:
            self.fail("SSH failed for VM: %s - %s" %
                      (self.virtual_machine.ipaddress, e))
        return
@attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="false")
def test_03_download_attached_volume(self):
"""Download a Volume attached to a VM
"""
# Validate the following
# 1. download volume will fail with proper error message
# "Failed - Invalid state of the volume with ID:
# It should be either detached or the VM should be in stopped state
self.debug("Extract attached Volume ID: %s" % self.volume.id)
self.virtual_machine.attach_volume(self.apiClient, self.volume)
self.attached = True
cmd = extractVolume.extractVolumeCmd()
cmd.id = self.volume.id
cmd.mode = "HTTP_DOWNLOAD"
cmd.zoneid = self.services["zoneid"]
# A proper exception should be raised;
# downloading attach VM is not allowed
with self.assertRaises(Exception):
self.apiClient.extractVolume(cmd)
@attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="false")
def test_04_delete_attached_volume(self):
"""Delete a Volume attached to a VM
"""
# Validate the following
# 1. delete volume will fail with proper error message
# "Failed - Invalid state of the volume with ID:
# It should be either detached or the VM should be in stopped state
self.debug("Trying to delete attached Volume ID: %s" %
self.volume.id)
self.virtual_machine.attach_volume(self.apiClient, self.volume)
self.attached = True
cmd = deleteVolume.deleteVolumeCmd()
cmd.id = self.volume.id
#Proper exception should be raised; deleting attach VM is not allowed
#with self.assertRaises(Exception):
with self.assertRaises(Exception):
self.apiClient.deleteVolume(cmd)
    @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="false")
    def test_05_detach_volume(self):
        """Detach a Volume attached to a VM
        """
        # Validate the following
        # Data disk should be detached from instance and detached data disk
        # details should be updated properly
        self.debug(
            "Detaching volume (ID: %s) from VM (ID: %s)" % (
                self.volume.id,
                self.virtual_machine.id
            ))
        self.virtual_machine.attach_volume(self.apiClient, self.volume)
        self.virtual_machine.detach_volume(self.apiClient, self.volume)
        # The shared volume is no longer attached; tearDown need not detach.
        self.attached = False
        # Sleep to ensure the current state will reflected in other calls
        time.sleep(self.services["sleep"])
        list_volume_response = Volume.list(
            self.apiClient,
            id=self.volume.id
        )
        self.assertEqual(
            isinstance(list_volume_response, list),
            True,
            "Check list response returns a valid list"
        )
        self.assertNotEqual(
            list_volume_response,
            None,
            "Check if volume exists in ListVolumes"
        )
        volume = list_volume_response[0]
        # A detached volume must not report an owning VM.
        self.assertEqual(
            volume.virtualmachineid,
            None,
            "Check if volume state (detached) is reflected"
        )
        return
    @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
    def test_06_download_detached_volume(self):
        """Download a Volume unattached to an VM
        """
        # Validate the following
        # 1. able to download the volume when its not attached to instance
        self.debug("Extract detached Volume ID: %s" % self.volume.id)
        self.virtual_machine.attach_volume(self.apiClient, self.volume)
        # Sleep to ensure the current state will reflected in other calls
        time.sleep(self.services["sleep"])
        self.virtual_machine.detach_volume(self.apiClient, self.volume)
        self.attached = False
        # Sleep to ensure the current state will reflected in other calls
        time.sleep(self.services["sleep"])
        cmd = extractVolume.extractVolumeCmd()
        cmd.id = self.volume.id
        cmd.mode = "HTTP_DOWNLOAD"
        cmd.zoneid = self.services["zoneid"]
        extract_vol = self.apiClient.extractVolume(cmd)
        # Attempt to download the volume and save contents locally.
        # NOTE: urllib.unquote_plus / urllib.urlopen is the Python 2 API.
        try:
            formatted_url = urllib.unquote_plus(extract_vol.url)
            self.debug("Attempting to download volume at url %s" % formatted_url)
            response = urllib.urlopen(formatted_url)
            self.debug("response from volume url %s" % response.getcode())
            fd, path = tempfile.mkstemp()
            self.debug("Saving volume %s to path %s" %(self.volume.id, path))
            # Close the low-level descriptor; the file is reopened below.
            os.close(fd)
            with open(path, 'wb') as fd:
                fd.write(response.read())
            self.debug("Saved volume successfully")
        except Exception:
            self.fail(
                "Extract Volume Failed with invalid URL %s (vol id: %s)" \
                % (extract_vol.url, self.volume.id)
            )
    @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
    def test_07_resize_fail(self):
        """Test resize (negative) non-existent volume"""
        # Each resize below is expected to be rejected by the API.
        self.debug("Fail Resize Volume ID: %s" % self.volume.id)
        # first, an invalid id
        cmd = resizeVolume.resizeVolumeCmd()
        cmd.id = "invalid id"
        cmd.diskofferingid = self.services['customresizeddiskofferingid']
        success = False
        try:
            self.apiClient.resizeVolume(cmd)
        except Exception as ex:
            # The API is expected to mention "invalid" in the error text.
            if "invalid" in str(ex):
                success = True
        self.assertEqual(
            success,
            True,
            "ResizeVolume - verify invalid id is handled appropriately")
        # Next, we'll try an invalid disk offering id
        cmd.id = self.volume.id
        cmd.diskofferingid = "invalid id"
        success = False
        try:
            self.apiClient.resizeVolume(cmd)
        except Exception as ex:
            if "invalid" in str(ex):
                success = True
        self.assertEqual(
            success,
            True,
            "ResizeVolume - verify disk offering is handled appropriately")
        # try to resize a root disk with a disk offering, root can only be
        # resized by size=; get root vol from created vm
        list_volume_response = Volume.list(
            self.apiClient,
            virtualmachineid=self.virtual_machine.id,
            type='ROOT',
            listall=True
        )
        rootvolume = list_volume_response[0]
        cmd.id = rootvolume.id
        cmd.diskofferingid = self.services['diskofferingid']
        with self.assertRaises(Exception):
            self.apiClient.resizeVolume(cmd)
        # Ok, now let's try and resize a volume that is not custom.
        cmd.id = self.volume.id
        cmd.diskofferingid = self.services['diskofferingid']
        cmd.size = 4
        self.debug(
            "Attaching volume (ID: %s) to VM (ID: %s)" % (
                self.volume.id,
                self.virtual_machine.id)
            )
        # attach the volume
        self.virtual_machine.attach_volume(self.apiClient, self.volume)
        self.attached = True
        # XenServer requires the VM stopped for resize; VMware/Hyper-V do
        # not support resize here at all.
        hosts = Host.list(self.apiClient, id=self.virtual_machine.hostid)
        self.assertTrue(isinstance(hosts, list))
        self.assertTrue(len(hosts) > 0)
        self.debug("Found %s host" % hosts[0].hypervisor)
        if hosts[0].hypervisor == "XenServer":
            self.virtual_machine.stop(self.apiClient)
        elif hosts[0].hypervisor.lower() in ("vmware", "hyperv"):
            self.skipTest("Resize Volume is unsupported on VmWare and Hyper-V")
        # Attempting to resize it should throw an exception, as we're using
        # a non customisable disk offering, therefore our size parameter
        # should be ignored
        with self.assertRaises(Exception):
            self.apiClient.resizeVolume(cmd)
        if hosts[0].hypervisor == "XenServer":
            self.virtual_machine.start(self.apiClient)
            time.sleep(30)
        return
    @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
    def test_08_resize_volume(self):
        """Test resize a volume"""
        # Verify the size is the new size is what we wanted it to be.
        self.debug(
            "Attaching volume (ID: %s) to VM (ID: %s)" % (
                self.volume.id,
                self.virtual_machine.id
            ))
        self.virtual_machine.attach_volume(self.apiClient, self.volume)
        self.attached = True
        hosts = Host.list(self.apiClient, id=self.virtual_machine.hostid)
        self.assertTrue(isinstance(hosts, list))
        self.assertTrue(len(hosts) > 0)
        self.debug("Found %s host" % hosts[0].hypervisor)
        # XenServer requires the VM stopped for resize; VMware/Hyper-V do
        # not support resize here at all.
        if hosts[0].hypervisor == "XenServer":
            self.virtual_machine.stop(self.apiClient)
        elif hosts[0].hypervisor.lower() in ("vmware", "hyperv"):
            self.skipTest("Resize Volume is unsupported on VmWare and Hyper-V")
        # resize the data disk to a fresh 20 GB offering
        self.debug("Resize Volume ID: %s" % self.volume.id)
        self.services["disk_offering"]["disksize"] = 20
        disk_offering_20_GB = DiskOffering.create(
            self.apiclient,
            self.services["disk_offering"]
        )
        self.cleanup.append(disk_offering_20_GB)
        cmd = resizeVolume.resizeVolumeCmd()
        cmd.id = self.volume.id
        cmd.diskofferingid = disk_offering_20_GB.id
        self.apiClient.resizeVolume(cmd)
        # Poll (up to 3 x 10s) until the volume reports 20 GB and Ready.
        count = 0
        success = False
        while count < 3:
            list_volume_response = Volume.list(
                self.apiClient,
                id=self.volume.id,
                type='DATADISK'
            )
            for vol in list_volume_response:
                if vol.id == self.volume.id and int(vol.size) == (int(disk_offering_20_GB.disksize) * (1024** 3)) and vol.state == 'Ready':
                    success = True
            if success:
                break
            else:
                time.sleep(10)
                count += 1
        self.assertEqual(
            success,
            True,
            "Check if the data volume resized appropriately"
        )
        can_shrink = False
        list_volume_response = Volume.list(
            self.apiClient,
            id=self.volume.id,
            type='DATADISK'
        )
        storage_pool_id = [x.storageid for x in list_volume_response if x.id == self.volume.id][0]
        storage = StoragePool.list(self.apiclient, id=storage_pool_id)[0]
        # At present only CLVM supports shrinking volumes
        if storage.type.lower() == "clvm":
            can_shrink = True
        if can_shrink:
            # Shrink back down via a fresh 10 GB offering with shrinkok set.
            self.services["disk_offering"]["disksize"] = 10
            disk_offering_10_GB = DiskOffering.create(
                self.apiclient,
                self.services["disk_offering"]
            )
            self.cleanup.append(disk_offering_10_GB)
            cmd = resizeVolume.resizeVolumeCmd()
            cmd.id = self.volume.id
            cmd.diskofferingid = disk_offering_10_GB.id
            cmd.shrinkok = "true"
            self.apiClient.resizeVolume(cmd)
            count = 0
            success = False
            while count < 3:
                list_volume_response = Volume.list(
                    self.apiClient,
                    id=self.volume.id
                )
                for vol in list_volume_response:
                    if vol.id == self.volume.id and int(vol.size) == (int(disk_offering_10_GB.disksize) * (1024 ** 3)) and vol.state == 'Ready':
                        success = True
                if success:
                    break
                else:
                    time.sleep(10)
                    count += 1
            self.assertEqual(
                success,
                True,
                "Check if the root volume resized appropriately"
            )
        # start the vm if it is on xenserver
        if hosts[0].hypervisor == "XenServer":
            self.virtual_machine.start(self.apiClient)
            time.sleep(30)
        return
    @attr(tags = ["advanced", "advancedns", "smoke","basic"], required_hardware="false")
    def test_09_delete_detached_volume(self):
        """Delete a Volume unattached to an VM
        """
        # Validate the following
        # 1. volume should be deleted successfully and listVolume should not
        #    contain the deleted volume details.
        # 2. "Delete Volume" menu item not shown under "Actions" menu.
        #    (UI should not allow to delete the volume when it is attached
        #    to instance by hiding the menu Item)
        self.debug("Delete Volume ID: %s" % self.volume.id)
        self.volume_1 = Volume.create(
            self.apiclient,
            self.services,
            account=self.account.name,
            domainid=self.account.domainid
        )
        # NOTE(review): volume_1 is not appended to self.cleanup — if the
        # delete below fails, it leaks; verify this is intended.
        self.virtual_machine.attach_volume(self.apiClient, self.volume_1)
        self.virtual_machine.detach_volume(self.apiClient, self.volume_1)
        cmd = deleteVolume.deleteVolumeCmd()
        cmd.id = self.volume_1.id
        self.apiClient.deleteVolume(cmd)
        list_volume_response = Volume.list(
            self.apiClient,
            id=self.volume_1.id,
            type='DATADISK'
        )
        # The deleted volume must no longer be listed.
        self.assertEqual(
            list_volume_response,
            None,
            "Check if volume exists in ListVolumes"
        )
        return
    @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
    def test_10_list_volumes(self):
        # Validate the following
        # 1. List Root Volume and waits until it has the newly introduced attributes
        # 2. Verifies return attributes has values different from none, when instance is running
        list_vm = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id)[0]
        host = Host.list(
            self.apiclient,
            type='Routing',
            id=list_vm.hostid
        )[0]
        list_pods = get_pod(self.apiclient, self.zone.id, host.podid)
        root_volume = self.wait_for_attributes_and_return_root_vol()
        # Every newly introduced listVolumes attribute must be present and
        # populated while the instance is running.
        self.assertTrue(hasattr(root_volume, "utilization"))
        self.assertTrue(root_volume.utilization is not None)
        self.assertTrue(hasattr(root_volume, "virtualsize"))
        self.assertTrue(root_volume.virtualsize is not None)
        self.assertTrue(hasattr(root_volume, "physicalsize"))
        self.assertTrue(root_volume.physicalsize is not None)
        self.assertTrue(hasattr(root_volume, "vmname"))
        self.assertEqual(root_volume.vmname, list_vm.name)
        self.assertTrue(hasattr(root_volume, "clustername"))
        self.assertTrue(root_volume.clustername is not None)
        self.assertTrue(hasattr(root_volume, "clusterid"))
        self.assertTrue(root_volume.clusterid is not None)
        self.assertTrue(hasattr(root_volume, "storageid"))
        self.assertTrue(root_volume.storageid is not None)
        self.assertTrue(hasattr(root_volume, "storage"))
        self.assertTrue(root_volume.storage is not None)
        self.assertTrue(hasattr(root_volume, "zoneid"))
        self.assertEqual(root_volume.zoneid, self.zone.id)
        self.assertTrue(hasattr(root_volume, "zonename"))
        self.assertEqual(root_volume.zonename, self.zone.name)
        self.assertTrue(hasattr(root_volume, "podid"))
        self.assertEqual(root_volume.podid, list_pods.id)
        self.assertTrue(hasattr(root_volume, "podname"))
        self.assertEqual(root_volume.podname, list_pods.name)
    @attr(tags = ["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
    def test_11_attach_volume_with_unstarted_vm(self):
        """Attach a created Volume to a unstarted VM
        """
        # Validate the following
        # 1. Attach to a vm in startvm=false state works and vm can be started afterwards.
        # 2. shows list of volumes
        # 3. "Attach Disk" pop-up box will display with list of instances
        # 4. disk should be attached to instance successfully
        test_vm = VirtualMachine.create(
            self.apiclient,
            self.services,
            templateid=self.template.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            mode=self.services["mode"],
            startvm=False
        )
        self.debug(
            "Attaching volume (ID: %s) to VM (ID: %s)" % (
                self.volume.id,
                test_vm.id
            ))
        test_vm.attach_volume(self.apiClient, self.volume)
        test_vm.start(self.apiClient)
        list_volume_response = Volume.list(
            self.apiClient,
            id=self.volume.id
        )
        self.assertEqual(
            isinstance(list_volume_response, list),
            True,
            "Check list response returns a valid list"
        )
        self.assertNotEqual(
            list_volume_response,
            None,
            "Check if volume exists in ListVolumes"
        )
        volume = list_volume_response[0]
        self.assertNotEqual(
            volume.virtualmachineid,
            None,
            "Check if volume state (attached) is reflected"
        )
        # Sleep to ensure the current state will reflected in other calls
        time.sleep(self.services["sleep"])
        test_vm.detach_volume(self.apiClient, self.volume)
        # The temporary VM is destroyed in tearDown() via self.cleanup.
        self.cleanup.append(test_vm)
        return
def wait_for_attributes_and_return_root_vol(self):
def checkVolumeResponse():
list_volume_response = Volume.list(
self.apiClient,
virtualmachineid=self.virtual_machine.id,
type='ROOT',
listall=True
)
if isinstance(list_volume_response, list) and list_volume_response[0].virtualsize is not None:
return True, list_volume_response[0]
return False, None
# sleep interval is 1s, retries is 360, this will sleep atmost 360 seconds, or 6 mins
res, response = wait_until(1, 360, checkVolumeResponse)
if not res:
self.fail("Failed to return root volume response")
return response
    @attr(tags=["advanced", "advancedns", "smoke", "basic"], required_hardware="true")
    def test_11_migrate_volume_and_change_offering(self):
        # Validates the following
        # 1. Creates a new Volume with a small disk offering
        # 2. Migrates the Volume to another primary storage and changes the offering
        # 3. Verifies the Volume has new offering when migrated to the new storage.
        # NOTE(review): the "11" prefix duplicates
        # test_11_attach_volume_with_unstarted_vm above — presumably a
        # numbering oversight; the full method names differ so both run.
        small_offering = list_disk_offering(
            self.apiclient,
            name = "Small"
        )[0]
        large_offering = list_disk_offering(
            self.apiclient,
            name = "Large"
        )[0]
        volume = Volume.create(
            self.apiClient,
            self.services,
            zoneid = self.zone.id,
            account = self.account.name,
            domainid = self.account.domainid,
            diskofferingid = small_offering.id
        )
        self.debug("Created a small volume: %s" % volume.id)
        self.virtual_machine.attach_volume(self.apiclient, volume=volume)
        # KVM requires the VM stopped for the storage migration below.
        if self.virtual_machine.hypervisor == "KVM":
            self.virtual_machine.stop(self.apiclient)
        pools = StoragePool.listForMigration(
            self.apiclient,
            id=volume.id
        )
        pool = None
        if pools and len(pools) > 0:
            pool = pools[0]
        else:
            # NOTE(review): skipTest() raises internally, so the ``raise``
            # on its return value is never reached — harmless but odd.
            raise self.skipTest("Not enough storage pools found, skipping test")
        # Clear the pool's storage tags so the new offering can land on it.
        if hasattr(pool, 'tags'):
            StoragePool.update(self.apiclient, id=pool.id, tags="")
        self.debug("Migrating Volume-ID: %s to Pool: %s" % (volume.id, pool.id))
        # VMware and XenServer migrate with the VM live; others offline.
        livemigrate = False
        if self.virtual_machine.hypervisor.lower() == "vmware" or self.virtual_machine.hypervisor.lower() == 'xenserver':
            livemigrate = True
        Volume.migrate(
            self.apiclient,
            volumeid = volume.id,
            storageid = pool.id,
            newdiskofferingid = large_offering.id,
            livemigrate = livemigrate
        )
        if self.virtual_machine.hypervisor == "KVM":
            self.virtual_machine.start(self.apiclient)
        migrated_vol = Volume.list(
            self.apiclient,
            id = volume.id
        )[0]
        self.assertEqual(
            migrated_vol.diskofferingname,
            large_offering.name,
            "Offering name did not match with the new one "
        )
        return
|
|
#!/usr/bin/env python
import subprocess
import praw
from hashlib import sha1
from flask import Flask
from flask import Response
from flask import request
from cStringIO import StringIO
from base64 import b64encode
from base64 import b64decode
from ConfigParser import ConfigParser
import OAuth2Util
import os
import markdown
import bleach
# encoding=utf8
import sys
from participantCollection import ParticipantCollection
# Python 2 hack: re-expose sys.setdefaultencoding so the process-wide
# default string encoding can be forced to UTF-8 (not available in Python 3).
reload(sys)
sys.setdefaultencoding('utf8')
# Edit Me!
# Each day after you post a signup post, copy its 6-character ID to this array.
signupPageSubmissionIds = [ 'eff583', 'efwb9y', 'ega7a8', 'egriu6', 'eh7773', 'ehmd5j', 'ei267r' ]
# Port the local Flask review app listens on.
flaskport = 8949
app = Flask(__name__)
app.debug = True
# Maps sha1(comment text) -> praw Comment; filled in while reviewing.
commentHashesAndComments = {}
def loginAndReturnRedditSession():
    """Log in to reddit with username/password credentials and return the session."""
    parser = ConfigParser()
    parser.read("../reddit-password-credentials.cfg")
    redditUser = parser.get("Reddit", "user")
    redditPassword = parser.get("Reddit", "password")
    # TODO: password auth is going away, and we will soon need to do oauth.
    session = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
    session.login(redditUser, redditPassword, disable_warning=True)
    return session
def loginOAuthAndReturnRedditSession():
    """Return a praw session; praw 4+ performs oauth itself, so no explicit
    OAuth2Util object or refresh step is needed any more."""
    session = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
    return session
def getSubmissionsForRedditSession(redditSession):
    """Fetch every configured signup submission and fully expand its comment
    forest (resolving all MoreComments placeholders)."""
    submissions = []
    for submissionId in signupPageSubmissionIds:
        submission = redditSession.submission(id=submissionId)
        submission.comments.replace_more(limit=None)
        submissions.append(submission)
    return submissions
def getCommentsForSubmissions(submissions):
    """Flatten the comment forests of all submissions into a single list,
    keeping only actual Comment objects."""
    return [comment
            for submission in submissions
            for comment in submission.comments.list()
            if comment.__class__ == praw.models.Comment]
def retireCommentHash(commentHash):
    """Append a handled comment's hash to the retired-hashes file so it is
    skipped on the next moderation pass."""
    with open("retiredcommenthashes.txt", "a") as hashFile:
        hashFile.write("%s\n" % commentHash)
def retiredCommentHashes():
    """Return the list of comment hashes that have already been handled."""
    with open("retiredcommenthashes.txt", "r") as hashFile:
        contents = hashFile.read()
    return contents.splitlines()
@app.route('/moderatesignups.html')
def moderatesignups():
    """Render the moderation page: every not-yet-retired signup comment is
    shown with action buttons that POST back to /takeaction.html via an
    invisible iframe (so the page itself does not navigate away)."""
    global commentHashesAndComments
    commentHashesAndComments = {}
    stringio = StringIO()
    stringio.write('<html>\n<head>\n</head>\n\n')
    # redditSession = loginAndReturnRedditSession()
    redditSession = loginOAuthAndReturnRedditSession()
    submissions = getSubmissionsForRedditSession(redditSession)
    flat_comments = getCommentsForSubmissions(submissions)
    retiredHashes = retiredCommentHashes()
    # i counts comments seen; currently only incremented, never displayed.
    i = 1
    stringio.write('<iframe name="invisibleiframe" style="display:none;"></iframe>\n')
    stringio.write("<h3>")
    stringio.write(os.getcwd())
    stringio.write("<br>\n")
    for submission in submissions:
        stringio.write(submission.title)
        stringio.write("<br>\n")
    stringio.write("</h3>\n\n")
    stringio.write('<form action="copydisplayduringsignuptoclipboard.html" method="post" target="invisibleiframe">')
    stringio.write('<input type="submit" value="Copy display-during-signup.py stdout to clipboard">')
    stringio.write('</form>')
    for comment in flat_comments:
        # print comment.is_root
        # print comment.score
        i += 1
        # Identity of a comment is sha1(fullname + utf-8 body), so an edited
        # comment gets a new hash and reappears for moderation.
        commentHash = sha1()
        commentHash.update(comment.fullname)
        commentHash.update(comment.body.encode('utf-8'))
        commentHash = commentHash.hexdigest()
        if commentHash not in retiredHashes:
            commentHashesAndComments[commentHash] = comment
            authorName = str(comment.author) # can be None if author was deleted. So check for that and skip if it's None.
            stringio.write("<hr>\n")
            stringio.write('<font color="blue"><b>')
            stringio.write(authorName) # can be None if author was deleted. So check for that and skip if it's None.
            stringio.write('</b></font><br>')
            if ParticipantCollection().hasParticipantNamed(authorName):
                stringio.write(' <small><font color="green">(member)</font></small>')
                # if ParticipantCollection().participantNamed(authorName).isStillIn:
                # stringio.write(' <small><font color="green">(in)</font></small>')
                # else:
                # stringio.write(' <small><font color="red">(out)</font></small>')
            else:
                stringio.write(' <small><font color="red">(not a member)</font></small>')
            stringio.write('<form action="takeaction.html" method="post" target="invisibleiframe">')
            stringio.write('<input type="submit" name="actiontotake" value="Signup" style="color:white;background-color:green">')
            # stringio.write('<input type="submit" name="actiontotake" value="Signup and checkin">')
            # stringio.write('<input type="submit" name="actiontotake" value="Relapse">')
            # stringio.write('<input type="submit" name="actiontotake" value="Reinstate">')
            stringio.write('<input type="submit" name="actiontotake" value="Skip comment">')
            stringio.write('<input type="submit" name="actiontotake" value="Skip comment and don\'t upvote">')
            stringio.write('<input type="hidden" name="username" value="' + b64encode(authorName) + '">')
            stringio.write('<input type="hidden" name="commenthash" value="' + commentHash + '">')
            # stringio.write('<input type="hidden" name="commentpermalink" value="' + comment.permalink + '">')
            stringio.write('</form>')
            stringio.write(bleach.clean(markdown.markdown(comment.body.encode('utf-8')), tags=['p']))
            stringio.write("\n<br><br>\n\n")
    stringio.write('</html>')
    pageString = stringio.getvalue()
    stringio.close()
    return Response(pageString, mimetype='text/html')
@app.route('/takeaction.html', methods=["POST"])
def takeaction():
    """Handle one button press from the moderation page: run the matching
    helper script, optionally upvote the comment, and retire its hash so it
    is not shown again."""
    username = b64decode(request.form["username"])
    commentHash = str(request.form["commenthash"])
    # commentPermalink = request.form["commentpermalink"]
    actionToTake = request.form["actiontotake"]
    # print commentHashesAndComments
    # Relies on the module-level cache filled by the last /moderatesignups.html
    # render; a stale page (after restart) would KeyError here.
    comment = commentHashesAndComments[commentHash]
    # print "comment: " + str(comment)
    if actionToTake == 'Signup':
        print "signup - " + username
        subprocess.call(['./signup.py', username])
        comment.upvote()
        retireCommentHash(commentHash)
    # if actionToTake == 'Signup and checkin':
    # print "signup and checkin - " + username
    # subprocess.call(['./signup-and-checkin.sh', username])
    # comment.upvote()
    # retireCommentHash(commentHash)
    # elif actionToTake == 'Relapse':
    # print "relapse - " + username
    # subprocess.call(['./relapse.py', username])
    # comment.upvote()
    # retireCommentHash(commentHash)
    # elif actionToTake == 'Reinstate':
    # print "reinstate - " + username
    # subprocess.call(['./reinstate.py', username])
    # comment.upvote()
    # retireCommentHash(commentHash)
    elif actionToTake == 'Skip comment':
        print "Skip comment - " + username
        comment.upvote()
        retireCommentHash(commentHash)
    elif actionToTake == "Skip comment and don't upvote":
        print "Skip comment and don't upvote - " + username
        retireCommentHash(commentHash)
    return Response("hello", mimetype='text/html')
@app.route('/copydisplayduringsignuptoclipboard.html', methods=["POST"])
def copydisplayduringsignuptoclipboard():
    """Run display-during-signup.py; actually copying its stdout to the
    clipboard is still a TODO (the script runs, output is not captured)."""
    print "TODO: Copy display to clipboard"
    subprocess.call(['./display-during-signup.py'])
    return Response("hello", mimetype='text/html')
if __name__ == '__main__':
    # Moderation UI is for local use only; bind to loopback.
    app.run(host='127.0.0.1', port=flaskport)
|
|
#!/usr/bin/env python
# Copyright (c) 2011-2018, wradlib developers.
# Distributed under the MIT License. See LICENSE.txt for more info.
import os
import numpy as np
import wradlib.util as util
import unittest
import datetime as dt
class HelperFunctionsTest(unittest.TestCase):
    """Unit tests for the module-level helper functions in wradlib.util."""
    def test__shape_to_size(self):
        # size is the product of all shape dimensions
        self.assertEqual(util._shape_to_size((10, 10, 10)), 10 * 10 * 10)
    def test__idvalid(self):
        # indices of entries that are finite and not flagged invalid
        data = np.array(
            [np.inf, np.nan, -99., 99, -9999., -9999, -10., -5., 0., 5., 10.])
        self.assertTrue(
            np.allclose(util._idvalid(data), np.array([6, 7, 8, 9, 10])))
        self.assertTrue(np.allclose(util._idvalid(data, minval=-5., maxval=5.),
                                    np.array([7, 8, 9])))
        self.assertTrue(
            np.allclose(util._idvalid(data, isinvalid=[-9999], maxval=5.),
                        np.array([2, 6, 7, 8, 9])))
    def test_issequence(self):
        # strings are iterable but must NOT count as sequences here
        self.assertTrue(util.issequence([0, 1, 2]))
        self.assertFalse(util.issequence(1))
        self.assertFalse(util.issequence('str'))
    def test_trapezoid(self):
        # membership ramps 0->1 over [0,10], stays 1 over [10,20], ramps down over [20,30]
        data = np.arange(0., 30.1, 0.1)
        correct = np.arange(0., 1., 0.01)
        correct = np.concatenate((correct, np.ones(101), correct[::-1]))
        result = util.trapezoid(data, 0., 10., 20., 30.)
        np.testing.assert_array_almost_equal(result, correct, decimal=9)
    def test_prob_round(self):
        # probabilistic rounding is deterministic under a fixed RNG seed
        np.random.seed(42)
        np.testing.assert_equal(42., util.prob_round(42.4242))
        np.random.seed(44)
        np.testing.assert_equal(43., util.prob_round(42.4242))
    def test_get_wradlib_data_path(self):
        # NOTE(review): mutates os.environ['WRADLIB_DATA'] and restores it
        # below; assumes the variable is set before the test runs.
        wrl_data_path = os.environ.get('WRADLIB_DATA', None)
        del os.environ['WRADLIB_DATA']
        self.assertRaises(EnvironmentError,
                          lambda: util.get_wradlib_data_path())
        filename = 'rainbow/2013070308340000dBuZ.azi'
        os.environ['WRADLIB_DATA'] = os.path.join(wrl_data_path, filename)
        self.assertRaises(EnvironmentError,
                          lambda: util.get_wradlib_data_path())
        os.environ['WRADLIB_DATA'] = wrl_data_path
        filename = os.path.join(wrl_data_path, "test.dat")
        self.assertRaises(EnvironmentError,
                          lambda: util.get_wradlib_data_file(filename))
    def test_from_to(self):
        # one-day step across a one-day span yields both endpoints
        out = util.from_to("2000-01-01 00:00:00",
                           "2000-01-02 00:00:00",
                           86400)
        shouldbe = [dt.datetime(2000, 1, 1, 0, 0),
                    dt.datetime(2000, 1, 2, 0, 0)]
        self.assertEqual(out, shouldbe)
    def test_calculate_polynomial(self):
        data = np.arange(0, 10, 1)
        w = np.arange(0, 5, 1)
        out = np.array([0, 10, 98, 426, 1252, 2930, 5910, 10738, 18056, 28602])
        poly = util.calculate_polynomial(data, w)
        np.testing.assert_allclose(poly, out, rtol=1e-12)
    def test_import_optional(self):
        m = util.import_optional('math')
        np.testing.assert_equal(m.log10(100), 2.0)
        # a missing module yields a stub that raises on attribute access
        mod = util.import_optional('h8x')
        self.assertRaises(AttributeError, lambda: mod.test())
    def test_maximum_intensity_projection(self):
        angle = 0.0
        elev = 0.0
        filename = util.get_wradlib_data_file('misc/polar_dBZ_tur.gz')
        data = np.loadtxt(filename)
        # we need to have meter here for the georef function inside mip
        d1 = np.arange(data.shape[1], dtype=np.float) * 1000
        d2 = np.arange(data.shape[0], dtype=np.float)
        data = np.roll(data, (d2 >= angle).nonzero()[0][0], axis=0)
        # calculate max intensity proj
        util.maximum_intensity_projection(data, r=d1, az=d2,
                                          angle=angle, elev=elev)
        util.maximum_intensity_projection(data, autoext=False)
    def test_roll2d_polar(self):
        filename = util.get_wradlib_data_file('misc/polar_dBZ_tur.gz')
        data = np.loadtxt(filename)
        result1 = util.roll2d_polar(data, 1, axis=0)
        result2 = util.roll2d_polar(data, -1, axis=0)
        result3 = util.roll2d_polar(data, 1, axis=1)
        result4 = util.roll2d_polar(data, -1, axis=1)
        np.testing.assert_equal(result1, np.roll(data, 1, axis=0))
        np.testing.assert_equal(result2, np.roll(data, -1, axis=0))
        # along the range axis only interior columns are comparable
        np.testing.assert_equal(result3[:, 1:],
                                np.roll(data, 1, axis=1)[:, 1:])
        np.testing.assert_equal(result4[:, :-1],
                                np.roll(data, -1, axis=1)[:, :-1])
    def test_medfilt_along_axis(self):
        x = np.arange(10).reshape((2, 5)).astype("f4")
        shouldbe = np.array([[0., 1., 2., 3., 3.],
                             [5., 6., 7., 8., 8.]])
        result = util.medfilt_along_axis(x, 3)
        np.testing.assert_allclose(result, shouldbe)
    def test_gradient_along_axis(self):
        x = np.arange(10).reshape((2, 5)).astype("f4") ** 2
        result = util.gradient_along_axis(x)
        shouldbe = np.array([[1., 11., 2., 4., 6., 7.],
                             [1., 11., 12., 14., 16., 17.]])
        np.testing.assert_allclose(result, shouldbe)
    def test_gradient_from_smoothed(self):
        x = np.arange(10).reshape((2, 5)).astype("f4") ** 2
        result = util.gradient_from_smoothed(x)
        shouldbe = np.array([[1., 11., 2., 1.5, 0., 0.],
                             [1., 11., 12., 6.5, 0., 0.]])
        np.testing.assert_allclose(result, shouldbe)
class TestUtil(unittest.TestCase):
    """Tests for the polar/cartesian window filters in wradlib.util,
    run against a small synthetic 36x10 polar image."""
    def setUp(self):
        np.random.seed(42)
        img = np.zeros((36, 10), dtype=np.float32)
        img[2, 2] = 1 # isolated pixel
        img[5, 6:8] = 1 # line
        img[20, :] = 1 # spike
        img[9:12, 4:7] = 1 # precip field
        # img[15:17,5:7] = np.nan # nodata as nans
        self.img = img
    def test_filter_window_polar(self):
        rscale = 250
        # nrays, nbins = self.img.shape
        # ascale = 2 * np.pi / self.img.shape[0]
        mean = util.filter_window_polar(self.img, 300, "maximum", rscale)
        mean2 = util.filter_window_polar(self.img, 300, "maximum", rscale,
                                         random=True)
        # expected maximum-dilated images (fixed window vs. random window)
        correct = np.array([[0., 1., 1., 1., 0., 0., 0., 0., 0., 0.],
                            [0., 1., 1., 1., 0., 0., 0., 0., 0., 0.],
                            [0., 1., 1., 1., 0., 0., 0., 0., 0., 0.],
                            [0., 1., 1., 1., 0., 0., 0., 0., 0., 0.],
                            [0., 1., 1., 1., 0., 1., 1., 1., 1., 0.],
                            [0., 1., 1., 0., 0., 1., 1., 1., 1., 0.],
                            [1., 1., 0., 0., 0., 1., 1., 1., 1., 0.],
                            [1., 1., 0., 1., 1., 0., 0., 0., 0., 0.],
                            [1., 0., 0., 1., 1., 1., 1., 1., 0., 0.],
                            [1., 0., 0., 1., 1., 1., 1., 1., 0., 0.],
                            [1., 0., 0., 1., 1., 1., 1., 1., 0., 0.],
                            [1., 0., 0., 1., 1., 1., 1., 1., 0., 0.],
                            [1., 0., 0., 1., 1., 1., 1., 1., 0., 0.],
                            [1., 0., 0., 1., 1., 0., 0., 0., 0., 0.],
                            [1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                            [1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
                            [1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
                            [1., 1., 1., 0., 0., 0., 0., 0., 0., 0.],
                            [1., 1., 1., 1., 1., 0., 0., 0., 0., 0.],
                            [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
                            [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
                            [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
                            [1., 1., 1., 1., 1., 0., 0., 0., 0., 0.],
                            [1., 1., 1., 0., 0., 0., 0., 0., 0., 0.],
                            [1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
                            [1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
                            [1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                            [1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                            [1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                            [1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                            [1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                            [1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                            [1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                            [1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
                            [1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
                            [0., 1., 1., 0., 0., 0., 0., 0., 0., 0.]])
        correct2 = np.array([[0., 1., 1., 1., 0., 0., 0., 0., 0., 0.],
                             [0., 1., 1., 1., 0., 0., 0., 0., 0., 0.],
                             [0., 1., 1., 1., 0., 0., 0., 0., 0., 0.],
                             [0., 1., 1., 1., 0., 0., 0., 0., 0., 0.],
                             [0., 1., 1., 1., 0., 1., 1., 1., 1., 0.],
                             [0., 1., 1., 0., 0., 1., 1., 1., 1., 0.],
                             [1., 1., 0., 0., 0., 1., 1., 1., 1., 0.],
                             [1., 0., 0., 1., 1., 0., 0., 0., 0., 0.],
                             [1., 0., 0., 1., 1., 1., 1., 1., 0., 0.],
                             [1., 0., 0., 1., 1., 1., 1., 1., 0., 0.],
                             [1., 0., 0., 1., 1., 1., 1., 1., 0., 0.],
                             [1., 0., 0., 1., 1., 1., 1., 1., 0., 0.],
                             [1., 0., 0., 1., 1., 1., 1., 1., 0., 0.],
                             [1., 0., 0., 1., 1., 0., 0., 0., 0., 0.],
                             [1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                             [1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                             [1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
                             [1., 1., 1., 0., 0., 0., 0., 0., 0., 0.],
                             [1., 1., 1., 1., 1., 0., 0., 0., 0., 0.],
                             [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
                             [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
                             [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
                             [1., 1., 1., 1., 1., 0., 0., 0., 0., 0.],
                             [1., 1., 1., 0., 0., 0., 0., 0., 0., 0.],
                             [1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
                             [1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                             [1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                             [1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                             [1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                             [1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                             [1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                             [1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                             [1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                             [1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                             [1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
                             [0., 1., 1., 0., 0., 0., 0., 0., 0., 0.]])
        np.testing.assert_array_equal(mean, correct)
        np.testing.assert_array_equal(mean2, correct2)
    def test_half_power_radius(self):
        # half-power radius grows linearly with range for a 1-degree beam
        hpr = util.half_power_radius(np.arange(0, 100000, 10000), 1.0)
        res = np.array([0., 87.266, 174.533, 261.799, 349.066, 436.332,
                        523.599, 610.865, 698.132, 785.398])
        self.assertTrue(np.allclose(hpr, res))
    def test_filter_window_cartesian(self):
        # expected maximum-dilation of the synthetic image on a cartesian grid
        correct = np.array([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                            [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                            [0., 0., 1., 1., 0., 0., 0., 0., 0., 0.],
                            [0., 0., 1., 1., 0., 0., 0., 0., 0., 0.],
                            [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                            [0., 0., 0., 0., 0., 0., 1., 1., 1., 0.],
                            [0., 0., 0., 0., 0., 0., 1., 1., 1., 0.],
                            [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                            [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                            [0., 0., 0., 0., 1., 1., 1., 1., 0., 0.],
                            [0., 0., 0., 0., 1., 1., 1., 1., 0., 0.],
                            [0., 0., 0., 0., 1., 1., 1., 1., 0., 0.],
                            [0., 0., 0., 0., 1., 1., 1., 1., 0., 0.],
                            [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                            [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                            [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                            [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                            [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                            [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                            [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                            [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
                            [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
                            [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                            [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                            [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                            [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                            [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                            [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                            [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                            [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                            [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                            [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                            [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                            [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                            [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
                            [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]])
        self.assertTrue(np.allclose(
            util.filter_window_cartesian(self.img, 500., "maximum",
                                         np.array([250., 250])),
            correct))
class FindBboxIndicesTest(unittest.TestCase):
    """Tests for util.find_bbox_indices on a regular 6x9 coordinate grid."""
    def setUp(self):
        x_coords = np.linspace(500, 1000, num=6)
        y_coords = np.linspace(550, 950, num=9)
        mesh_x, mesh_y = np.meshgrid(x_coords, y_coords)
        self.grid = np.dstack((mesh_x, mesh_y))
        self.outside = [400, 400, 1100, 1100]
        self.inside1 = [599, 599, 901, 901]
        self.inside2 = [601, 601, 899, 899]
    def test_find_bbox_indices(self):
        """Boxes larger than, straddling, and inside cell edges clip correctly."""
        cases = ((self.outside, [0, 0, self.grid.shape[1], self.grid.shape[0]]),
                 (self.inside1, [0, 0, 5, 8]),
                 (self.inside2, [1, 1, 4, 7]))
        for bbox, expected in cases:
            bbind = util.find_bbox_indices(self.grid, bbox)
            self.assertTrue(np.array_equal(bbind, expected))
|
|
from Configuration import BasketballPlayerDatabase, Root_URL
from BasketballPlayer import BasketballPlayer
import pickle
from flask import Flask, request, render_template, redirect, send_from_directory
from htmltemplate import Template
from PIL import Image
import base64
import os
import uuid
import psycopg2
app = Flask(__name__)
# Secret key comes from the environment so it never lands in source control.
app.secret_key = os.getenv('basketball_secret_key')
# In-memory roster keyed by str(player uuid); synced to/from Postgres by
# save_database()/load_database().
Playerdict = {}
# Save player list to Database
def save_database():
    """Pickle Playerdict to the local database file, then mirror that pickle
    blob into the single-row "BasketballPickle" Postgres table.

    Fixes: files are now opened with `with` and the Postgres connection is
    always closed (the original leaked it on every call).
    """
    with open(BasketballPlayerDatabase, 'wb') as database_file:
        pickle.dump(Playerdict, database_file)
    with open(BasketballPlayerDatabase, 'rb') as database_file:
        database_file_pickle = database_file.read()
    conn = psycopg2.connect(os.getenv('basketball_database_url'))
    try:
        cursor = conn.cursor()
        # Single-row table: wipe the previous snapshot, then insert the new one.
        cursor.execute('truncate "BasketballPickle"')
        conn.commit()
        cursor.execute('INSERT INTO "BasketballPickle"(pickle_data) VALUES (%s)',
                       (psycopg2.Binary(database_file_pickle),))
        conn.commit()
    finally:
        conn.close()
# Load the Player list database
def load_database():
    """Refresh the global Playerdict from the pickle blob stored in Postgres.

    Best-effort: if the table/row does not exist yet (fresh install) or the
    database is unreachable, Playerdict is left unchanged and None is
    returned; otherwise the loaded dict is returned.

    Fixes: the bare `except:` (which also swallowed KeyboardInterrupt and
    SystemExit) is narrowed to Exception, and the connection is closed.
    """
    global Playerdict
    conn = None
    try:
        conn = psycopg2.connect(os.getenv('basketball_database_url'))
        cursor = conn.cursor()
        cursor.execute('select pickle_data from "BasketballPickle" LIMIT 1')
        mypickle = cursor.fetchone()[0]
        # NOTE: pickle.loads is only acceptable because this blob is written
        # solely by save_database(); never unpickle untrusted data.
        Playerdict = pickle.loads(mypickle)
        return Playerdict
    except Exception:
        pass  # no database to load yet - keep the in-memory dict
    finally:
        if conn is not None:
            conn.close()
def setup_database():
    """Create the one-column "BasketballPickle" table that persists the
    pickled player dictionary. Raises if the table already exists.

    Fix: the connection is now always closed (the original leaked it).
    """
    conn = psycopg2.connect(os.getenv('basketball_database_url'))
    try:
        cursor = conn.cursor()
        cursor.execute('''
    CREATE TABLE public."BasketballPickle"
    (
      pickle_data bytea
    )
    WITH (
      OIDS=FALSE
    );
    ''')
        conn.commit()
    finally:
        conn.close()
@app.route('/initdb')
def init_database():
    """One-time setup endpoint: create the pickle table, then show the list."""
    setup_database()
    return redirect('/player_list')
@app.route('/player', methods=['GET', 'POST'])
def new_player():
    """GET: render a blank player-editor form.
    POST: create a BasketballPlayer from the submitted form (with an optional
    image upload, thumbnailed to fit 500x555) and persist the roster.

    Fixes: the two near-identical 15-keyword constructor calls are collapsed
    into one kwargs dict, and the GET branch now reads from the handle opened
    by its `with` block (the original opened the template a second time and
    leaked that handle).
    """
    load_database()
    if request.method == 'POST':
        form = request.form
        player_kwargs = dict(name=form['Name_Input'],
                             age=form['Age_Input'],
                             height=form['Height_Input'],
                             weight=form['Weight_Input'],
                             team=form['Team_Input'],
                             number=form['Number_Input'],
                             position=form['Position_Input'],
                             points_per_game=form['Points_Per_Game_Input'],
                             assist_per_game=form['Assist_Per_Game_Input'],
                             blocks_per_game=form['Blocks_Per_Game_Input'],
                             rebounds_per_game=form['Rebounds_Per_Game_Input'],
                             steals_per_game=form['Steals_Per_Game_Input'],
                             fouls_per_game=form['Fouls_Per_Game_Input'],
                             freethrows_per_game=form['Freethrows_Per_Game_Input'],
                             championships_won=form['Championships_Won_Input'],
                             id=uuid.uuid4())
        file = request.files['Image_Input']
        if file.filename != '':
            # Thumbnail via a temporary file on disk, then keep only the bytes.
            img = Image.open(file)
            img.thumbnail((500, 555))
            img.save(file.filename)
            img.close()
            with open(file.filename, 'rb') as thumb_file:
                player_kwargs['image'] = thumb_file.read()
            os.remove(file.filename)
        new_basketball_player = BasketballPlayer(**player_kwargs)
        Playerdict[str(new_basketball_player.id)] = new_basketball_player
        save_database()
        return redirect('/player_list')
    if request.method == 'GET':
        with open('PlayerEditor.html') as player_editor_file:
            player_editor = player_editor_file.read()
        def render_PlayerAtr(node, playerobject):
            # Blank form: point it at the create endpoint and clear every field.
            node.ActionPathAtr.atts['action'] = '/player'
            for attr_name in ('NameAtr', 'TeamAtr', 'AgeAtr', 'JerseyAtr',
                              'PositionAtr', 'HeightAtr', 'WeightAtr',
                              'ChampionshipsAtr', 'PointsAtr', 'StealsAtr',
                              'BlocksAtr', 'ReboundsAtr', 'FoulsAtr',
                              'AssistAtr', 'FreeThrowAtr'):
                getattr(node.ActionPathAtr, attr_name).atts['value'] = ''
        player_editor_template = Template(player_editor)
        return player_editor_template.render(render_PlayerAtr, [])
@app.route('/player/<id>', methods=['GET', 'POST'])
def get_player(id):
    """GET: render the editor form pre-filled with the player's data.
    POST: update the player's fields (and optionally image) from the form.

    Fixes: the GET branch now reads from the handle opened by its `with`
    block (the original opened the template a second time and leaked that
    handle), and the 15 repetitive field assignments in the POST branch are
    driven by an attribute/form-field table.
    """
    global Playerdict
    load_database()
    if request.method == 'GET':
        playerobjectfromdictionary = Playerdict[id]
        if playerobjectfromdictionary.image is not None:
            # Inline the stored image bytes as a base64 data URI.
            data64 = u'data:%s;base64,%s' % (
                'image/jpg', base64.encodebytes(playerobjectfromdictionary.image).decode('utf8'))
        else:
            data64 = None
        with open('PlayerEditor.html') as player_editor_file:
            list_page = player_editor_file.read()
        def render_PlayerAtr(node, playerobject):
            node.ActionPathAtr.atts['action'] = '/player/' + str(playerobject.id)
            node.ActionPathAtr.NameAtr.atts['value'] = playerobject.name
            node.ActionPathAtr.TeamAtr.atts['value'] = playerobject.team
            node.ActionPathAtr.AgeAtr.atts['value'] = playerobject.age
            node.ActionPathAtr.JerseyAtr.atts['value'] = playerobject.number
            node.ActionPathAtr.PositionAtr.atts['value'] = playerobject.position
            node.ActionPathAtr.HeightAtr.atts['value'] = playerobject.height
            node.ActionPathAtr.WeightAtr.atts['value'] = playerobject.weight
            node.ActionPathAtr.ChampionshipsAtr.atts['value'] = playerobject.championships_won
            node.ActionPathAtr.PointsAtr.atts['value'] = playerobject.points_per_game
            node.ActionPathAtr.StealsAtr.atts['value'] = playerobject.steals_per_game
            node.ActionPathAtr.BlocksAtr.atts['value'] = playerobject.blocks_per_game
            node.ActionPathAtr.ReboundsAtr.atts['value'] = playerobject.rebounds_per_game
            node.ActionPathAtr.FoulsAtr.atts['value'] = playerobject.fouls_per_game
            node.ActionPathAtr.AssistAtr.atts['value'] = playerobject.assist_per_game
            node.ActionPathAtr.FreeThrowAtr.atts['value'] = playerobject.freethrows_per_game
            node.ActionPathAtr.DisplayImgAtr.atts['src'] = data64
        player_editor_template = Template(list_page)
        return player_editor_template.render(render_PlayerAtr, playerobjectfromdictionary)
    if request.method == 'POST':
        updated_basketball_player = Playerdict[id]
        file = request.files['Image_Input']
        read_image = None
        if file.filename != '':
            # Thumbnail via a temporary file on disk, then keep only the bytes.
            img = Image.open(file)
            img.thumbnail((500, 555))
            img.save(file.filename)
            img.close()
            with open(file.filename, 'rb') as thumb_file:
                read_image = thumb_file.read()
            os.remove(file.filename)
        # Copy every scalar field straight from the submitted form.
        for attr_name, field_name in (('name', 'Name_Input'),
                                      ('age', 'Age_Input'),
                                      ('height', 'Height_Input'),
                                      ('weight', 'Weight_Input'),
                                      ('team', 'Team_Input'),
                                      ('number', 'Number_Input'),
                                      ('position', 'Position_Input'),
                                      ('points_per_game', 'Points_Per_Game_Input'),
                                      ('assist_per_game', 'Assist_Per_Game_Input'),
                                      ('blocks_per_game', 'Blocks_Per_Game_Input'),
                                      ('rebounds_per_game', 'Rebounds_Per_Game_Input'),
                                      ('fouls_per_game', 'Fouls_Per_Game_Input'),
                                      ('steals_per_game', 'Steals_Per_Game_Input'),
                                      ('freethrows_per_game', 'Freethrows_Per_Game_Input'),
                                      ('championships_won', 'Championships_Won_Input')):
            setattr(updated_basketball_player, attr_name, request.form[field_name])
        if read_image is not None:
            updated_basketball_player.image = read_image
        Playerdict[id] = updated_basketball_player
        save_database()
        return redirect('/player_list')
@app.route('/player/<id>/delete')
def delete_player(id):
    """Drop the player with the given id from the roster and persist."""
    global Playerdict
    load_database()
    if request.method == 'GET':
        Playerdict.pop(id)
        save_database()
        return redirect('/player_list')
@app.route('/')
def main_page():
    """Root URL just forwards to the player list."""
    return redirect('/player_list')
@app.route('/player_list')
def player_list():
    """Render the roster table; each row carries edit and delete links.

    Fixes: reads from the handle opened by the `with` block (the original
    opened PlayerList.html a second time and leaked that handle), and looks
    each player up once per row instead of 18 times.
    """
    load_database()
    with open('PlayerList.html') as player_list_file:
        list_page = player_list_file.read()
    def render_template2(node):
        # One table row per entry in the roster dict.
        node.PlayerAtr.repeat(render_PlayerAtr, Playerdict)
    def render_PlayerAtr(node, playersection):
        player = Playerdict[playersection]
        node.NameAtr.text = player.name
        node.TeamAtr.text = player.team
        node.AgeAtr.text = player.age
        node.JerseyAtr.text = player.number
        node.PositionAtr.text = player.position
        node.HeightAtr.text = player.height
        node.WeightAtr.text = player.weight
        node.ChampionshipsAtr.text = player.championships_won
        node.PointsAtr.text = player.points_per_game
        node.StealsAtr.text = player.steals_per_game
        node.BlocksAtr.text = player.blocks_per_game
        node.ReboundsAtr.text = player.rebounds_per_game
        node.FoulsAtr.text = player.fouls_per_game
        node.AssistAtr.text = player.assist_per_game
        node.FreeThrowAtr.text = player.freethrows_per_game
        node.IdAtr.text = player.id
        node.Delete.atts['href'] = '/player/' + str(player.id) + '/delete'
        node.Edit.atts['href'] = '/player/' + str(player.id)
    player_list_template = Template(list_page)
    return player_list_template.render(render_template2)
@app.route('/<path:path>')
def send_js(path):
    """Serve any other path as a static file from the app directory."""
    return send_from_directory('', path)
'''
if __name__ == '__main__':
from cherrypy import wsgiserver
d = wsgiserver.WSGIPathInfoDispatcher({'/': app})
server = wsgiserver.CherryPyWSGIServer(('192.168.1.235', 5000), d)
try:
load_database()
server.start()
except KeyboardInterrupt:
server.stop()
'''
if __name__ == '__main__':
    # Dev entry point: warm the in-memory roster from Postgres, then run
    # Flask's debug server.
    load_database()
    app.run(debug=True)
|
|
# Copyright (c) 2012 Santosh Philip
# Copyright (c) 2016 Jamie Bull
# =======================================================================
# Distributed under the MIT License.
# (See accompanying file LICENSE or copy at
# http://opensource.org/licenses/MIT)
# =======================================================================
"""Sub class Bunch to represent an IDF object.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import copy
import itertools
from munch import Munch as Bunch
from eppy.bunchhelpers import matchfieldnames
import eppy.function_helpers as fh
class BadEPFieldError(AttributeError):
    """An Exception for bad EP fields (subclass of AttributeError)."""
class RangeError(ValueError):
    """An Exception for out-of-range values (subclass of ValueError)."""
def almostequal(first, second, places=7, printit=True):
    """
    Test if two values are equal to a given number of places.
    This is based on python's unittest so may be covered by Python's
    license.
    """
    if first == second:
        return True
    difference = round(abs(second - first), places)
    if difference == 0:
        return True
    if printit:
        print(difference)
        print("notalmost: %s != %s to %i places" % (first, second, places))
    return False
def somevalues(ddtt):
    """returns some values - the Name, Construction_Name and obj of *ddtt*"""
    name = ddtt.Name
    construction_name = ddtt.Construction_Name
    return name, construction_name, ddtt.obj
def extendlist(lst, i, value=''):
    """extend the list in place (padding with *value*) so index i exists"""
    shortfall = i - len(lst) + 1
    if shortfall > 0:
        lst.extend([value] * shortfall)
def return42(self, *args, **kwargs):
    """Proof of concept stub that always answers 42 - to be removed."""
    return 42
def addfunctions(abunch):
    """add functions to epbunch

    Populates abunch['__functions'] with helpers from eppy.function_helpers,
    selected by the object's type key (abunch.obj[0], upper-cased), and
    returns abunch.
    """
    # proof of concept - remove
    abunch['__functions'].update({'return42':return42})
    abunch['__functions'].update({'buildingname':fh.buildingname})
    # proof of concept
    key = abunch.obj[0].upper()
    #-----------------
    # TODO : alternate strategy to avoid listing the objkeys in snames
    # check if epbunch has field "Zone_Name" or "Building_Surface_Name"
    # and is in group u'Thermal Zones and Surfaces'
    # then it is likely to be a surface.
    # of course we need to recode for surfaces that do not have coordinates :-(
    # or we can filter those out since they do not have
    # the field "Number_of_Vertices"
    snames = [
        "BuildingSurface:Detailed",
        "Wall:Detailed",
        "RoofCeiling:Detailed",
        "Floor:Detailed",
        "FenestrationSurface:Detailed",
        "Shading:Site:Detailed",
        "Shading:Building:Detailed",
        "Shading:Zone:Detailed", ]
    snames = [sname.upper() for sname in snames]
    if key in snames:
        # geometry helpers for surface-like objects
        func_dict = {
            'area': fh.area,
            'height': fh.height, # not working correctly
            'width': fh.width, # not working correctly
            'azimuth': fh.azimuth,
            'tilt': fh.tilt,
            'coords': fh.getcoords, # needed for debugging
        }
        abunch.__functions.update(func_dict)
    #-----------------
    # NOTE(review): bare attribute access below has no visible effect here -
    # looks like leftover/dead code; confirm intent before removing.
    abunch.getfieldidd
    names = [
        "CONSTRUCTION",
        "MATERIAL",
        "MATERIAL:AIRGAP",
        "MATERIAL:INFRAREDTRANSPARENT",
        "MATERIAL:NOMASS",
        "MATERIAL:ROOFVEGETATION",
        "WINDOWMATERIAL:BLIND",
        "WINDOWMATERIAL:GLAZING",
        "WINDOWMATERIAL:GLAZING:REFRACTIONEXTINCTIONMETHOD",
        "WINDOWMATERIAL:GAP",
        "WINDOWMATERIAL:GAS",
        "WINDOWMATERIAL:GASMIXTURE",
        "WINDOWMATERIAL:GLAZINGGROUP:THERMOCHROMIC",
        "WINDOWMATERIAL:SCREEN",
        "WINDOWMATERIAL:SHADE",
        "WINDOWMATERIAL:SIMPLEGLAZINGSYSTEM",
    ]
    if key in names:
        # thermal-property helpers for construction/material objects
        func_dict = {
            'rvalue': fh.rvalue,
            'ufactor': fh.ufactor,
            'rvalue_ip': fh.rvalue_ip,# quick fix for Santosh. Needs to thought thru
            'ufactor_ip': fh.ufactor_ip,# quick fix for Santosh. Needs to thought thru
            'heatcapacity': fh.heatcapacity,
        }
        abunch.__functions.update(func_dict)
    # =====
    # code for references
    #-----------------
    # add function zonesurfaces
    if key == 'ZONE':
        func_dict = {'zonesurfaces':fh.zonesurfaces}
        abunch.__functions.update(func_dict)
    #-----------------
    # add function subsurfaces
    # going to cheat here a bit
    # check if epbunch has field "Zone_Name"
    # and is in group u'Thermal Zones and Surfaces'
    # then it is likely to be a surface attached to a zone
    fields = abunch.fieldnames
    try:
        # NOTE(review): passes the literal string 'key', not the variable
        # `key` - verify this is intended.
        group = abunch.getfieldidd('key')['group']
    except KeyError as e: # some pytests don't have group
        group = None
    if group == u'Thermal Zones and Surfaces':
        if "Zone_Name" in fields:
            func_dict = {'subsurfaces':fh.subsurfaces}
            abunch.__functions.update(func_dict)
    return abunch
class EpBunch(Bunch):
    """
    Fields, values, and descriptions of fields in an EnergyPlus IDF object
    stored in a `bunch` which is a `dict` extended to allow access to dict
    fields as attributes as well as by keys.
    """
    def __init__(self, obj, objls, objidd, *args, **kwargs):
        # obj/objls/objidd are parallel lists indexed by field position.
        super(EpBunch, self).__init__(*args, **kwargs)
        self.obj = obj # field values (exposed as the `fieldvalues` property)
        self.objls = objls # field names (exposed as the `fieldnames` property)
        self.objidd = objidd # field metadata (minimum, maximum, type, etc.)
        self.theidf = None # pointer to the idf this epbunch belongs to
        # This is None if there is no idf - a standalone epbunch
        # This will be set by Idf_MSequence
        self['__functions'] = {} # initialize the pseudo-field functions
        addfunctions(self)
    @property
    def fieldnames(self):
        """Friendly name for objls.
        """
        return self.objls
    @property
    def fieldvalues(self):
        """Friendly name for obj.
        """
        return self.obj
    def checkrange(self, fieldname):
        """Check if the value for a field is within the allowed range.
        """
        return checkrange(self, fieldname)
    def getrange(self, fieldname):
        """Get the allowed range of values for a field.
        """
        return getrange(self, fieldname)
    def getfieldidd(self, fieldname):
        """get the idd dict for this field
        Will return {} if the fieldname does not exist"""
        return getfieldidd(self, fieldname)
    def getfieldidd_item(self, fieldname, iddkey):
        """return an item from the fieldidd, given the iddkey
        will return an empty list if it does not have the iddkey
        or if the fieldname does not exist"""
        return getfieldidd_item(self, fieldname, iddkey)
    def get_retaincase(self, fieldname):
        """check if the field should retain case"""
        return get_retaincase(self, fieldname)
    def isequal(self, fieldname, value, places=7):
        """return True if the field == value
        Will retain case if get_retaincase == True
        for real value will compare to decimal 'places'
        """
        return isequal(self, fieldname, value, places=places)
    def getreferingobjs(self, iddgroups=None, fields=None):
        """Get a list of objects that refer to this object"""
        return getreferingobjs(self, iddgroups=iddgroups, fields=fields)
    def get_referenced_object(self, fieldname):
        """
        Get an object referred to by a field in another object.
        For example an object of type Construction has fields for each layer, each
        of which refers to a Material. This functions allows the object
        representing a Material to be fetched using the name of the layer.
        Returns the first item found since if there is more than one matching item,
        it is a malformed IDF.
        Parameters
        ----------
        referring_object : EpBunch
            The object which contains a reference to another object,
        fieldname : str
            The name of the field in the referring object which contains the
            reference to another object.
        Returns
        -------
        EpBunch
        """
        return get_referenced_object(self, fieldname)
    def __setattr__(self, name, value):
        # Attribute writes are resolved in order: pseudo-field functions,
        # aliases, bookkeeping keys, then real IDF fields.
        try:
            origname = self['__functions'][name]
            # TODO: unit test never hits here so what is it for?
            self[origname] = value
        except KeyError:
            pass
        try:
            name = self['__aliases'][name] # get original name of the alias
        except KeyError:
            pass
        if name in ('__functions', '__aliases'): # just set the new value
            self[name] = value
            return None
        elif name in ('obj', 'objls', 'objidd', 'theidf'): # let Bunch handle it
            super(EpBunch, self).__setattr__(name, value)
            return None
        elif name in self.fieldnames: # set the value, extending if needed
            i = self.fieldnames.index(name)
            try:
                self.fieldvalues[i] = value
            except IndexError:
                # the values list is shorter than the names list; pad it out
                extendlist(self.fieldvalues, i)
                self.fieldvalues[i] = value
        else:
            astr = "unable to find field %s" % (name, )
            raise BadEPFieldError(astr) # TODO: could raise AttributeError
    def __getattr__(self, name):
        # Same resolution order as __setattr__: functions, aliases,
        # bookkeeping keys, then real IDF fields.
        try:
            func = self['__functions'][name]
            return func(self)
        except KeyError:
            pass
        try:
            name = self['__aliases'][name]
        except KeyError:
            pass
        if name == '__functions':
            return self['__functions']
        elif name in ('__aliases', 'obj', 'objls', 'objidd', 'theidf'):
            # unit test
            return super(EpBunch, self).__getattr__(name)
        elif name in self.fieldnames:
            i = self.fieldnames.index(name)
            try:
                return self.fieldvalues[i]
            except IndexError:
                # field declared in the idd but not present in the object
                return ''
        else:
            astr = "unable to find field %s" % (name, )
            raise BadEPFieldError(astr)
    def __getitem__(self, key):
        # Dict-style access: bookkeeping keys go to Bunch, everything else
        # is treated as an IDF field name.
        if key in ('obj', 'objls', 'objidd',
                   '__functions', '__aliases', 'theidf'):
            return super(EpBunch, self).__getitem__(key)
        elif key in self.fieldnames:
            i = self.fieldnames.index(key)
            try:
                return self.fieldvalues[i]
            except IndexError:
                # field declared in the idd but not present in the object
                return ''
        else:
            astr = "unknown field %s" % (key, )
            raise BadEPFieldError(astr)
    def __setitem__(self, key, value):
        # Dict-style assignment, mirroring __getitem__.
        if key in ('obj', 'objls', 'objidd',
                   '__functions', '__aliases', 'theidf'):
            super(EpBunch, self).__setitem__(key, value)
            return None
        elif key in self.fieldnames:
            i = self.fieldnames.index(key)
            try:
                self.fieldvalues[i] = value
            except IndexError:
                # the values list is shorter than the names list; pad it out
                extendlist(self.fieldvalues, i)
                self.fieldvalues[i] = value
        else:
            astr = "unknown field %s" % (key, )
            raise BadEPFieldError(astr)
    def __repr__(self):
        """print this as an idf snippet"""
        lines = [str(val) for val in self.obj]
        comments = [comm.replace('_', ' ') for comm in self.objls]
        lines[0] = "%s," % (lines[0], ) # comma after first line
        for i, line in enumerate(lines[1:-1]):
            lines[i + 1] = ' %s,' % (line, ) # indent and comma
        lines[-1] = ' %s;' % (lines[-1], ) # ';' after last line
        lines = [line.ljust(26) for line in lines] # ljust the lines
        filler = '%s !- %s'
        nlines = [filler % (line, comm) for line,
                  comm in zip(lines[1:], comments[1:])] # adds comments to line
        nlines.insert(0, lines[0]) # first line without comment
        astr = '\n'.join(nlines)
        return '\n%s\n' % (astr, )
    def __str__(self):
        """same as __repr__"""
        # needed if YAML is installed. See issue 67
        # unit test
        return self.__repr__()
    def __dir__(self):
        # expose IDF field names and pseudo-field functions to dir()/tab-completion
        fnames = self.fieldnames
        func_names = self['__functions'].keys()
        return super(EpBunch, self).__dir__() + fnames + func_names
def getrange(bch, fieldname):
    """Return the allowed range for `fieldname` as a dict.

    The result always has the keys 'maximum', 'minimum', 'maximum<',
    'minimum>' and 'type'; constraints absent from the idd come back as
    None.  Numeric limits are converted to float or int according to the
    field's declared type.
    """
    limit_keys = ['maximum', 'minimum', 'maximum<', 'minimum>', 'type']
    field_index = bch.objls.index(fieldname)
    # deep-copy so that setdefault below cannot mutate the shared idd
    fielddct = copy.deepcopy(bch.objidd[field_index])
    therange = {akey: fielddct.setdefault(akey, None) for akey in limit_keys}
    if therange['type']:
        therange['type'] = therange['type'][0]
    # pick the converter matching the declared numeric type, if any
    if therange['type'] == 'real':
        caster = float
    elif therange['type'] == 'integer':
        caster = int
    else:
        caster = None
    if caster is not None:
        for akey in limit_keys[:-1]:
            if therange[akey]:
                therange[akey] = caster(therange[akey][0])
    return therange
def checkrange(bch, fieldname):
    """Return the field's value after validating it against the idd range.

    Raises RangeError when the value violates any declared limit:
    'maximum' / 'minimum' are inclusive bounds, 'maximum<' / 'minimum>'
    are exclusive bounds.
    """
    fieldvalue = bch[fieldname]
    therange = bch.getrange(fieldname)
    # PEP 8: compare to None with `is not`, not `!=`
    if therange['maximum'] is not None:
        if fieldvalue > therange['maximum']:
            astr = "Value %s is not less or equal to the 'maximum' of %s"
            astr = astr % (fieldvalue, therange['maximum'])
            raise RangeError(astr)
    if therange['minimum'] is not None:
        if fieldvalue < therange['minimum']:
            astr = "Value %s is not greater or equal to the 'minimum' of %s"
            astr = astr % (fieldvalue, therange['minimum'])
            raise RangeError(astr)
    if therange['maximum<'] is not None:
        if fieldvalue >= therange['maximum<']:
            astr = "Value %s is not less than the 'maximum<' of %s"
            astr = astr % (fieldvalue, therange['maximum<'])
            raise RangeError(astr)
    if therange['minimum>'] is not None:
        if fieldvalue <= therange['minimum>']:
            astr = "Value %s is not greater than the 'minimum>' of %s"
            astr = astr % (fieldvalue, therange['minimum>'])
            raise RangeError(astr)
    return fieldvalue
"""get the idd dict for this field
Will return {} if the fieldname does not exist"""
def getfieldidd(bch, fieldname):
"""get the idd dict for this field
Will return {} if the fieldname does not exist"""
# print(bch)
try:
fieldindex = bch.objls.index(fieldname)
except ValueError as e:
return {} # the fieldname does not exist
# so there is no idd
fieldidd = bch.objidd[fieldindex]
return fieldidd
def getfieldidd_item(bch, fieldname, iddkey):
    """Return one entry from the field's idd metadata.

    Returns an empty list if the field does not exist or its metadata
    has no `iddkey` entry.
    """
    # dict.get replaces the original try/except KeyError (which also
    # bound an unused exception variable)
    return getfieldidd(bch, fieldname).get(iddkey, [])
def get_retaincase(bch, fieldname):
    """Return True when the idd marks this field as case-sensitive."""
    return 'retaincase' in bch.getfieldidd(fieldname)
def isequal(bch, fieldname, value, places=7):
    """Return True if the field matches `value`.

    Numeric ('real'/'integer') fields are compared to `places` decimal
    places; everything else is compared as text, case-insensitively
    unless the field is marked retaincase.
    """
    def _compare_text(obj, fname, val):
        # case-sensitive only when the idd says so
        if obj.get_retaincase(fname):
            return obj[fname] == val
        return obj[fname].upper() == val.upper()
    fieldidd = bch.getfieldidd(fieldname)
    try:
        ftype = fieldidd['type'][0]
        if ftype in ('real', 'integer'):
            return almostequal(bch[fieldname], float(value), places=places)
        return _compare_text(bch, fieldname, value)
    except KeyError:
        # no declared type -> fall back to the text comparison
        return _compare_text(bch, fieldname, value)
def getreferingobjs(referedobj, iddgroups=None, fields=None):
    """Get a list of objects that refer to this object.

    referedobj : EpBunch
        The object being referred to.  Its "Name" field idd must carry a
        'reference' list (a KeyError is raised otherwise).
    iddgroups : list, optional
        If given, only search objects whose idd 'group' is in this list.
    fields : list, optional
        If given, only inspect these field names on each candidate.
    """
    # pseudocode for code below
    # referringobjs = []
    # referedobj has: -> Name
    #                 -> reference
    # for each obj in idf:
    #     [optional filter -> objects in iddgroup]
    #     each field of obj:
    #         [optional filter -> field in fields]
    #         has object-list [refname]:
    #             if refname in reference:
    #                 if Name = field value:
    #                     referringobjs.append()
    referringobjs = []
    idf = referedobj.theidf
    referedidd = referedobj.getfieldidd("Name")
    references = referedidd['reference']
    idfobjs = idf.idfobjects.values()
    idfobjs = list(itertools.chain.from_iterable(idfobjs)) # flatten list
    if iddgroups: # optional filter
        idfobjs = [anobj for anobj in idfobjs
                   if anobj.getfieldidd('key')['group'] in iddgroups]
    for anobj in idfobjs:
        if not fields:
            # no field filter: scan every field of the candidate object
            thefields = anobj.objls
        else:
            thefields = fields
        for field in thefields:
            try:
                itsidd = anobj.getfieldidd(field)
            except ValueError as e:
                # NOTE(review): getfieldidd returns {} for unknown fields
                # rather than raising ValueError, so this handler looks
                # vestigial — confirm before removing.
                continue
            if 'object-list' in itsidd:
                # the field points into a reference list; match it against
                # the lists our "Name" participates in
                refname = itsidd['object-list'][0]
                if refname in references:
                    if referedobj.isequal('Name', anobj[field]):
                        referringobjs.append(anobj)
    return referringobjs
def get_referenced_object(referring_object, fieldname):
    """
    Get an object referred to by a field in another object.
    For example an object of type Construction has fields for each layer, each
    of which refers to a Material. This functions allows the object
    representing a Material to be fetched using the name of the layer.
    Returns the first item found since if there is more than one matching item,
    it is a malformed IDF.
    Parameters
    ----------
    referring_object : EpBunch
        The object which contains a reference to another object,
    fieldname : str
        The name of the field in the referring object which contains the
        reference to another object.
    Returns
    -------
    EpBunch
        The referenced object, or implicitly None when no match exists.
    """
    idf = referring_object.theidf
    # 'object-list' names the reference list(s) this field may point into
    object_list = referring_object.getfieldidd_item(fieldname, u'object-list')
    for obj_type in idf.idfobjects:
        for obj in idf.idfobjects[obj_type]:
            # candidate qualifies when its "Name" participates in one of
            # the reference lists this field points into
            valid_object_lists = obj.getfieldidd_item("Name", u'reference')
            if set(object_list).intersection(set(valid_object_lists)):
                referenced_obj_name = referring_object[fieldname]
                if obj.Name == referenced_obj_name:
                    return obj
    # falls through to an implicit None when nothing matches
|
|
#!/usr/bin/env python
import datetime
import logging
import os
import re
from urllib.parse import urljoin
from bs4 import BeautifulSoup
from utils import utils, inspector
# https://www.nsf.gov/oig/
archive = 1989  # earliest year with reports available

# options:
#   standard since/year options for a year range to fetch from.
#
# Notes for IG's web team:

AUDIT_REPORTS_URL = "https://www.nsf.gov/oig/reports/reviews.jsp"
SEMIANNUAL_REPORTS_URL = "https://www.nsf.gov/oig/reports/semiannual.jsp"
TESTIMONY_REPORTS_URL = "https://www.nsf.gov/oig/testimony.jsp"
CASE_REPORTS_URL = "https://www.nsf.gov/oig/case-closeout/results.jsp"

# POST payload that asks the case close-out search for every case.
CASE_REPORTS_DATA = {
  'sortAll': 'cn',
  'sballfrm': 'Search',
}

# Fallback publication dates for report rows that carry no date.
REPORT_PUBLISHED_MAP = {
  "HSN_Summary": datetime.datetime(2013, 9, 30),  # Estimated
}

# Raw strings: "\s" in a non-raw literal is an invalid escape sequence
# (DeprecationWarning since 3.6, SyntaxWarning from 3.12).
REPORT_LINK_TEXT = re.compile(r"Entire.+Document", re.DOTALL)
REPORT_LEADIN_TEXT = re.compile(r"Available\s+Formats:")
def run(options):
    """Scrape NSF OIG publications: audit reports, semiannual reports,
    case close-outs, and congressional testimony; save each via
    inspector.save_report."""
    year_range = inspector.year_range(options, archive)
    # Pull the audit reports
    doc = utils.beautifulsoup_from_url(AUDIT_REPORTS_URL)
    results = doc.select("#inner-content tr")
    if not results:
        raise inspector.NoReportsFoundError("National Science Foundation (audit reports)")
    for result in results:
        # ignore divider lines
        if result.select("img"):
            continue
        report = report_from(result, report_type='audit', year_range=year_range, base_url=AUDIT_REPORTS_URL)
        if report:
            inspector.save_report(report)
    # Pull the semiannual reports
    doc = utils.beautifulsoup_from_url(SEMIANNUAL_REPORTS_URL)
    results = doc.select("#inner-content li")
    if not results:
        raise inspector.NoReportsFoundError("National Science Foundation (semiannual reports)")
    for result in results:
        # skip empty list items
        if not result.text.strip():
            continue
        report = semiannual_report_from(result, year_range)
        if report:
            inspector.save_report(report)
    # Pull the case reports (the search form requires a POST)
    response = utils.scraper.post(
        url=CASE_REPORTS_URL,
        data=CASE_REPORTS_DATA,
    )
    doc = BeautifulSoup(response.content, "lxml")
    results = doc.select("#inner-content tr")
    if not results:
        raise inspector.NoReportsFoundError("National Science Foundation (case reports)")
    for index, result in enumerate(results):
        if not index or not result.text.strip():  # Skip the header row and empty rows
            continue
        report = case_report_from(result, CASE_REPORTS_URL, year_range)
        if report:
            inspector.save_report(report)
    # Pull the testimony
    doc = utils.beautifulsoup_from_url(TESTIMONY_REPORTS_URL)
    results = doc.select("#inner-content tr")
    if not results:
        raise inspector.NoReportsFoundError("National Science Foundation (testimony)")
    for result in results:
        # skip empty rows
        if not result.text.strip():
            continue
        report = report_from(result, report_type='testimony', year_range=year_range, base_url=TESTIMONY_REPORTS_URL)
        if report:
            inspector.save_report(report)
def report_from(result, report_type, year_range, base_url):
    """Build a report dict from one audit/testimony table row.

    Returns None for rows without a link, and returns nothing (implicit
    None) for reports published outside `year_range`.
    """
    anchor = result.find("a")
    if not anchor:
        logging.debug("Markup error, skipping: %s" % result)
        return None
    report_url = urljoin(base_url, anchor['href'])
    # default report id: the linked filename without its extension
    report_id, _ = os.path.splitext(report_url.split("/")[-1])
    title = " ".join(anchor.parent.text.split())
    cells = result.select("td")
    if cells:
        date_cell = cells[-1]
    else:
        # fall back to the alternate markup used on some pages
        date_cell = result.select("td.tabletext")[-1]
    if date_cell.text.strip():
        date_text, *extra_strings = date_cell.stripped_strings
        if extra_strings:
            # If an explicit report_id is listed, use that.
            report_id = extra_strings[0]
        date_text = date_text.replace(" ,", ",").strip()
        date_text = date_text.replace("Februray", "February")
        published_on = datetime.datetime.strptime(date_text, '%B %d, %Y')
    else:
        # No text in the last column. This is an incomplete row
        published_on = REPORT_PUBLISHED_MAP[report_id]
    if published_on.year not in year_range:
        logging.debug("[%s] Skipping, not in requested range." % report_url)
        return
    return {
        'inspector': "nsf",
        'inspector_url': "https://www.nsf.gov/oig/",
        'agency': "nsf",
        'agency_name': "National Science Foundation",
        'type': report_type,
        'report_id': report_id,
        'url': report_url,
        'title': title,
        'published_on': datetime.datetime.strftime(published_on, "%Y-%m-%d"),
    }
def case_report_from(result, landing_url, year_range):
    """Build a report dict from one row of the case close-out results.

    Returns nothing (implicit None) for reports outside `year_range`.
    """
    link = result.find("a")
    # Fix: resolve against the `landing_url` parameter, which was accepted
    # but ignored in favor of the hardcoded CASE_REPORTS_URL (the only
    # caller passes that same constant, so behavior is unchanged).
    report_url = urljoin(landing_url, link['href'])
    report_id = link.text
    title = result.contents[5].text.strip()
    # catch all until this can be more easily diagnosed
    if not title:
        title = "(Untitled)"
    published_on_text = result.contents[3].text
    published_on = datetime.datetime.strptime(published_on_text, '%m/%d/%y')
    if published_on.year not in year_range:
        logging.debug("[%s] Skipping, not in requested range." % report_url)
        return
    return {
        'inspector': "nsf",
        'inspector_url': "https://www.nsf.gov/oig/",
        'agency': "nsf",
        'agency_name': "National Science Foundation",
        'type': 'inspection',
        'report_id': report_id,
        'url': report_url,
        'title': title,
        'published_on': datetime.datetime.strftime(published_on, "%Y-%m-%d"),
    }
def semiannual_report_from(result, year_range):
    """Build a report dict from one semiannual-report list item,
    resolving landing pages to the actual PDF/TXT document when the
    link is not a direct file link.  Returns nothing (implicit None)
    for reports outside `year_range`."""
    link = result.find("a")
    report_url = urljoin(SEMIANNUAL_REPORTS_URL, link['href'])
    if not report_url.endswith((".pdf", ".txt")):
        # not a direct document link: fetch the landing page and locate
        # the real document on it
        landing_url = report_url
        # Since this page redirects sometimes, we need to see where it redirects
        # to so that we can resolve the relative urls later.
        landing_page_response = utils.scraper.get(landing_url)
        landing_url = landing_page_response.url
        landing_page = BeautifulSoup(landing_page_response.content, "lxml")
        report_leadin_text = landing_page.find(text=REPORT_LEADIN_TEXT)
        report_link_text = landing_page.find(text=REPORT_LINK_TEXT)
        report_link = None
        if report_leadin_text:
            # prefer the PDF link next to "Available Formats:", else TXT
            report_link = report_leadin_text.parent.find('a', text='PDF')
            if not report_link:
                report_link = report_leadin_text.parent.find('a', text='TXT')
        elif report_link_text:
            report_link = report_link_text.parent
        if report_link is None:
            raise Exception("No report link found on %s" % landing_url)
        # the href may be on the node itself, a child anchor, or a parent anchor
        if report_link.get('href'):
            relative_report_url = report_link['href']
        elif report_link.findChild("a"):
            relative_report_url = report_link.findChild("a")['href']
        elif report_link.findParent("a"):
            relative_report_url = report_link.findParent("a")['href']
        # NOTE(review): if none of the three branches above matched,
        # `relative_report_url` is unbound and the next line raises
        # NameError — presumably unreachable in practice, but confirm.
        report_url = urljoin(landing_url, relative_report_url)
    report_filename = report_url.split("/")[-1]
    report_id, _ = os.path.splitext(report_filename)
    title = "Semiannual Report - {}".format(link.text)
    published_on_text = link.text.strip()
    published_on = datetime.datetime.strptime(published_on_text, '%B %Y')
    if published_on.year not in year_range:
        logging.debug("[%s] Skipping, not in requested range." % report_url)
        return
    report = {
        'inspector': "nsf",
        'inspector_url': "https://www.nsf.gov/oig/",
        'agency': "nsf",
        'agency_name': "National Science Foundation",
        'type': 'semiannual_report',
        'report_id': report_id,
        'url': report_url,
        'title': title,
        'published_on': datetime.datetime.strftime(published_on, "%Y-%m-%d"),
    }
    return report
# Conventional entry-point guard (replaces the side-effecting
# conditional expression `utils.run(run) if ... else None`).
if __name__ == "__main__":
    utils.run(run)
|
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds the nullable `published` DateField to
    the `Rulebook` model (table `dnd_rulebook`)."""
    def forwards(self, orm):
        # Adding field 'Rulebook.published'
        db.add_column('dnd_rulebook', 'published', self.gf('django.db.models.fields.DateField')(null=True, blank=True), keep_default=False)
    def backwards(self, orm):
        # Deleting field 'Rulebook.published'
        db.delete_column('dnd_rulebook', 'published')
    # Frozen ORM snapshot auto-generated by South; describes every model of
    # the 'dnd' app at the time of this migration.  Do not edit by hand.
    models = {
        'dnd.characterclass': {
            'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'rulebook'),)", 'object_name': 'CharacterClass'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
            'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'prestige': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
            'short_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'})
        },
        'dnd.characterclassvariant': {
            'Meta': {'object_name': 'CharacterClassVariant'},
            'advancement': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'advancement_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'character_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.CharacterClass']"}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"})
        },
        'dnd.dndedition': {
            'Meta': {'ordering': "['name']", 'object_name': 'DndEdition'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
            'system': ('django.db.models.fields.CharField', [], {'max_length': '16'})
        },
        'dnd.domain': {
            'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'rulebook'),)", 'object_name': 'Domain'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
            'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'})
        },
        'dnd.feat': {
            'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'rulebook'),)", 'object_name': 'Feat'},
            'benefit': ('django.db.models.fields.TextField', [], {}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'feat_categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.FeatCategory']", 'symmetrical': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
            'normal': ('django.db.models.fields.TextField', [], {}),
            'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '64', 'db_index': 'True'}),
            'special': ('django.db.models.fields.TextField', [], {}),
            'special_feat_prerequisites': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.SpecialFeatPrerequisite']", 'through': "orm['dnd.FeatSpecialFeatPrerequisite']", 'symmetrical': 'False'})
        },
        'dnd.featcategory': {
            'Meta': {'ordering': "['name']", 'object_name': 'FeatCategory'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'})
        },
        'dnd.featrequiresfeat': {
            'Meta': {'object_name': 'FeatRequiresFeat'},
            'additional_text': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'required_feat': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'required_by_feats'", 'to': "orm['dnd.Feat']"}),
            'source_feat': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'required_feats'", 'to': "orm['dnd.Feat']"})
        },
        'dnd.featrequiresskill': {
            'Meta': {'object_name': 'FeatRequiresSkill'},
            'feat': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'required_skills'", 'to': "orm['dnd.Feat']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'min_rank': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
            'skill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Skill']"})
        },
        'dnd.featspecialfeatprerequisite': {
            'Meta': {'object_name': 'FeatSpecialFeatPrerequisite'},
            'feat': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Feat']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'special_feat_prerequisite': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.SpecialFeatPrerequisite']"}),
            'value_1': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'value_2': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'})
        },
        'dnd.rulebook': {
            'Meta': {'ordering': "['name']", 'object_name': 'Rulebook'},
            'abbr': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'dnd_edition': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.DndEdition']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'official_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
            'published': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
            'year': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'})
        },
        'dnd.skill': {
            'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'rulebook'),)", 'object_name': 'Skill'},
            'action': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'armor_check_penalty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'base_skill': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
            'check': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '32', 'db_index': 'True'}),
            'special': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'synergy': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'trained_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'try_again': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'untrained': ('django.db.models.fields.TextField', [], {'blank': 'True'})
        },
        'dnd.specialfeatprerequisite': {
            'Meta': {'ordering': "['name']", 'object_name': 'SpecialFeatPrerequisite'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
            'print_format': ('django.db.models.fields.CharField', [], {'max_length': '64'})
        },
        'dnd.spell': {
            'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'rulebook'),)", 'object_name': 'Spell'},
            'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'arcane_focus_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'area': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
            'casting_time': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
            'class_levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.CharacterClass']", 'through': "orm['dnd.SpellClassLevel']", 'symmetrical': 'False'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            'descriptors': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.SpellDescriptor']", 'symmetrical': 'False'}),
            'divine_focus_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'domain_levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.Domain']", 'through': "orm['dnd.SpellDomainLevel']", 'symmetrical': 'False'}),
            'duration': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
            'effect': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
            'extra_components': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'material_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'meta_breath_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
            'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'range': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
            'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
            'saving_throw': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'school': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.SpellSchool']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '64', 'db_index': 'True'}),
            'somatic_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'spell_resistance': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'sub_school': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.SpellSubSchool']", 'null': 'True', 'blank': 'True'}),
            'target': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
            'true_name_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'verbal_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'xp_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'dnd.spellclasslevel': {
            'Meta': {'ordering': "['spell', 'level']", 'unique_together': "(('character_class', 'spell'),)", 'object_name': 'SpellClassLevel'},
            'character_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.CharacterClass']"}),
            'extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
            'spell': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Spell']"})
        },
        'dnd.spelldescriptor': {
            'Meta': {'ordering': "['name']", 'object_name': 'SpellDescriptor'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'})
        },
        'dnd.spelldomainlevel': {
            'Meta': {'ordering': "['spell', 'level']", 'unique_together': "(('domain', 'spell'),)", 'object_name': 'SpellDomainLevel'},
            'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Domain']"}),
            'extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
            'spell': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Spell']"})
        },
        'dnd.spellschool': {
            'Meta': {'ordering': "['name']", 'object_name': 'SpellSchool'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'})
        },
        'dnd.spellsubschool': {
            'Meta': {'ordering': "['name']", 'object_name': 'SpellSubSchool'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'})
        },
        'dnd.textfeatprerequisite': {
            'Meta': {'ordering': "['text']", 'object_name': 'TextFeatPrerequisite'},
            'feat': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Feat']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'text': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        }
    }
    complete_apps = ['dnd']
|
|
import logging
import calendar
import copy
import dateutil
import numpy
import datetime
from dimagi.ext.couchdbkit import Document, DocumentSchema, StringProperty, IntegerProperty, DateTimeProperty
from casexml.apps.case.models import CommCareCase
from corehq.apps.crud.models import AdminCRUDDocumentMixin
from corehq.apps.indicators.admin.crud import (IndicatorAdminCRUDManager,
FormAliasIndicatorAdminCRUDManager, FormLabelIndicatorAdminCRUDManager,
CaseDataInFormIndicatorAdminCRUDManager, FormDataInCaseAdminCRUDManager, CouchIndicatorCRUDManager,
BaseDynamicIndicatorCRUDManager, CombinedCouchIndicatorCRUDManager)
from couchforms.models import XFormInstance
from dimagi.utils.dates import DateSpan, add_months, months_between
from dimagi.utils.decorators.memoized import memoized
from dimagi.utils.modules import to_function
from dimagi.utils.couch.cache import cache_core
class DocumentNotInDomainError(Exception):
    """Raised when a document's domain does not match the indicator definition's domain."""
    pass
class DocumentMismatchError(Exception):
    """Raised when a document's type (case type or xmlns) does not match the indicator definition's."""
    pass
class IndicatorDefinition(Document, AdminCRUDDocumentMixin):
    """
    An Indicator Definition defines how to compute the indicator that lives
    in the namespaced computed_ property of a case or form.
    """
    namespace = StringProperty()
    domain = StringProperty()
    slug = StringProperty()
    version = IntegerProperty()
    class_path = StringProperty()
    last_modified = DateTimeProperty()

    _admin_crud_class = IndicatorAdminCRUDManager

    # Import path stamped on every saved doc so stored definitions can be
    # re-instantiated as the right subclass later (see get_current).
    _class_path = "corehq.apps.indicators.models"
    # Whether get_value yields multiple values; stored as 'multi_value' on
    # computed indicator dicts by document-based subclasses.
    _returns_multiple = False

    def __init__(self, _d=None, **kwargs):
        super(IndicatorDefinition, self).__init__(_d, **kwargs)
        # Record the concrete class's import path on the instance.
        self.class_path = self._class_path

    def __str__(self):
        return "\n\n%(class_name)s - Modified %(last_modified)s\n %(slug)s, domain: %(domain)s," \
               " version: %(version)s, namespace: %(namespace)s. ID: %(indicator_id)s." % {
            'class_name': self.__class__.__name__,
            'slug': self.slug,
            'domain': self.domain,
            'version': self.version,
            'namespace': self.namespace,
            'last_modified': (self.last_modified.strftime('%m %B %Y at %H:%M')
                              if self.last_modified else "Ages Ago"),
            'indicator_id': self._id,
        }

    @classmethod
    def key_properties(cls):
        """
        The ordering of these property names should match the ordering of what's emitted in the first part of
        the couch views used for fetching these indicators. These views currently are:
        - indicators/dynamic_indicator_definitions (Couch View Indicator Defs)
        - indicators/indicator_definitions (Form and Case Indicator Defs)
        """
        return ["namespace", "domain", "slug"]

    @classmethod
    def indicator_list_view(cls):
        # Couch view used to list definitions of this type.
        return "indicators/indicator_definitions"

    @classmethod
    def _generate_couch_key(cls, version=None, reverse=False, **kwargs):
        """
        Build view kwargs for the indicator list view. The key looks like
        [<space-joined names of key_properties present>, <their values...>];
        a startkey/endkey range when version is None, an exact key otherwise.
        With reverse=True, start and end are swapped for descending queries.
        """
        key = list()
        key_prefix = list()
        for p in cls.key_properties():
            k = kwargs.get(p)
            if k is not None:
                key_prefix.append(p)
                key.append(k)
        key = [" ".join(key_prefix)] + key
        couch_key = dict(startkey=key, endkey=key + [{}]) if version is None else dict(key=key + [version])
        if reverse:
            return dict(startkey=couch_key.get('endkey'), endkey=couch_key.get('startkey'))
        return couch_key

    @classmethod
    def increment_or_create_unique(cls, namespace, domain, slug=None, version=None, **kwargs):
        """
        If an indicator with the same namespace, domain, and version exists, create a new indicator with the
        version number incremented.
        # todo, this feels a bit buggy, so replace bulk copy indicators with
        # copy to domain at some point
        """
        couch_key = cls._generate_couch_key(
            namespace=namespace,
            domain=domain,
            slug=slug,
            reverse=True,
            **kwargs
        )
        # Fetch the highest-versioned existing definition, if any.
        existing_indicator = cls.view(
            cls.indicator_list_view(),
            reduce=False,
            include_docs=True,
            descending=True,
            limit=1,
            **couch_key
        ).first()
        if existing_indicator:
            version = existing_indicator.version + 1
        elif version is None:
            version = 1
        new_indicator = cls(
            version=version,
            namespace=namespace,
            domain=domain,
            slug=slug,
            **kwargs
        )
        new_indicator.last_modified = datetime.datetime.utcnow()
        new_indicator.save()
        return new_indicator

    @classmethod
    def copy_to_domain(cls, domain, doc, override=False):
        """
        This copies an indicator doc to the current domain. Intended to be used
        by the export indicators feature.
        :param domain: the name of the domain the indicator should be copied to
        :param doc: the dictionary of kwargs to create the indicator
        :param override: Whether to override the existing indicator
        :return: True if indicator was copied, False if not
        """
        # Strip couch bookkeeping fields so a fresh document is created.
        for reserved in ['_id', '_rev', 'last_modified']:
            if reserved in doc:
                del doc[reserved]
        couch_key = cls._generate_couch_key(
            domain=domain,
            reverse=True,
            **doc
        )
        existing_indicator = cls.view(
            cls.indicator_list_view(),
            reduce=False,
            include_docs=False,
            descending=True,
            limit=1,
            **couch_key
        ).first()
        if existing_indicator and not override:
            return False
        if existing_indicator:
            existing_indicator.delete()
        new_indicator = cls(domain=domain, **doc)
        new_indicator.last_modified = datetime.datetime.utcnow()
        new_indicator.save()
        return True

    @classmethod
    @memoized
    def get_current(cls, namespace, domain, slug, version=None, wrap=True, **kwargs):
        """
        Return the most recent matching definition. With wrap=True (default)
        the stored doc is re-instantiated via its recorded class path; with
        wrap=False the raw view row is returned. Returns None when wrapping
        fails or nothing matches.
        """
        couch_key = cls._generate_couch_key(
            namespace=namespace,
            domain=domain,
            slug=slug,
            version=version,
            reverse=True,
            **kwargs
        )
        results = cache_core.cached_view(cls.get_db(), cls.indicator_list_view(),
                                         cache_expire=60*60*6,
                                         reduce=False,
                                         include_docs=False,
                                         descending=True,
                                         **couch_key
                                         )
        doc = results[0] if results else None
        if wrap and doc:
            try:
                # The row's value is expected to hold the definition's class
                # path; fall back to this class when it's absent.
                doc_class = to_function(doc.get('value', "%s.%s" % (cls._class_path, cls.__name__)))
                doc_instance = doc_class.get(doc.get('id'))
                return doc_instance
            except Exception as e:
                logging.error("No matching documents found for indicator %s: %s" % (slug, e))
                return None
        return doc

    @classmethod
    def all_slugs(cls, namespace, domain, **kwargs):
        """Return every slug defined for this namespace and domain."""
        couch_key = cls._generate_couch_key(
            namespace=namespace,
            domain=domain,
            reverse=True,
            **kwargs
        )
        # Append ' slug' to the prefix element of both keys to select the
        # slug-emitting rows of the view.
        couch_key['startkey'][0] = couch_key.get('startkey', [])[0] + ' slug'
        couch_key['endkey'][0] = couch_key.get('endkey', [])[0] + ' slug'
        data = cls.view(cls.indicator_list_view(),
                        group=True,
                        group_level=cls.key_properties().index('slug') + 2,
                        descending=True,
                        **couch_key
                        ).all()
        # The slug is the last element of each grouped key.
        return [item.get('key', [])[-1] for item in data]

    @classmethod
    @memoized
    def get_all(cls, namespace, domain, version=None, **kwargs):
        """Return the current definition for every slug in the domain."""
        all_slugs = cls.all_slugs(namespace, domain, **kwargs)
        all_indicators = list()
        for slug in all_slugs:
            indicator = cls.get_current(namespace, domain, slug, version=version, **kwargs)
            if indicator and issubclass(indicator.__class__, cls):
                all_indicators.append(indicator)
        return all_indicators

    @classmethod
    def get_all_of_type(cls, namespace, domain, show_only_current=False):
        """
        Return definitions of exactly this class name, de-duplicated by
        slug/namespace plus case type, xmlns, or 'couch'.
        NOTE(review): show_only_current is currently unused here.
        """
        key = ["type", namespace, domain, cls.__name__]
        indicators = cls.view(
            cls.indicator_list_view(),
            reduce=False,
            include_docs=True,
            startkey=key,
            endkey=key + [{}]
        ).all()
        unique = {}
        for ind in indicators:
            if ind.base_doc == "CaseIndicatorDefinition":
                specific_doc = ind.case_type
            elif ind.base_doc == "FormIndicatorDefinition":
                specific_doc = ind.xmlns
            else:
                specific_doc = "couch"
            unique["%s.%s.%s" % (ind.slug, ind.namespace, specific_doc)] = ind
        return unique.values()

    @classmethod
    def get_nice_name(cls):
        # Human-readable name for this definition type.
        return "Indicator Definition"
class DynamicIndicatorDefinition(IndicatorDefinition):
    """
    Base class for indicators whose values are computed on demand rather
    than read from a document's computed_ property. Subclasses implement
    get_value and get_monthly_retrospective.
    """
    description = StringProperty()
    title = StringProperty()
    base_doc = "DynamicIndicatorDefinition"

    _admin_crud_class = BaseDynamicIndicatorCRUDManager

    @classmethod
    def indicator_list_view(cls):
        # Dynamic definitions live in their own couch view.
        return "indicators/dynamic_indicator_definitions"

    @property
    def date_display_format(self):
        # strftime format used when displaying month labels.
        return "%b. %Y"

    def get_first_day_of_month(self, year, month):
        """Return midnight on the first day of the given month."""
        return datetime.datetime(year, month, 1,
                                 hour=0, minute=0, second=0, microsecond=0)

    def get_last_day_of_month(self, year, month):
        """Return the last microsecond of the given month."""
        last_day = calendar.monthrange(year, month)[1]
        return datetime.datetime(year, month, last_day,
                                 hour=23, minute=59, second=59, microsecond=999999)

    def get_month_datespan(self, start, end=None):
        """
        start and end are (year, month) tuples
        """
        if end is None:
            end = start
        return DateSpan(
            self.get_first_day_of_month(start[0], start[1]),
            self.get_last_day_of_month(end[0], end[1]),
            format="%b %Y",
            inclusive=False
        )

    def get_first_days(self, current_month, num_previous_months, as_datespans=False):
        """
        Return (per-month dates-or-DateSpans, DateSpan covering the whole
        retrospective window ending at current_month or 'now').
        """
        enddate = current_month or datetime.datetime.utcnow()
        enddate = self.get_first_day_of_month(enddate.year, enddate.month)
        (start_year, start_month) = add_months(enddate.year, enddate.month, -num_previous_months)
        # NOTE(review): startdate is the *last* day of the start month;
        # presumably months_between only uses the (year, month) part -- confirm.
        startdate = self.get_last_day_of_month(start_year, start_month)
        months = months_between(startdate, enddate)
        if num_previous_months == 0:
            # Only the end month itself is reported in this case.
            months = [(enddate.year, enddate.month)]
        month_dates = []
        for year, month in months:
            if as_datespans:
                month_dates.append(self.get_month_datespan((year, month)))
            else:
                month_dates.append(self.get_first_day_of_month(year, month))
        datespan = self.get_month_datespan(
            (startdate.year, startdate.month),
            (enddate.year, enddate.month)
        )
        return month_dates, datespan

    def get_monthly_retrospective(self, user_ids=None, current_month=None,
                                  num_previous_months=12, return_only_dates=False,
                                  is_debug=False):
        """
        :param user_ids: List of CommCareUser Ids contributing to this indicator
        :param current_month: integer of the current month
        :param num_previous_months: number of months to be subtracted from the
        current month to get the full retrospective
        :param return_only_dates:
        :param is_debug: True if debugging the view
        :return: list of dictionaries with retrospective data
        """
        raise NotImplementedError

    def get_value(self, user_ids, datespan=None, is_debug=False):
        # Subclasses must compute the indicator's value for the given users.
        raise NotImplementedError
class CouchIndicatorDef(DynamicIndicatorDefinition):
    """
    This indicator defintion expects that it will deal with a couch view and an indicator key.
    If a user_id is provided when fetching the results, this definition will use:
    ["user", <domain_name>, <user_id>, <indicator_key>] as the main couch view key
    Otherwise it will use:
    ["all", <domain_name>, <indicator_key>]
    """
    couch_view = StringProperty()
    indicator_key = StringProperty()
    startdate_shift = IntegerProperty(default=0)
    enddate_shift = IntegerProperty(default=0)
    fixed_datespan_days = IntegerProperty(default=0)
    fixed_datespan_months = IntegerProperty(default=0)

    _admin_crud_class = CouchIndicatorCRUDManager

    @property
    @memoized
    def group_results_in_retrospective(self):
        """
        Determines whether or not to group results in the retrospective.
        Grouping is only valid when no date shifting/fixing is configured.
        """
        return not any(getattr(self, field) for field in ('startdate_shift', 'enddate_shift',
                                                          'fixed_datespan_days', 'fixed_datespan_months'))

    def _get_results_key(self, user_id=None):
        # Key layout matches the view keys described in the class docstring.
        prefix = "user" if user_id else "all"
        key = [prefix, self.domain]
        if user_id:
            key.append(user_id)
        key.append(self.indicator_key)
        return key

    def _apply_datespan_shifts(self, datespan):
        """
        Return a copy of datespan adjusted by fixed_datespan_days/months and
        startdate_shift/enddate_shift. The end date is clamped to 'now' and
        normalized to the last microsecond of its day.
        """
        if datespan and not isinstance(datespan, DateSpan):
            raise ValueError("datespan must be an instance of DateSpan")
        if datespan:
            datespan = copy.copy(datespan)
            now = datetime.datetime.utcnow()
            # make sure we don't go over the current day
            # remember, there is no timezone support for this yet
            if datespan.enddate > now:
                datespan.enddate = now
            datespan.enddate = datespan.enddate.replace(hour=23, minute=59, second=59, microsecond=999999)
            if self.fixed_datespan_days:
                datespan.startdate = datespan.enddate - datetime.timedelta(days=self.fixed_datespan_days,
                                                                           microseconds=-1)
            if self.fixed_datespan_months:
                # By making the assumption that the end date is always the end of the month
                # the first months adjustment is accomplished by moving the start date to
                # the beginning of the month. Any additional months are subtracted in the usual way
                start = self.get_first_day_of_month(datespan.enddate.year, datespan.enddate.month)
                start_year, start_month = add_months(start.year, start.month, -(self.fixed_datespan_months - 1))
                datespan.startdate = start.replace(year=start_year, month=start_month)
            if self.startdate_shift:
                datespan.startdate = datespan.startdate + datetime.timedelta(days=self.startdate_shift)
            if self.enddate_shift:
                datespan.enddate = datespan.enddate + datetime.timedelta(days=self.enddate_shift)
        return datespan

    def get_results_with_key(self, key, user_id=None, datespan=None, date_group_level=None, reduce=False):
        """
        Query the indicator couch view for the given key, optionally limited
        to a datespan and/or grouped by a date component.
        """
        view_kwargs = dict()
        if datespan:
            view_kwargs.update(
                startkey=key + datespan.startdate_key_utc,
                endkey=key + datespan.enddate_key_utc + [{}]
            )
        else:
            view_kwargs.update(
                startkey=key,
                endkey=key + [{}]
            )
        if date_group_level:
            # Non-date key parts: [prefix, domain, (user_id), indicator_key].
            base_level = 5 if user_id else 4
            view_kwargs.update(
                group=True,
                group_level=base_level + date_group_level
            )
        else:
            view_kwargs.update(
                reduce=reduce
            )
        # Pull Data from the MVP-only DB
        from mvp_docs.models import IndicatorXForm
        db = IndicatorXForm.get_db()
        section = self.couch_view.split('/')
        couch_view = "%s_indicators/%s" % (section[0], section[1])
        return cache_core.cached_view(db, couch_view, cache_expire=60 * 60 * 6, **view_kwargs)

    def get_raw_results(self, user_ids, datespan=False, date_group_level=False, reduce=False):
        """
        date_group_level can be 0 to group by year, 1 to group by month and 2 to group by day
        """
        datespan = self._apply_datespan_shifts(datespan)
        results = []
        for user_id in user_ids:
            key = self._get_results_key(user_id)
            results.extend(self.get_results_with_key(key, user_id, datespan, date_group_level, reduce))
        return results

    def get_value(self, user_ids, datespan=None, is_debug=False):
        """
        Sum the indicator over the given users. In debug mode the view is
        queried unreduced and (value, contributing doc ids) is returned.
        """
        results = self.get_raw_results(user_ids, datespan, reduce=not is_debug)
        if is_debug:
            contributing_ids = [r['id'] for r in results]
            value = len(contributing_ids)
            return value, contributing_ids
        value = 0
        for result in results:
            value += self._get_value_from_result(result)
        return value

    def _get_value_from_result(self, result):
        """
        Extract a numeric value from a view row (or list of rows). Reduced
        dict values may carry their total under '_total_unique' or
        '_sum_unique'; plain numeric values are summed directly.
        """
        value = 0
        if isinstance(result, dict):
            result = [result]
        for item in result:
            new_val = item.get('value')
            if isinstance(new_val, dict):
                if '_total_unique' in new_val:
                    value += new_val.get('_total_unique', 0)
                # BUGFIX: this previously read `elif '_sum_unique':`, a
                # constant-true string; the intended membership test is now
                # explicit (result unchanged since the fallback added 0).
                elif '_sum_unique' in new_val:
                    value += new_val.get('_sum_unique', 0)
            else:
                value += new_val
        return value

    def get_values_by_month(self, user_ids, datespan=None):
        """Return {year: {month: total}} using year+month grouping."""
        totals = dict()
        result = self.get_raw_results(user_ids, datespan, date_group_level=1)
        for item in result:
            key = item.get('key', [])
            if len(key) >= 2:
                value = self._get_value_from_result(item)
                # The last two key parts are the year and month.
                year = str(key[-2])
                month = str(key[-1])
                if not (month and year):
                    continue
                if year not in totals:
                    totals[year] = dict()
                if month not in totals[year]:
                    totals[year][month] = 0
                totals[year][month] += value
        return totals

    def get_values_by_year(self, user_ids, datespan=None):
        """Return {year: total} using year grouping."""
        totals = dict()
        result = self.get_raw_results(user_ids, datespan, date_group_level=0)
        for item in result:
            key = item.get('key', [])
            value = self._get_value_from_result(item)
            if len(key) >= 1:
                year = str(key[-1])
                if not year:
                    continue
                if year not in totals:
                    totals[year] = 0
                totals[year] += value
        return totals

    def get_monthly_retrospective(self, user_ids=None, current_month=None,
                                  num_previous_months=12, return_only_dates=False,
                                  is_debug=False):
        """
        Return one {'date', 'value'[, 'debug_data']} dict per month in the
        retrospective window.
        """
        if not isinstance(user_ids, list):
            user_ids = [user_ids]
        results_are_grouped = self.group_results_in_retrospective and not is_debug
        retro_months, datespan = self.get_first_days(current_month, num_previous_months,
                                                     as_datespans=not results_are_grouped)
        monthly_totals = {}
        if results_are_grouped and not return_only_dates:
            monthly_totals = self.get_values_by_month(user_ids, datespan)
        retrospective = []
        for i, this_month in enumerate(retro_months):
            startdate = this_month if results_are_grouped else this_month.startdate
            y = str(startdate.year)
            m = str(startdate.month)
            if return_only_dates:
                month_value = 0
            elif results_are_grouped:
                month_value = monthly_totals.get(y, {}).get(m, 0)
            else:
                month_value = self.get_value(user_ids, this_month, is_debug=is_debug)
            monthly_result = {
                'date': startdate,
            }
            if isinstance(month_value, tuple):
                # Debug mode: get_value returned (value, contributing ids).
                monthly_result['debug_data'] = month_value[1]
                month_value = month_value[0]
            monthly_result['value'] = month_value
            retrospective.append(monthly_result)
        return retrospective

    @classmethod
    def get_nice_name(cls):
        return "Simple Indicators"

    @classmethod
    def increment_or_create_unique(cls, namespace, domain,
                                   slug=None, version=None, **kwargs):
        if 'couch_view' in kwargs:
            # make sure that a viewname with trailing whitespace NEVER
            # gets created.
            kwargs['couch_view'] = kwargs['couch_view'].strip()
        # BUGFIX: return the created indicator; the parent classmethod
        # returns the new definition, but this override was dropping it.
        return super(CouchIndicatorDef, cls).increment_or_create_unique(
            namespace, domain, slug=slug, version=version, **kwargs
        )
class NoGroupCouchIndicatorDefBase(CouchIndicatorDef):
    """
    Base class for couch-view indicator definitions whose view rows cannot
    simply be counted/grouped for the monthly retrospective. Subclasses are
    responsible for providing their own get_value aggregation.
    """

    @property
    def group_results_in_retrospective(self):
        # Grouping never applies to these indicators.
        return False

    def get_value(self, user_ids, datespan=None, is_debug=False):
        raise NotImplementedError(
            "You must override the parent's get_value. "
            "Reduce / group will not work here."
        )
class CountUniqueCouchIndicatorDef(NoGroupCouchIndicatorDefBase):
    """
    Counts the number of distinct values emitted by the couch view.
    """

    def get_value(self, user_ids, datespan=None, is_debug=False):
        rows = self.get_raw_results(user_ids, datespan)
        distinct_values = {row['value'] for row in rows}
        count = len(distinct_values)
        if is_debug:
            # Also expose the distinct values themselves for inspection.
            return count, list(distinct_values)
        return count

    @classmethod
    def get_nice_name(cls):
        return "Count Unique Emitted Values"
class MedianCouchIndicatorDef(NoGroupCouchIndicatorDefBase):
    """
    Takes the median of the emitted values, which are assumed to be numbers.
    """

    def get_value(self, user_ids, datespan=None, is_debug=False):
        rows = self.get_raw_results(user_ids, datespan)
        # One value per emitting document; later rows win on duplicate ids.
        by_doc_id = {row['id']: row['value'] for row in rows}
        values = by_doc_id.values()
        median = numpy.median(values) if values else None
        if is_debug:
            return median, by_doc_id
        return median

    @classmethod
    def get_nice_name(cls):
        return "Median of Emitted Values"
class SumLastEmittedCouchIndicatorDef(NoGroupCouchIndicatorDefBase):
    """
    Expects each emitted value to look like:
        {
            _id: "<unique id string>",
            value: <number>,
        }
    and sums the most recently seen value for each unique _id.
    """

    def get_value(self, user_ids, datespan=None, is_debug=False):
        last_by_id = {}
        for row in self.get_raw_results(user_ids, datespan):
            emitted = row.get('value')
            if emitted:
                # Later rows overwrite earlier ones, keeping the last emit.
                last_by_id[emitted['_id']] = emitted['value']
        total = sum(last_by_id.values())
        if is_debug:
            return total, last_by_id.keys()
        return total

    @classmethod
    def get_nice_name(cls):
        return "Sum Last Emitted Unique Values"
class CombinedCouchViewIndicatorDefinition(DynamicIndicatorDefinition):
    """
    Ratio indicator built from two existing dynamic indicators, referenced
    by slug, as numerator and denominator.
    """
    numerator_slug = StringProperty()
    denominator_slug = StringProperty()

    _admin_crud_class = CombinedCouchIndicatorCRUDManager

    @property
    @memoized
    def numerator(self):
        # Current definition for the numerator slug.
        return self.get_current(self.namespace, self.domain, self.numerator_slug)

    @property
    @memoized
    def denominator(self):
        # Current definition for the denominator slug.
        return self.get_current(self.namespace, self.domain, self.denominator_slug)

    def get_value(self, user_ids, datespan=None, is_debug=False):
        """
        Return {'numerator', 'denominator', 'ratio'} (plus 'contributing_ids'
        in debug mode). ratio is None when the denominator is not positive.
        """
        numerator = self.numerator.get_value(user_ids, datespan, is_debug=is_debug)
        denominator = self.denominator.get_value(user_ids, datespan, is_debug=is_debug)
        debug_data = {}
        # In debug mode sub-indicators return (value, contributing ids).
        if isinstance(denominator, tuple):
            debug_data["denominator"] = denominator[1]
            denominator = denominator[0]
        if isinstance(numerator, tuple):
            debug_data["numerator"] = numerator[1]
            numerator = numerator[0]
        ratio = float(numerator)/float(denominator) if denominator > 0 else None
        value = {
            'numerator': numerator,
            'denominator': denominator,
            'ratio': ratio,
        }
        if is_debug:
            value['contributing_ids'] = debug_data
        return value

    def get_monthly_retrospective(self, user_ids=None, current_month=None,
                                  num_previous_months=12, return_only_dates=False,
                                  is_debug=False):
        """Combine the numerator's and denominator's retrospectives month by month."""
        numerator_retro = self.numerator.get_monthly_retrospective(
            user_ids, current_month, num_previous_months,
            return_only_dates, is_debug=is_debug)
        denominator_retro = self.denominator.get_monthly_retrospective(
            user_ids, current_month, num_previous_months,
            return_only_dates, is_debug=is_debug)
        combined_retro = []
        for i, denominator in enumerate(denominator_retro):
            numerator = numerator_retro[i]
            n_val = numerator.get('value', 0)
            d_val = denominator.get('value', 0)
            # NOTE(review): truthiness test here vs "> 0" in get_value;
            # presumably equivalent for non-negative counts -- confirm.
            ratio = float(n_val)/float(d_val) if d_val else None
            monthly_combined = {
                'date': denominator.get('date'),
                'numerator': n_val,
                'denominator': d_val,
                'ratio': ratio,
            }
            if is_debug:
                # NOTE(review): sub-retrospectives store debug info under
                # 'debug_data' (see CouchIndicatorDef), so these lookups may
                # always yield None -- verify intended key.
                monthly_combined.update({
                    'contributing_ids': {
                        'numerator': numerator.get('contributing_ids'),
                        'denominator': denominator.get('contributing_ids'),
                    },
                })
            combined_retro.append(monthly_combined)
        return combined_retro

    @classmethod
    def get_nice_name(cls):
        return "Combined Indicators (Ratio)"
class BaseDocumentIndicatorDefinition(IndicatorDefinition):
    """
    Indicator definitions that derive a value from a couchdbkit Document and
    store it in that document's computed_ property. XFormInstance and
    CommCareCase are the document types supported so far.
    """

    def get_clean_value(self, doc):
        """
        Add validation to whatever comes in as doc here...
        """
        if self.domain and doc.domain != self.domain:
            raise DocumentNotInDomainError
        return self.get_value(doc)

    def get_value(self, doc):
        raise NotImplementedError

    def get_existing_value(self, doc):
        """Return the value previously computed for this indicator, if any."""
        try:
            namespace_data = doc.computed_.get(self.namespace, {})
            return namespace_data.get(self.slug, {}).get('value')
        except AttributeError:
            return None

    def update_computed_namespace(self, computed, document):
        """
        Returns True if this document should be updated and saved with the new indicator definition.
        """
        existing = computed.get(self.slug)
        if isinstance(existing, dict):
            # Recompute only when the stored version is stale.
            needs_update = existing.get('version') != self.version
        else:
            needs_update = True
        if needs_update:
            computed[self.slug] = self.get_doc_dict(document)
        return computed, needs_update

    def get_doc_dict(self, document):
        """Build the dict stored under computed_[namespace][slug]."""
        return {
            'version': self.version,
            'value': self.get_clean_value(document),
            'multi_value': self._returns_multiple,
            'type': self.doc_type,
            'updated': datetime.datetime.utcnow(),
        }
class FormDataIndicatorDefinitionMixin(DocumentSchema):
    """
    Mixin for indicator definitions that need to pull values out of form data.
    """
    xmlns = StringProperty()

    def get_from_form(self, form_data, question_id):
        """
        question_id must be formatted like: path.to.question_id
        """
        if isinstance(question_id, basestring):
            question_id = question_id.split('.')
        # Walk down the nested form data one path segment at a time.
        while len(question_id) > 0 and form_data:
            form_data = form_data.get(question_id[0])
            question_id = question_id[1:]
        # Unwrap couch-style text nodes.
        if isinstance(form_data, dict) and '#text' in form_data:
            return form_data['#text']
        return form_data
class FormIndicatorDefinition(BaseDocumentIndicatorDefinition, FormDataIndicatorDefinitionMixin):
    """
    Defines an indicator that lives in the computed_ property of an
    XFormInstance document. The 'doc' passed through get_value and
    get_clean_value should be an XFormInstance.
    """
    base_doc = "FormIndicatorDefinition"

    def get_clean_value(self, doc):
        """Validate the form's type and xmlns before computing the value."""
        if not (isinstance(doc, XFormInstance) and issubclass(doc.__class__, XFormInstance)):
            raise ValueError("The document provided must be an instance of XFormInstance.")
        if doc.xmlns != self.xmlns:
            raise DocumentMismatchError("The xmlns of the form provided does not match the one for this definition.")
        return super(FormIndicatorDefinition, self).get_clean_value(doc)

    @classmethod
    def key_properties(cls):
        # xmlns participates in the couch key for form indicators.
        return ["namespace", "domain", "xmlns", "slug"]
class FormLabelIndicatorDefinition(FormIndicatorDefinition):
    """
    Labels forms of a given XMLNS with this definition's slug so the form
    can be identified by that label in couch views. For example, an XMLNS of
    http://domain.commcarehq.org/child/visit/ could map to child_visit_form.
    """
    _admin_crud_class = FormLabelIndicatorAdminCRUDManager

    def get_value(self, doc):
        # The label *is* the slug; no form data is inspected.
        return self.slug

    @classmethod
    def get_label_for_xmlns(cls, namespace, domain, xmlns):
        """Return the most recently registered label for this xmlns, or ""."""
        lookup_key = [namespace, domain, xmlns]
        row = cls.get_db().view(
            "indicators/form_labels",
            reduce=False,
            descending=True,
            limit=1,
            startkey=lookup_key + [{}],
            endkey=lookup_key,
        ).one()
        if row:
            return row['value']
        return ""

    @classmethod
    def get_nice_name(cls):
        return "Form Label Indicators"
class FormDataAliasIndicatorDefinition(FormIndicatorDefinition):
    """
    Aliases a question_id on a per-domain basis so that indicator reports
    spanning multiple domains can reference the same data in a standardized
    way even when each domain's application uses different question IDs.
    """
    question_id = StringProperty()

    _admin_crud_class = FormAliasIndicatorAdminCRUDManager

    def get_value(self, doc):
        # Resolve the aliased question path against this form's data.
        return self.get_from_form(doc.form, self.question_id)

    @classmethod
    def get_nice_name(cls):
        return "Form Alias Indicators"
class CaseDataInFormIndicatorDefinition(FormIndicatorDefinition):
    """
    Use this indicator when you want to pull the value from a case property of a case related to a form
    and include it as an indicator for that form.
    This currently assumes the pre-2.0 model of CommCareCases and that there is only one related case per form.
    This should probably get rewritten to handle forms that update more than one type of case or for sub-cases.
    """
    case_property = StringProperty()

    _admin_crud_class = CaseDataInFormIndicatorAdminCRUDManager

    def get_value(self, doc):
        """Return the related case's case_property value, or None."""
        case = self._get_related_case(doc)
        if case is not None and hasattr(case, str(self.case_property)):
            return getattr(case, str(self.case_property))
        return None

    def _get_related_case(self, xform):
        """Fetch the case referenced by the form's case block, if any."""
        form_data = xform.form
        related_case_id = form_data.get('case', {}).get('@case_id')
        if related_case_id:
            try:
                return CommCareCase.get(related_case_id)
            except Exception:
                # Best effort: a missing or broken case just means no value.
                pass
        return None

    def update_computed_namespace(self, computed, document):
        computed, is_update = super(CaseDataInFormIndicatorDefinition,
                                    self).update_computed_namespace(computed, document)
        if not is_update:
            # check to see if the related case has changed information
            case = self._get_related_case(document)
            if case is not None:
                try:
                    indicator_updated = computed.get(self.slug, {}).get('updated')
                    if indicator_updated and not isinstance(indicator_updated, datetime.datetime):
                        # Stored timestamps may come back as strings; parse them.
                        indicator_updated = dateutil.parser.parse(indicator_updated)
                    # Recompute when the case changed after the last update.
                    is_update = not indicator_updated or case.server_modified_on > indicator_updated
                    if is_update:
                        computed[self.slug] = self.get_doc_dict(document)
                except ValueError:
                    # Unparseable timestamp: leave the indicator unchanged.
                    pass
        return computed, is_update

    @classmethod
    def get_nice_name(cls):
        return "Related Case Property Indicators"
class CaseIndicatorDefinition(BaseDocumentIndicatorDefinition):
    """
    Defines an indicator that lives in the computed_ property of a
    CommCareCase document. The 'doc' passed through get_value and
    get_clean_value should be a CommCareCase.
    """
    case_type = StringProperty()
    base_doc = "CaseIndicatorDefinition"

    def get_clean_value(self, doc):
        """Validate the case's type before computing the value."""
        if not (isinstance(doc, CommCareCase) and issubclass(doc.__class__, CommCareCase)):
            raise ValueError("The document provided must be an instance of CommCareCase.")
        if doc.type != self.case_type:
            raise DocumentMismatchError("The case provided should be a '%s' type case." % self.case_type)
        return super(CaseIndicatorDefinition, self).get_clean_value(doc)

    @classmethod
    def key_properties(cls):
        # case_type participates in the couch key for case indicators.
        return ["namespace", "domain", "case_type", "slug"]
class FormDataInCaseIndicatorDefinition(CaseIndicatorDefinition, FormDataIndicatorDefinitionMixin):
    """
    Use this for when you want to grab all forms with the relevant xmlns in a case's xform_ids property and
    include a property from those forms as an indicator for this case.
    """
    question_id = StringProperty()

    _returns_multiple = True
    _admin_crud_class = FormDataInCaseAdminCRUDManager

    def get_related_forms(self, case):
        """Return the case's forms whose xmlns matches this definition's."""
        if not isinstance(case, CommCareCase) or not issubclass(case.__class__, CommCareCase):
            raise ValueError("case is not an instance of CommCareCase.")
        all_forms = case.get_forms()
        all_forms.reverse()
        return [form for form in all_forms if form.xmlns == self.xmlns]

    def get_value(self, doc):
        """
        Merge question values from all related forms into the existing
        computed value dict, keyed by form id.
        """
        existing_value = self.get_existing_value(doc)
        if not isinstance(existing_value, dict):
            existing_value = dict()
        for form in self.get_related_forms(doc):
            # BUGFIX: this condition previously read
            #   isinstance(form, XFormInstance) or not issubclass(doc.__class__, XFormInstance)
            # -- `doc` is the case (never an XFormInstance subclass), so the
            # test was vacuously true. The intent is to check the *form*.
            if isinstance(form, XFormInstance):
                form_data = form.form
                existing_value[form.get_id] = {
                    'value': self.get_from_form(form_data, self.question_id),
                    'timeEnd': self.get_from_form(form_data, 'meta.timeEnd'),
                    'received_on': form.received_on,
                }
        return existing_value

    def update_computed_namespace(self, computed, document):
        computed, is_update = super(FormDataInCaseIndicatorDefinition,
                                    self).update_computed_namespace(computed, document)
        if not is_update:
            # check to see if more relevant forms have been added to the case since the last time
            # this indicator was computed
            related_forms = self.get_related_forms(document)
            if related_forms:
                try:
                    value_list = computed.get(self.slug, {}).get('value', {})
                    saved_form_ids = value_list.keys()
                    current_ids = set([f._id for f in related_forms])
                    is_update = len(current_ids.difference(saved_form_ids)) > 0
                    if is_update:
                        computed[self.slug] = self.get_doc_dict(document)
                except Exception as e:
                    logging.error("Error updating computed namespace for doc %s: %s" % (document._id, e))
        return computed, is_update

    @classmethod
    def get_nice_name(cls):
        return "Related Form Question ID Indicators"
|
|
"""Run DESeq analysis."""
from pathlib import Path
from plumbum import TEE
from resolwe.process import (
BooleanField,
Cmd,
DataField,
FileField,
FloatField,
GroupField,
IntegerField,
JsonField,
ListField,
Persistence,
Process,
SchedulingClass,
StringField,
)
class Deseq(Process):
    """Run DESeq2 analysis.

    The DESeq2 package estimates variance-mean dependence in count data
    from high-throughput sequencing assays and tests for differential
    expression based on a model using the negative binomial
    distribution. See
    [here](https://www.bioconductor.org/packages/release/bioc/manuals/DESeq2/man/DESeq2.pdf)
    and [here](http://bioconductor.org/packages/devel/bioc/vignettes/DESeq2/inst/doc/DESeq2.html)
    for more information.
    """

    slug = "differentialexpression-deseq2"
    name = "DESeq2"
    process_type = "data:differentialexpression:deseq2"
    version = "3.5.0"
    category = "Differential Expression"
    scheduling_class = SchedulingClass.BATCH
    persistence = Persistence.CACHED
    requirements = {
        "expression-engine": "jinja",
        "executor": {
            "docker": {"image": "public.ecr.aws/genialis/resolwebio/rnaseq:6.0.0"}
        },
        "resources": {"cores": 1, "memory": 8192},
    }
    data_name = "Differential expression (case vs. control)"

    class Input:
        """Input fields to process Deseq."""

        case = ListField(
            DataField("expression"),
            label="Case",
            description="Case samples (replicates)",
        )
        control = ListField(
            DataField("expression"),
            label="Control",
            description="Control samples (replicates)",
        )
        create_sets = BooleanField(
            label="Create gene sets",
            description="After calculating differential gene "
            "expressions create gene sets for up-regulated genes, "
            "down-regulated genes and all genes.",
            default=False,
        )
        logfc = FloatField(
            label="Log2 fold change threshold for gene sets",
            description="Genes above Log2FC are considered as "
            "up-regulated and genes below -Log2FC as down-regulated.",
            default=1.0,
            hidden="!create_sets",
        )
        fdr = FloatField(
            label="FDR threshold for gene sets",
            default=0.05,
            hidden="!create_sets",
        )

        class Options:
            """Options."""

            beta_prior = BooleanField(
                label="Beta prior",
                default=False,
                description="Whether or not to put a zero-mean normal prior "
                "on the non-intercept coefficients.",
            )

        class FilterOptions:
            """Filtering options."""

            count = BooleanField(
                label="Filter genes based on expression count",
                default=True,
            )
            min_count_sum = IntegerField(
                label="Minimum gene expression count summed over all samples",
                default=10,
                description="Filter genes in the expression matrix input. "
                "Remove genes where the expression count sum over all samples "
                "is below the threshold.",
                hidden="!filter_options.count",
            )
            cook = BooleanField(
                label="Filter genes based on Cook's distance",
                default=False,
            )
            cooks_cutoff = FloatField(
                label="Threshold on Cook's distance",
                required=False,
                description="If one or more samples have Cook's distance "
                "larger than the threshold set here, the p-value for the row "
                "is set to NA. If left empty, the default threshold of 0.99 "
                "quantile of the F(p, m-p) distribution is used, where p is "
                "the number of coefficients being fitted and m is the number "
                "of samples. This test excludes Cook's distance of samples "
                "belonging to experimental groups with only two samples.",
                hidden="!filter_options.cook",
            )
            independent = BooleanField(
                label="Apply independent gene filtering",
                default=True,
            )
            alpha = FloatField(
                label="Significance cut-off used for optimizing independent "
                "gene filtering",
                default=0.1,
                description="The value should be set to adjusted p-value "
                "cut-off (FDR).",
                hidden="!filter_options.independent",
            )

        # BUGFIX: the two group labels were swapped. ``Options`` holds the
        # analysis option (beta prior) and ``FilterOptions`` holds the gene
        # filtering options, so the labels must read accordingly.
        options = GroupField(
            Options, label="Differential expression analysis options"
        )
        filter_options = GroupField(FilterOptions, label="Gene filtering options")

    class Output:
        """Output fields of the process Deseq."""

        raw = FileField("Differential expression")
        de_json = JsonField(label="Results table (JSON)")
        de_file = FileField(label="Results table (file)")
        count_matrix = FileField(label="Count matrix")
        source = StringField(label="Gene ID database")
        species = StringField(label="Species")
        build = StringField(label="Build")
        feature_type = StringField(label="Feature type")

    def run(self, inputs, outputs):
        """Run the analysis."""
        expressions = inputs.case + inputs.control

        if any(e.type == "data:expression:microarray:" for e in expressions):
            self.error("Microarray expressions are not supported.")

        # All samples must share the same annotation source, species, build
        # and feature type, otherwise their gene IDs are not comparable.
        for exp in expressions:
            if exp.output.source != expressions[0].output.source:
                self.error(
                    "Input samples are of different Gene ID databases: "
                    f"{exp.output.source} and {expressions[0].output.source}."
                )
            if exp.output.species != expressions[0].output.species:
                self.error(
                    "Input samples are of different Species: "
                    f"{exp.output.species} and {expressions[0].output.species}."
                )
            if exp.output.build != expressions[0].output.build:
                self.error(
                    "Input samples are of different Build: "
                    f"{exp.output.build} and {expressions[0].output.build}."
                )
            if exp.output.feature_type != expressions[0].output.feature_type:
                self.error(
                    "Input samples are of different Feature type: "
                    f"{exp.output.feature_type} and {expressions[0].output.feature_type}."
                )

        # A sample may appear in only one of the two groups.
        for case in inputs.case:
            if case in inputs.control:
                self.error(
                    "Case and Control groups must contain unique "
                    f"samples. Sample {case.sample_name} is in both Case "
                    "and Control group."
                )

        self.progress(0.1)

        # Select input files and the --format flag for deseq.R based on the
        # (homogeneous) expression type; mixed or unknown types fall through
        # to the generic raw read-counts branch.
        if all(e.type == "data:expression:nanostring:" for e in expressions):
            params = [
                "--cases",
                [e.output.exp.path for e in inputs.case],
                "--controls",
                [e.output.exp.path for e in inputs.control],
                "--format",
                "nanostring",
            ]
        elif all(e.type == "data:expression:rsem:" for e in expressions):
            params = [
                "--cases",
                [e.output.genes.path for e in inputs.case],
                "--controls",
                [e.output.genes.path for e in inputs.control],
                "--format",
                "rsem",
            ]
        elif all(e.type == "data:expression:salmon:" for e in expressions):
            params = [
                "--cases",
                [e.output.quant.path for e in inputs.case],
                "--controls",
                [e.output.quant.path for e in inputs.control],
                "--format",
                "salmon",
                "--tx2gene",
                inputs.case[0].output.txdb.path,
            ]
        else:
            if not all(hasattr(e.output.rc, "path") for e in expressions):
                self.error("Read counts are required when using DESeq2.")
            params = [
                "--cases",
                [e.output.rc.path for e in inputs.case],
                "--controls",
                [e.output.rc.path for e in inputs.control],
            ]

        if inputs.options.beta_prior:
            params.append("--beta-prior")
        if inputs.filter_options.count:
            params.extend(["--min-count-sum", inputs.filter_options.min_count_sum])
        # BUGFIX: only forward the Cook's cutoff when it is actually set;
        # the field is optional and, per its description, an empty value
        # means deseq.R should use its default 0.99 quantile threshold.
        # Previously ``None`` was passed on the command line.
        if inputs.filter_options.cook and inputs.filter_options.cooks_cutoff is not None:
            params.extend(["--cooks-cutoff", inputs.filter_options.cooks_cutoff])
        if inputs.filter_options.independent:
            params.extend(["--independent", "--alpha", inputs.filter_options.alpha])

        return_code, _, _ = Cmd["deseq.R"][params] & TEE(retcode=None)
        # BUGFIX: the return code was captured but never checked, so a failed
        # DESeq2 run continued and failed later with a confusing message.
        if return_code:
            self.error("Error computing differential expression (DESeq2).")

        self.progress(0.95)

        deseq_output = "diffexp_deseq2.tab"
        args = [
            deseq_output,
            "de_data.json",
            "de_file.tab.gz",
            "--gene_id",
            "gene_id",
            "--fdr",
            "padj",
            "--pvalue",
            "pvalue",
            "--logfc",
            "log2FoldChange",
            "--stat",
            "stat",
        ]
        return_code, _, _ = Cmd["parse_diffexp.py"][args] & TEE(retcode=None)
        if return_code:
            self.error("Error while parsing DGE results.")

        (Cmd["gzip"][deseq_output])()
        (Cmd["gzip"]["count_matrix.tab"])()

        outputs.raw = f"{deseq_output}.gz"
        outputs.de_json = "de_data.json"
        outputs.de_file = "de_file.tab.gz"
        outputs.count_matrix = "count_matrix.tab.gz"
        outputs.source = expressions[0].output.source
        outputs.species = expressions[0].output.species
        outputs.build = expressions[0].output.build
        outputs.feature_type = expressions[0].output.feature_type

        if inputs.create_sets:
            out_dir = "gene_sets"
            gene_set_args = [
                "--dge_file",
                "de_file.tab.gz",
                "--out_dir",
                out_dir,
                "--analysis_name",
                self.name,
                "--tool",
                "DESeq2",
                "--logfc",
                inputs.logfc,
                "--fdr",
                inputs.fdr,
            ]
            return_code, _, _ = Cmd["create_gene_sets.py"][gene_set_args] & TEE(
                retcode=None
            )
            if return_code:
                self.error("Error while creating gene sets.")

            # Move each produced gene set to the working directory and
            # register it through a spawned upload process.
            for gene_file in sorted(Path(out_dir).glob("*.tab.gz")):
                gene_file.rename(Path() / gene_file.name)
                process_inputs = {
                    "src": str(gene_file.name),
                    "source": expressions[0].output.source,
                    "species": expressions[0].output.species,
                }
                self.run_process("upload-geneset", process_inputs)
|
|
#!/usr/bin/env python
"""
Rules for building C/API module with f2py2e.
Here is a skeleton of a new wrapper function (13Dec2001):
wrapper_function(args)
declarations
get_python_arguments, say, `a' and `b'
get_a_from_python
if (successful) {
get_b_from_python
if (successful) {
callfortran
if (successful) {
put_a_to_python
if (successful) {
put_b_to_python
if (successful) {
buildvalue = ...
}
}
}
}
cleanup_b
}
cleanup_a
return buildvalue
"""
"""
Copyright 1999,2000 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/08/30 08:58:42 $
Pearu Peterson
"""
# Strip the CVS keyword wrapper, leaving just the revision number.
__version__ = "$Revision: 1.129 $"[10:-1]

# NOTE: this import rebinds the name ``__version__`` from the string above
# to the f2py2e ``__version__`` module; the string is only used transiently.
import __version__
f2py_version = __version__.version
import pprint
import sys
import time
import types
import copy
# Shorthand writers used throughout f2py: errors to stderr, progress
# messages to stdout, and a pretty-printer for debugging rule dictionaries.
errmess=sys.stderr.write
outmess=sys.stdout.write
show=pprint.pprint
from auxfuncs import *
import capi_maps
from capi_maps import *
import cfuncs
import common_rules
import use_rules
import f90mod_rules
import func2subr
# Global option flags, filled in by the f2py driver.
options = {}

# ``sepdict`` gives the separator used when joining the accumulated code
# snippets of each generated-C section; all sections below are joined with
# newlines. (Historical, disabled variant:
#   for k in ['need_cfuncs']: sepdict[k]=','  )
sepdict = {}
for section in ('decl', 'frompyobj', 'cleanupfrompyobj',
                'topyarr', 'method',
                'pyobjfrom', 'closepyobjfrom',
                'freemem',
                'userincludes',
                'includes0', 'includes', 'typedefs', 'typedefs_generated',
                'cppmacros', 'cfuncs', 'callbacks',
                'latexdoc',
                'restdoc',
                'routine_defs', 'externroutines',
                'initf2pywraphooks',
                'commonhooks', 'initcommonhooks',
                'f90modhooks', 'initf90modhooks'):
    sepdict[section] = '\n'
#################### Rules for C/API module #################
# Skeleton of the generated ``#modulename#module.c`` file. Every ``#...#``
# token is a placeholder substituted by the rule engine, and gentitle()
# (from auxfuncs) renders a C banner comment. The ``$R"""+"""evision:$`` and
# ``$D"""+"""ate:$`` keywords are split across string concatenations on
# purpose, so that CVS does not expand them in this source file itself.
module_rules={
# The whole C module body; do not reflow — whitespace inside this string is
# emitted verbatim into the generated C source.
'modulebody':"""\
/* File: #modulename#module.c
* This file is auto-generated with f2py (version:#f2py_version#).
* f2py is a Fortran to Python Interface Generator (FPIG), Second Edition,
* written by Pearu Peterson <pearu@cens.ioc.ee>.
* See http://cens.ioc.ee/projects/f2py2e/
* Generation date: """+time.asctime(time.localtime(time.time()))+"""
* $R"""+"""evision:$
* $D"""+"""ate:$
* Do not edit this file directly unless you know what you are doing!!!
*/
#ifdef __cplusplus
extern \"C\" {
#endif
"""+gentitle("See f2py2e/cfuncs.py: includes")+"""
#includes#
#includes0#
"""+gentitle("See f2py2e/rules.py: mod_rules['modulebody']")+"""
static PyObject *#modulename#_error;
static PyObject *#modulename#_module;
"""+gentitle("See f2py2e/cfuncs.py: typedefs")+"""
#typedefs#
"""+gentitle("See f2py2e/cfuncs.py: typedefs_generated")+"""
#typedefs_generated#
"""+gentitle("See f2py2e/cfuncs.py: cppmacros")+"""
#cppmacros#
"""+gentitle("See f2py2e/cfuncs.py: cfuncs")+"""
#cfuncs#
"""+gentitle("See f2py2e/cfuncs.py: userincludes")+"""
#userincludes#
"""+gentitle("See f2py2e/capi_rules.py: usercode")+"""
#usercode#
/* See f2py2e/rules.py */
#externroutines#
"""+gentitle("See f2py2e/capi_rules.py: usercode1")+"""
#usercode1#
"""+gentitle("See f2py2e/cb_rules.py: buildcallback")+"""
#callbacks#
"""+gentitle("See f2py2e/rules.py: buildapi")+"""
#body#
"""+gentitle("See f2py2e/f90mod_rules.py: buildhooks")+"""
#f90modhooks#
"""+gentitle("See f2py2e/rules.py: module_rules['modulebody']")+"""
"""+gentitle("See f2py2e/common_rules.py: buildhooks")+"""
#commonhooks#
"""+gentitle("See f2py2e/rules.py")+"""
static FortranDataDef f2py_routine_defs[] = {
#routine_defs#
\t{NULL}
};
static PyMethodDef f2py_module_methods[] = {
#pymethoddef#
\t{NULL,NULL}
};
PyMODINIT_FUNC init#modulename#(void) {
\tint i;
\tPyObject *m,*d, *s;
\tm = #modulename#_module = Py_InitModule(\"#modulename#\", f2py_module_methods);
\tPyFortran_Type.ob_type = &PyType_Type;
\timport_array();
\tif (PyErr_Occurred())
\t\t{PyErr_SetString(PyExc_ImportError, \"can't initialize module #modulename# (failed to import numpy)\"); return;}
\td = PyModule_GetDict(m);
\ts = PyString_FromString(\"$R"""+"""evision: $\");
\tPyDict_SetItemString(d, \"__version__\", s);
\ts = PyString_FromString(\"This module '#modulename#' is auto-generated with f2py (version:#f2py_version#).\\nFunctions:\\n\"\n#docs#\".\");
\tPyDict_SetItemString(d, \"__doc__\", s);
\t#modulename#_error = PyErr_NewException (\"#modulename#.error\", NULL, NULL);
\tPy_DECREF(s);
\tfor(i=0;f2py_routine_defs[i].name!=NULL;i++)
\t\tPyDict_SetItemString(d, f2py_routine_defs[i].name,PyFortranObject_NewAsAttr(&f2py_routine_defs[i]));
#initf2pywraphooks#
#initf90modhooks#
#initcommonhooks#
#interface_usercode#
#ifdef F2PY_REPORT_ATEXIT
\tif (! PyErr_Occurred())
\t\ton_exit(f2py_report_on_exit,(void*)\"#modulename#\");
#endif
}
#ifdef __cplusplus
}
#endif
""",
# Separators used when joining the documentation sections of the module.
'separatorsfor':{'latexdoc':'\n\n',
'restdoc':'\n\n'},
# LaTeX documentation skeleton for the module.
'latexdoc':['\\section{Module \\texttt{#texmodulename#}}\n',
'#modnote#\n',
'#latexdoc#'],
# reStructuredText documentation skeleton for the module.
'restdoc':['Module #modulename#\n'+'='*80,
'\n#restdoc#']
}
# Default entries applied to every module: each section gets an "eof" marker
# comment so the generated C is self-delimiting, and ``modnote`` is filled
# from the user's note when present. Predicate keys (hasnote, l_not(...) from
# auxfuncs) select which value applies to a given routine.
defmod_rules=[
{'body':'/*eof body*/',
'method':'/*eof method*/',
'externroutines':'/*eof externroutines*/',
'routine_defs':'/*eof routine_defs*/',
'initf90modhooks':'/*eof initf90modhooks*/',
'initf2pywraphooks':'/*eof initf2pywraphooks*/',
'initcommonhooks':'/*eof initcommonhooks*/',
'latexdoc':'',
'restdoc':'',
'modnote':{hasnote:'#note#',l_not(hasnote):''},
}
]
# Skeleton of one wrapper function in the generated C/API module. The 'body'
# template parses the Python arguments, converts them (#frompyobj#), calls
# the Fortran routine, builds the return value, and cleans up in reverse
# order. Whitespace inside the template string is emitted verbatim.
routine_rules={
'separatorsfor':sepdict,
'body':"""
#begintitle#
static char doc_#apiname#[] = \"\\\nFunction signature:\\n\\\n\t#docreturn##name#(#docsignatureshort#)\\n\\\n#docstrsigns#\";
/* #declfortranroutine# */
static PyObject *#apiname#(const PyObject *capi_self,
PyObject *capi_args,
PyObject *capi_keywds,
#functype# (*f2py_func)(#callprotoargument#)) {
\tPyObject * volatile capi_buildvalue = NULL;
\tvolatile int f2py_success = 1;
#decl#
\tstatic char *capi_kwlist[] = {#kwlist##kwlistopt##kwlistxa#NULL};
#usercode#
#routdebugenter#
#ifdef F2PY_REPORT_ATEXIT
f2py_start_clock();
#endif
\tif (!PyArg_ParseTupleAndKeywords(capi_args,capi_keywds,\\
\t\t\"#argformat##keyformat##xaformat#:#pyname#\",\\
\t\tcapi_kwlist#args_capi##keys_capi##keys_xa#))\n\t\treturn NULL;
#frompyobj#
/*end of frompyobj*/
#ifdef F2PY_REPORT_ATEXIT
f2py_start_call_clock();
#endif
#callfortranroutine#
if (PyErr_Occurred())
f2py_success = 0;
#ifdef F2PY_REPORT_ATEXIT
f2py_stop_call_clock();
#endif
/*end of callfortranroutine*/
\t\tif (f2py_success) {
#pyobjfrom#
/*end of pyobjfrom*/
\t\tCFUNCSMESS(\"Building return value.\\n\");
\t\tcapi_buildvalue = Py_BuildValue(\"#returnformat#\"#return#);
/*closepyobjfrom*/
#closepyobjfrom#
\t\t} /*if (f2py_success) after callfortranroutine*/
/*cleanupfrompyobj*/
#cleanupfrompyobj#
\tif (capi_buildvalue == NULL) {
#routdebugfailure#
\t} else {
#routdebugleave#
\t}
\tCFUNCSMESS(\"Freeing memory.\\n\");
#freemem#
#ifdef F2PY_REPORT_ATEXIT
f2py_stop_clock();
#endif
\treturn capi_buildvalue;
}
#endtitle#
""",
'routine_defs':'#routine_def#',
'initf2pywraphooks':'#initf2pywraphook#',
'externroutines':'#declfortranroutine#',
# Docstring fragments for the wrapper (short and long signatures).
'doc':'#docreturn##name#(#docsignature#)',
'docshort':'#docreturn##name#(#docsignatureshort#)',
'docs':'"\t#docreturn##name#(#docsignature#)\\n"\n',
# Helper C snippets (from cfuncs.py) that this template depends on.
'need':['arrayobject.h','CFUNCSMESS','MINMAX'],
'cppmacros':{debugcapi:'#define DEBUGCFUNCS'},
# LaTeX / reST documentation skeletons for one wrapped routine.
'latexdoc':['\\subsection{Wrapper function \\texttt{#texname#}}\n',
"""
\\noindent{{}\\verb@#docreturn##name#@{}}\\texttt{(#latexdocsignatureshort#)}
#routnote#
#latexdocstrsigns#
"""],
'restdoc':['Wrapped function ``#name#``\n'+'-'*80,
]
}
################## Rules for C/API function ##############
# Rule list applied per routine. Each dict is merged into the wrapper only
# if its '_check' predicate (from auxfuncs) accepts the routine; the first
# dict (no '_check') initializes every template slot with a default. Values
# keyed by predicates select among alternatives for the matching case.
rout_rules=[
{ # Init
'separatorsfor': {'callfortranroutine':'\n','routdebugenter':'\n','decl':'\n',
'routdebugleave':'\n','routdebugfailure':'\n',
'setjmpbuf':' || ',
'docstrreq':'\n','docstropt':'\n','docstrout':'\n',
'docstrcbs':'\n','docstrsigns':'\\n"\n"',
'latexdocstrsigns':'\n',
'latexdocstrreq':'\n','latexdocstropt':'\n',
'latexdocstrout':'\n','latexdocstrcbs':'\n',
},
'kwlist':'','kwlistopt':'','callfortran':'','callfortranappend':'',
'docsign':'','docsignopt':'','decl':'/*decl*/',
'freemem':'/*freemem*/',
'docsignshort':'','docsignoptshort':'',
'docstrsigns':'','latexdocstrsigns':'',
'docstrreq':'Required arguments:',
'docstropt':'Optional arguments:',
'docstrout':'Return objects:',
'docstrcbs':'Call-back functions:',
'latexdocstrreq':'\\noindent Required arguments:',
'latexdocstropt':'\\noindent Optional arguments:',
'latexdocstrout':'\\noindent Return objects:',
'latexdocstrcbs':'\\noindent Call-back functions:',
'args_capi':'','keys_capi':'','functype':'',
'frompyobj':'/*frompyobj*/',
'cleanupfrompyobj':['/*end of cleanupfrompyobj*/'], #this list will be reversed
'pyobjfrom':'/*pyobjfrom*/',
'closepyobjfrom':['/*end of closepyobjfrom*/'], #this list will be reversed
'topyarr':'/*topyarr*/','routdebugleave':'/*routdebugleave*/',
'routdebugenter':'/*routdebugenter*/',
'routdebugfailure':'/*routdebugfailure*/',
'callfortranroutine':'/*callfortranroutine*/',
'argformat':'','keyformat':'','need_cfuncs':'',
'docreturn':'','return':'','returnformat':'','rformat':'',
'kwlistxa':'','keys_xa':'','xaformat':'','docsignxa':'','docsignxashort':'',
'initf2pywraphook':'',
'routnote':{hasnote:'--- #note#',l_not(hasnote):''},
},{
# Naming for plain (non-module) routines.
'apiname':'f2py_rout_#modulename#_#name#',
'pyname':'#modulename#.#name#',
'decl':'',
'_check':l_not(ismoduleroutine)
},{
# Naming for routines living inside a Fortran 90 module.
'apiname':'f2py_rout_#modulename#_#f90modulename#_#name#',
'pyname':'#modulename#.#f90modulename#.#name#',
'decl':'',
'_check':ismoduleroutine
},{ # Subroutine
'functype':'void',
'declfortranroutine':{l_and(l_not(l_or(ismoduleroutine,isintent_c)),l_not(isdummyroutine)):'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);',
l_and(l_not(ismoduleroutine),isintent_c,l_not(isdummyroutine)):'extern void #fortranname#(#callprotoargument#);',
ismoduleroutine:'',
isdummyroutine:''
},
'routine_def':{l_not(l_or(ismoduleroutine,isintent_c,isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
l_and(l_not(ismoduleroutine),isintent_c,l_not(isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},',
l_and(l_not(ismoduleroutine),isdummyroutine):'\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
'need':{l_and(l_not(l_or(ismoduleroutine,isintent_c)),l_not(isdummyroutine)):'F_FUNC'},
'callfortranroutine':[
{debugcapi:["""\tfprintf(stderr,\"debug-capi:Fortran subroutine `#fortranname#(#callfortran#)\'\\n\");"""]},
{hasexternals:"""\
\t\tif (#setjmpbuf#) {
\t\t\tf2py_success = 0;
\t\t} else {"""},
{isthreadsafe:'\t\t\tPy_BEGIN_ALLOW_THREADS'},
{hascallstatement:'''\t\t\t\t#callstatement#;
\t\t\t\t/*(*f2py_func)(#callfortran#);*/'''},
{l_not(l_or(hascallstatement,isdummyroutine)):'\t\t\t\t(*f2py_func)(#callfortran#);'},
{isthreadsafe:'\t\t\tPy_END_ALLOW_THREADS'},
{hasexternals:"""\t\t}"""}
],
'_check':issubroutine,
},{ # Wrapped function
'functype':'void',
'declfortranroutine':{l_not(l_or(ismoduleroutine,isdummyroutine)):'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);',
isdummyroutine:'',
},
'routine_def':{l_not(l_or(ismoduleroutine,isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
isdummyroutine:'\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
'initf2pywraphook':{l_not(l_or(ismoduleroutine,isdummyroutine)):'''
{
extern #ctype# #F_FUNC#(#name_lower#,#NAME#)(void);
PyObject* o = PyDict_GetItemString(d,"#name#");
PyObject_SetAttrString(o,"_cpointer", PyCObject_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL));
}
'''},
'need':{l_not(l_or(ismoduleroutine,isdummyroutine)):['F_WRAPPEDFUNC','F_FUNC']},
'callfortranroutine':[
{debugcapi:["""\tfprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]},
{hasexternals:"""\
\tif (#setjmpbuf#) {
\t\tf2py_success = 0;
\t} else {"""},
{isthreadsafe:'\tPy_BEGIN_ALLOW_THREADS'},
{l_not(l_or(hascallstatement,isdummyroutine)):'\t(*f2py_func)(#callfortran#);'},
{hascallstatement:'\t#callstatement#;\n\t/*(*f2py_func)(#callfortran#);*/'},
{isthreadsafe:'\tPy_END_ALLOW_THREADS'},
{hasexternals:'\t}'}
],
'_check':isfunction_wrap,
},{ # Function
'functype':'#ctype#',
'docreturn':{l_not(isintent_hide):'#rname#,'},
'docstrout':'\t#pydocsignout#',
'latexdocstrout':['\\item[]{{}\\verb@#pydocsignout#@{}}',
{hasresultnote:'--- #resultnote#'}],
'callfortranroutine':[{l_and(debugcapi,isstringfunction):"""\
#ifdef USESCOMPAQFORTRAN
\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callcompaqfortran#)\\n\");
#else
\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\");
#endif
"""},
{l_and(debugcapi,l_not(isstringfunction)):"""\
\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\");
"""}
],
'_check':l_and(isfunction,l_not(isfunction_wrap))
},{ # Scalar function
'declfortranroutine':{l_and(l_not(l_or(ismoduleroutine,isintent_c)),l_not(isdummyroutine)):'extern #ctype# #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);',
l_and(l_not(ismoduleroutine),isintent_c,l_not(isdummyroutine)):'extern #ctype# #fortranname#(#callprotoargument#);',
isdummyroutine:''
},
'routine_def':{l_and(l_not(l_or(ismoduleroutine,isintent_c)),l_not(isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
l_and(l_not(ismoduleroutine),isintent_c,l_not(isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},',
isdummyroutine:'\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
'decl':[{iscomplexfunction_warn:'\t#ctype# #name#_return_value={0,0};',
l_not(iscomplexfunction):'\t#ctype# #name#_return_value=0;'},
{iscomplexfunction:'\tPyObject *#name#_return_value_capi = Py_None;'}
],
'callfortranroutine':[
{hasexternals:"""\
\tif (#setjmpbuf#) {
\t\tf2py_success = 0;
\t} else {"""},
{isthreadsafe:'\tPy_BEGIN_ALLOW_THREADS'},
{hascallstatement:'''\t#callstatement#;
/*\t#name#_return_value = (*f2py_func)(#callfortran#);*/
'''},
{l_not(l_or(hascallstatement,isdummyroutine)):'\t#name#_return_value = (*f2py_func)(#callfortran#);'},
{isthreadsafe:'\tPy_END_ALLOW_THREADS'},
{hasexternals:'\t}'},
{l_and(debugcapi,iscomplexfunction):'\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value.r,#name#_return_value.i);'},
{l_and(debugcapi,l_not(iscomplexfunction)):'\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value);'}],
'pyobjfrom':{iscomplexfunction:'\t#name#_return_value_capi = pyobj_from_#ctype#1(#name#_return_value);'},
'need':[{l_not(isdummyroutine):'F_FUNC'},
{iscomplexfunction:'pyobj_from_#ctype#1'},
{islong_longfunction:'long_long'},
{islong_doublefunction:'long_double'}],
'returnformat':{l_not(isintent_hide):'#rformat#'},
'return':{iscomplexfunction:',#name#_return_value_capi',
l_not(l_or(iscomplexfunction,isintent_hide)):',#name#_return_value'},
'_check':l_and(isfunction,l_not(isstringfunction),l_not(isfunction_wrap))
},{ # String function # in use for --no-wrap
'declfortranroutine':'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);',
'routine_def':{l_not(l_or(ismoduleroutine,isintent_c)):
# '\t{\"#name#\",-1,{{-1}},0,(char *)F_FUNC(#fortranname#,#FORTRANNAME#),(void *)#apiname#,doc_#apiname#},',
'\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
l_and(l_not(ismoduleroutine),isintent_c):
# '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(void *)#apiname#,doc_#apiname#},'
'\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},'
},
'decl':['\t#ctype# #name#_return_value = NULL;',
'\tint #name#_return_value_len = 0;'],
'callfortran':'#name#_return_value,#name#_return_value_len,',
'callfortranroutine':['\t#name#_return_value_len = #rlength#;',
'\tif ((#name#_return_value = (string)malloc(sizeof(char)*(#name#_return_value_len+1))) == NULL) {',
'\t\tPyErr_SetString(PyExc_MemoryError, \"out of memory\");',
'\t\tf2py_success = 0;',
'\t} else {',
"\t\t(#name#_return_value)[#name#_return_value_len] = '\\0';",
'\t}',
'\tif (f2py_success) {',
{hasexternals:"""\
\t\tif (#setjmpbuf#) {
\t\t\tf2py_success = 0;
\t\t} else {"""},
{isthreadsafe:'\t\tPy_BEGIN_ALLOW_THREADS'},
"""\
#ifdef USESCOMPAQFORTRAN
\t\t(*f2py_func)(#callcompaqfortran#);
#else
\t\t(*f2py_func)(#callfortran#);
#endif
""",
{isthreadsafe:'\t\tPy_END_ALLOW_THREADS'},
{hasexternals:'\t\t}'},
{debugcapi:'\t\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value_len,#name#_return_value);'},
'\t} /* if (f2py_success) after (string)malloc */',
],
'returnformat':'#rformat#',
'return':',#name#_return_value',
'freemem':'\tSTRINGFREE(#name#_return_value);',
'need':['F_FUNC','#ctype#','STRINGFREE'],
'_check':l_and(isstringfunction,l_not(isfunction_wrap)) # ???obsolete
},
{ # Debugging
'routdebugenter':'\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#(#docsignature#)\\n");',
'routdebugleave':'\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: successful.\\n");',
'routdebugfailure':'\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: failure.\\n");',
'_check':debugcapi
}
]
################ Rules for arguments ##################
# Maps type predicates (from auxfuncs) to the typedef snippet name (defined
# in cfuncs.py) that the generated C code needs when an argument of that
# kind occurs in a routine signature.
typedef_need_dict = {islong_long:'long_long',
islong_double:'long_double',
islong_complex:'complex_long_double',
isunsigned_char:'unsigned_char',
isunsigned_short:'unsigned_short',
isunsigned:'unsigned',
isunsigned_long_long:'unsigned_long_long',
isunsigned_chararray:'unsigned_char',
isunsigned_shortarray:'unsigned_short',
isunsigned_long_longarray:'unsigned_long_long',
issigned_long_longarray:'long_long',
}
# Rules for auxiliary variables (local helpers declared in the wrapper but
# not exposed as Python arguments). Same predicate-gated structure as
# rout_rules: a dict is merged only when its '_check' accepts the variable.
aux_rules=[
{
'separatorsfor':sepdict
},
{ # Common
'frompyobj':['\t/* Processing auxiliary variable #varname# */',
{debugcapi:'\tfprintf(stderr,"#vardebuginfo#\\n");'},],
'cleanupfrompyobj':'\t/* End of cleaning variable #varname# */',
'need':typedef_need_dict,
},
# Scalars (not complex)
{ # Common
'decl':'\t#ctype# #varname# = 0;',
'need':{hasinitvalue:'math.h'},
'frompyobj':{hasinitvalue:'\t#varname# = #init#;'},
'_check':l_and(isscalar,l_not(iscomplex)),
},
{
'return':',#varname#',
'docstrout':'\t#pydocsignout#',
'docreturn':'#outvarname#,',
'returnformat':'#varrformat#',
'_check':l_and(isscalar,l_not(iscomplex),isintent_out),
},
# Complex scalars
{ # Common
'decl':'\t#ctype# #varname#;',
'frompyobj': {hasinitvalue:'\t#varname#.r = #init.r#, #varname#.i = #init.i#;'},
'_check':iscomplex
},
# String
{ # Common
'decl':['\t#ctype# #varname# = NULL;',
'\tint slen(#varname#);',
],
'need':['len..'],
'_check':isstring
},
# Array
{ # Common
'decl':['\t#ctype# *#varname# = NULL;',
'\tnpy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};',
'\tconst int #varname#_Rank = #rank#;',
],
'need':['len..',{hasinitvalue:'forcomb'},{hasinitvalue:'CFUNCSMESS'}],
'_check':isarray
},
# Scalararray
{ # Common
'_check':l_and(isarray,l_not(iscomplexarray))
},{ # Not hidden
'_check':l_and(isarray,l_not(iscomplexarray),isintent_nothide)
},
# Integer*1 array
{'need':'#ctype#',
'_check':isint1array,
'_depend':''
},
# Integer*-1 array
{'need':'#ctype#',
'_check':isunsigned_chararray,
'_depend':''
},
# Integer*-2 array
{'need':'#ctype#',
'_check':isunsigned_shortarray,
'_depend':''
},
# Integer*-8 array
{'need':'#ctype#',
'_check':isunsigned_long_longarray,
'_depend':''
},
# Complexarray
{'need':'#ctype#',
'_check':iscomplexarray,
'_depend':''
},
# Stringarray
{
'callfortranappend':{isarrayofstrings:'flen(#varname#),'},
'need':'string',
'_check':isstringarray
}
]
arg_rules=[
{
'separatorsfor':sepdict
},
{ # Common
'frompyobj':['\t/* Processing variable #varname# */',
{debugcapi:'\tfprintf(stderr,"#vardebuginfo#\\n");'},],
'cleanupfrompyobj':'\t/* End of cleaning variable #varname# */',
'_depend':'',
'need':typedef_need_dict,
},
# Doc signatures
{
'docstropt':{l_and(isoptional,isintent_nothide):'\t#pydocsign#'},
'docstrreq':{l_and(isrequired,isintent_nothide):'\t#pydocsign#'},
'docstrout':{isintent_out:'\t#pydocsignout#'},
'latexdocstropt':{l_and(isoptional,isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}',
{hasnote:'--- #note#'}]},
'latexdocstrreq':{l_and(isrequired,isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}',
{hasnote:'--- #note#'}]},
'latexdocstrout':{isintent_out:['\\item[]{{}\\verb@#pydocsignout#@{}}',
{l_and(hasnote,isintent_hide):'--- #note#',
l_and(hasnote,isintent_nothide):'--- See above.'}]},
'depend':''
},
# Required/Optional arguments
{
'kwlist':'"#varname#",',
'docsign':'#varname#,',
'_check':l_and(isintent_nothide,l_not(isoptional))
},
{
'kwlistopt':'"#varname#",',
'docsignopt':'#varname#=#showinit#,',
'docsignoptshort':'#varname#,',
'_check':l_and(isintent_nothide,isoptional)
},
# Docstring/BuildValue
{
'docreturn':'#outvarname#,',
'returnformat':'#varrformat#',
'_check':isintent_out
},
# Externals (call-back functions)
{ # Common
'docsignxa':{isintent_nothide:'#varname#_extra_args=(),'},
'docsignxashort':{isintent_nothide:'#varname#_extra_args,'},
'docstropt':{isintent_nothide:'\t#varname#_extra_args := () input tuple'},
'docstrcbs':'#cbdocstr#',
'latexdocstrcbs':'\\item[] #cblatexdocstr#',
'latexdocstropt':{isintent_nothide:'\\item[]{{}\\verb@#varname#_extra_args := () input tuple@{}} --- Extra arguments for call-back function {{}\\verb@#varname#@{}}.'},
'decl':['\tPyObject *#varname#_capi = Py_None;',
'\tPyTupleObject *#varname#_xa_capi = NULL;',
'\tPyTupleObject *#varname#_args_capi = NULL;',
'\tint #varname#_nofargs_capi = 0;',
{l_not(isintent_callback):'\t#cbname#_typedef #varname#_cptr;'}
],
'kwlistxa':{isintent_nothide:'"#varname#_extra_args",'},
'argformat':{isrequired:'O'},
'keyformat':{isoptional:'O'},
'xaformat':{isintent_nothide:'O!'},
'args_capi':{isrequired:',&#varname#_capi'},
'keys_capi':{isoptional:',&#varname#_capi'},
'keys_xa':',&PyTuple_Type,&#varname#_xa_capi',
'setjmpbuf':'(setjmp(#cbname#_jmpbuf))',
'callfortran':{l_not(isintent_callback):'#varname#_cptr,'},
'need':['#cbname#','setjmp.h'],
'_check':isexternal
},
{
'frompyobj':[{l_not(isintent_callback):"""\
if(PyCObject_Check(#varname#_capi)) {
#varname#_cptr = PyCObject_AsVoidPtr(#varname#_capi);
} else {
#varname#_cptr = #cbname#;
}
"""},{isintent_callback:"""\
if (#varname#_capi==Py_None) {
#varname#_capi = PyObject_GetAttrString(#modulename#_module,\"#varname#\");
if (#varname#_capi) {
if (#varname#_xa_capi==NULL) {
if (PyObject_HasAttrString(#modulename#_module,\"#varname#_extra_args\")) {
PyObject* capi_tmp = PyObject_GetAttrString(#modulename#_module,\"#varname#_extra_args\");
if (capi_tmp)
#varname#_xa_capi = (PyTupleObject *)PySequence_Tuple(capi_tmp);
else
#varname#_xa_capi = (PyTupleObject *)Py_BuildValue(\"()\");
if (#varname#_xa_capi==NULL) {
PyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#varname#_extra_args to tuple.\\n\");
return NULL;
}
}
}
}
if (#varname#_capi==NULL) {
PyErr_SetString(#modulename#_error,\"Callback #varname# not defined (as an argument or module #modulename# attribute).\\n\");
return NULL;
}
}
"""},
## {l_not(isintent_callback):"""\
## if (#varname#_capi==Py_None) {
## printf(\"hoi\\n\");
## }
## """},
"""\
\t#varname#_nofargs_capi = #cbname#_nofargs;
\tif (create_cb_arglist(#varname#_capi,#varname#_xa_capi,#maxnofargs#,#nofoptargs#,&#cbname#_nofargs,&#varname#_args_capi,\"failed in processing argument list for call-back #varname#.\")) {
\t\tjmp_buf #varname#_jmpbuf;""",
{debugcapi:["""\
\t\tfprintf(stderr,\"debug-capi:Assuming %d arguments; at most #maxnofargs#(-#nofoptargs#) is expected.\\n\",#cbname#_nofargs);
\t\tCFUNCSMESSPY(\"for #varname#=\",#cbname#_capi);""",
{l_not(isintent_callback):"""\t\tfprintf(stderr,\"#vardebugshowvalue# (call-back in C).\\n\",#cbname#);"""}]},
"""\
\t\tCFUNCSMESS(\"Saving jmpbuf for `#varname#`.\\n\");
\t\tSWAP(#varname#_capi,#cbname#_capi,PyObject);
\t\tSWAP(#varname#_args_capi,#cbname#_args_capi,PyTupleObject);
\t\tmemcpy(&#varname#_jmpbuf,&#cbname#_jmpbuf,sizeof(jmp_buf));""",
],
'cleanupfrompyobj':
"""\
\t\tCFUNCSMESS(\"Restoring jmpbuf for `#varname#`.\\n\");
\t\t#cbname#_capi = #varname#_capi;
\t\tPy_DECREF(#cbname#_args_capi);
\t\t#cbname#_args_capi = #varname#_args_capi;
\t\t#cbname#_nofargs = #varname#_nofargs_capi;
\t\tmemcpy(&#cbname#_jmpbuf,&#varname#_jmpbuf,sizeof(jmp_buf));
\t}""",
'need':['SWAP','create_cb_arglist'],
'_check':isexternal,
'_depend':''
},
# Scalars (not complex)
{ # Common
'decl':'\t#ctype# #varname# = 0;',
'pyobjfrom':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'},
'callfortran':{isintent_c:'#varname#,',l_not(isintent_c):'&#varname#,'},
'return':{isintent_out:',#varname#'},
'_check':l_and(isscalar,l_not(iscomplex))
},{
'need':{hasinitvalue:'math.h'},
'_check':l_and(isscalar,l_not(iscomplex)),
#'_depend':''
},{ # Not hidden
'decl':'\tPyObject *#varname#_capi = Py_None;',
'argformat':{isrequired:'O'},
'keyformat':{isoptional:'O'},
'args_capi':{isrequired:',&#varname#_capi'},
'keys_capi':{isoptional:',&#varname#_capi'},
'pyobjfrom':{isintent_inout:"""\
\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#);
\tif (f2py_success) {"""},
'closepyobjfrom':{isintent_inout:"\t} /*if (f2py_success) of #varname# pyobjfrom*/"},
'need':{isintent_inout:'try_pyarr_from_#ctype#'},
'_check':l_and(isscalar,l_not(iscomplex),isintent_nothide)
},{
'frompyobj':[
# hasinitvalue...
# if pyobj is None:
# varname = init
# else
# from_pyobj(varname)
#
# isoptional and noinitvalue...
# if pyobj is not None:
# from_pyobj(varname)
# else:
# varname is uninitialized
#
# ...
# from_pyobj(varname)
#
{hasinitvalue:'\tif (#varname#_capi == Py_None) #varname# = #init#; else',
'_depend':''},
{l_and(isoptional,l_not(hasinitvalue)):'\tif (#varname#_capi != Py_None)',
'_depend':''},
{l_not(islogical):'''\
\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");
\tif (f2py_success) {'''},
{islogical:'''\
\t\t#varname# = (#ctype#)PyObject_IsTrue(#varname#_capi);
\t\tf2py_success = 1;
\tif (f2py_success) {'''},
],
'cleanupfrompyobj':'\t} /*if (f2py_success) of #varname#*/',
'need':{l_not(islogical):'#ctype#_from_pyobj'},
'_check':l_and(isscalar,l_not(iscomplex),isintent_nothide),
'_depend':''
# },{ # Hidden
# '_check':l_and(isscalar,l_not(iscomplex),isintent_hide)
},{ # Hidden
'frompyobj':{hasinitvalue:'\t#varname# = #init#;'},
'need':typedef_need_dict,
'_check':l_and(isscalar,l_not(iscomplex),isintent_hide),
'_depend':''
},{ # Common
'frompyobj':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'},
'_check':l_and(isscalar,l_not(iscomplex)),
'_depend':''
},
# Complex scalars
{ # Common
'decl':'\t#ctype# #varname#;',
'callfortran':{isintent_c:'#varname#,',l_not(isintent_c):'&#varname#,'},
'pyobjfrom':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'},
'return':{isintent_out:',#varname#_capi'},
'_check':iscomplex
},{ # Not hidden
'decl':'\tPyObject *#varname#_capi = Py_None;',
'argformat':{isrequired:'O'},
'keyformat':{isoptional:'O'},
'args_capi':{isrequired:',&#varname#_capi'},
'keys_capi':{isoptional:',&#varname#_capi'},
'need':{isintent_inout:'try_pyarr_from_#ctype#'},
'pyobjfrom':{isintent_inout:"""\
\t\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#);
\t\tif (f2py_success) {"""},
'closepyobjfrom':{isintent_inout:"\t\t} /*if (f2py_success) of #varname# pyobjfrom*/"},
'_check':l_and(iscomplex,isintent_nothide)
},{
'frompyobj':[{hasinitvalue:'\tif (#varname#_capi==Py_None) {#varname#.r = #init.r#, #varname#.i = #init.i#;} else'},
{l_and(isoptional,l_not(hasinitvalue)):'\tif (#varname#_capi != Py_None)'},
# '\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#ctype#_from_pyobj failed in converting #nth# `#varname#\' of #pyname# to C #ctype#\\n");'
'\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");'
'\n\tif (f2py_success) {'],
'cleanupfrompyobj':'\t} /*if (f2py_success) of #varname# frompyobj*/',
'need':['#ctype#_from_pyobj'],
'_check':l_and(iscomplex,isintent_nothide),
'_depend':''
},{ # Hidden
'decl':{isintent_out:'\tPyObject *#varname#_capi = Py_None;'},
'_check':l_and(iscomplex,isintent_hide)
},{
'frompyobj': {hasinitvalue:'\t#varname#.r = #init.r#, #varname#.i = #init.i#;'},
'_check':l_and(iscomplex,isintent_hide),
'_depend':''
},{ # Common
'pyobjfrom':{isintent_out:'\t#varname#_capi = pyobj_from_#ctype#1(#varname#);'},
'need':['pyobj_from_#ctype#1'],
'_check':iscomplex
},{
'frompyobj':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'},
'_check':iscomplex,
'_depend':''
},
# String
{ # Common
'decl':['\t#ctype# #varname# = NULL;',
'\tint slen(#varname#);',
'\tPyObject *#varname#_capi = Py_None;'],
'callfortran':'#varname#,',
'callfortranappend':'slen(#varname#),',
'pyobjfrom':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'},
# 'freemem':'\tSTRINGFREE(#varname#);',
'return':{isintent_out:',#varname#'},
'need':['len..'],#'STRINGFREE'],
'_check':isstring
},{ # Common
'frompyobj':"""\
\tslen(#varname#) = #length#;
\tf2py_success = #ctype#_from_pyobj(&#varname#,&slen(#varname#),#init#,#varname#_capi,\"#ctype#_from_pyobj failed in converting #nth# `#varname#\' of #pyname# to C #ctype#\");
\tif (f2py_success) {""",
'cleanupfrompyobj':"""\
\t\tSTRINGFREE(#varname#);
\t} /*if (f2py_success) of #varname#*/""",
'need':['#ctype#_from_pyobj','len..','STRINGFREE'],
'_check':isstring,
'_depend':''
},{ # Not hidden
'argformat':{isrequired:'O'},
'keyformat':{isoptional:'O'},
'args_capi':{isrequired:',&#varname#_capi'},
'keys_capi':{isoptional:',&#varname#_capi'},
'pyobjfrom':{isintent_inout:'''\
\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,#varname#);
\tif (f2py_success) {'''},
'closepyobjfrom':{isintent_inout:'\t} /*if (f2py_success) of #varname# pyobjfrom*/'},
'need':{isintent_inout:'try_pyarr_from_#ctype#'},
'_check':l_and(isstring,isintent_nothide)
},{ # Hidden
'_check':l_and(isstring,isintent_hide)
},{
'frompyobj':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'},
'_check':isstring,
'_depend':''
},
# Array
{ # Common
'decl':['\t#ctype# *#varname# = NULL;',
'\tnpy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};',
'\tconst int #varname#_Rank = #rank#;',
'\tPyArrayObject *capi_#varname#_tmp = NULL;',
'\tint capi_#varname#_intent = 0;',
],
'callfortran':'#varname#,',
'return':{isintent_out:',capi_#varname#_tmp'},
'need':'len..',
'_check':isarray
},{ # intent(overwrite) array
'decl':'\tint capi_overwrite_#varname# = 1;',
'kwlistxa':'"overwrite_#varname#",',
'xaformat':'i',
'keys_xa':',&capi_overwrite_#varname#',
'docsignxa':'overwrite_#varname#=1,',
'docsignxashort':'overwrite_#varname#,',
'docstropt':'\toverwrite_#varname# := 1 input int',
'_check':l_and(isarray,isintent_overwrite),
},{
'frompyobj':'\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);',
'_check':l_and(isarray,isintent_overwrite),
'_depend':'',
},
{ # intent(copy) array
'decl':'\tint capi_overwrite_#varname# = 0;',
'kwlistxa':'"overwrite_#varname#",',
'xaformat':'i',
'keys_xa':',&capi_overwrite_#varname#',
'docsignxa':'overwrite_#varname#=0,',
'docsignxashort':'overwrite_#varname#,',
'docstropt':'\toverwrite_#varname# := 0 input int',
'_check':l_and(isarray,isintent_copy),
},{
'frompyobj':'\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);',
'_check':l_and(isarray,isintent_copy),
'_depend':'',
},{
'need':[{hasinitvalue:'forcomb'},{hasinitvalue:'CFUNCSMESS'}],
'_check':isarray,
'_depend':''
},{ # Not hidden
'decl':'\tPyObject *#varname#_capi = Py_None;',
'argformat':{isrequired:'O'},
'keyformat':{isoptional:'O'},
'args_capi':{isrequired:',&#varname#_capi'},
'keys_capi':{isoptional:',&#varname#_capi'},
# 'pyobjfrom':{isintent_inout:"""\
# /* Partly because of the following hack, intent(inout) is depreciated,
# Use intent(in,out) instead.
# \tif ((#varname#_capi != Py_None) && PyArray_Check(#varname#_capi) \\
# \t\t&& (#varname#_capi != (PyObject *)capi_#varname#_tmp)) {
# \t\tif (((PyArrayObject *)#varname#_capi)->nd != capi_#varname#_tmp->nd) {
# \t\t\tif (#varname#_capi != capi_#varname#_tmp->base)
# \t\t\t\tcopy_ND_array((PyArrayObject *)capi_#varname#_tmp->base,(PyArrayObject *)#varname#_capi);
# \t\t} else
# \t\t\tcopy_ND_array(capi_#varname#_tmp,(PyArrayObject *)#varname#_capi);
# \t}
# */
# """},
# 'need':{isintent_inout:'copy_ND_array'},
'_check':l_and(isarray,isintent_nothide)
},{
'frompyobj':['\t#setdims#;',
'\tcapi_#varname#_intent |= #intent#;',
{isintent_hide:'\tcapi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,Py_None);'},
{isintent_nothide:'\tcapi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,#varname#_capi);'},
"""\
\tif (capi_#varname#_tmp == NULL) {
\t\tif (!PyErr_Occurred())
\t\t\tPyErr_SetString(#modulename#_error,\"failed in converting #nth# `#varname#\' of #pyname# to C/Fortran array\" );
\t} else {
\t\t#varname# = (#ctype# *)(capi_#varname#_tmp->data);
""",
{hasinitvalue:[
{isintent_nothide:'\tif (#varname#_capi == Py_None) {'},
{isintent_hide:'\t{'},
{iscomplexarray:'\t\t#ctype# capi_c;'},
"""\
\t\tint *_i,capi_i=0;
\t\tCFUNCSMESS(\"#name#: Initializing #varname#=#init#\\n\");
\t\tif (initforcomb(capi_#varname#_tmp->dimensions,capi_#varname#_tmp->nd,1)) {
\t\t\twhile ((_i = nextforcomb()))
\t\t\t\t#varname#[capi_i++] = #init#; /* fortran way */
\t\t} else {
\t\t\tif (!PyErr_Occurred())
\t\t\t\tPyErr_SetString(#modulename#_error,\"Initialization of #nth# #varname# failed (initforcomb).\");
\t\t\tf2py_success = 0;
\t\t}
\t}
\tif (f2py_success) {"""]},
],
'cleanupfrompyobj':[ # note that this list will be reversed
'\t} /*if (capi_#varname#_tmp == NULL) ... else of #varname#*/',
{l_not(l_or(isintent_out,isintent_hide)):"""\
\tif((PyObject *)capi_#varname#_tmp!=#varname#_capi) {
\t\tPy_XDECREF(capi_#varname#_tmp); }"""},
{l_and(isintent_hide,l_not(isintent_out)):"""\t\tPy_XDECREF(capi_#varname#_tmp);"""},
{hasinitvalue:'\t} /*if (f2py_success) of #varname# init*/'},
],
'_check':isarray,
'_depend':''
},
# { # Hidden
# 'freemem':{l_not(isintent_out):'\tPy_XDECREF(capi_#varname#_tmp);'},
# '_check':l_and(isarray,isintent_hide)
# },
# Scalararray
{ # Common
'_check':l_and(isarray,l_not(iscomplexarray))
},{ # Not hidden
'_check':l_and(isarray,l_not(iscomplexarray),isintent_nothide)
},
# Integer*1 array
{'need':'#ctype#',
'_check':isint1array,
'_depend':''
},
# Integer*-1 array
{'need':'#ctype#',
'_check':isunsigned_chararray,
'_depend':''
},
# Integer*-2 array
{'need':'#ctype#',
'_check':isunsigned_shortarray,
'_depend':''
},
# Integer*-8 array
{'need':'#ctype#',
'_check':isunsigned_long_longarray,
'_depend':''
},
# Complexarray
{'need':'#ctype#',
'_check':iscomplexarray,
'_depend':''
},
# Stringarray
{
'callfortranappend':{isarrayofstrings:'flen(#varname#),'},
'need':'string',
'_check':isstringarray
}
]
################# Rules for checking ###############
# Rules for rendering the CHECK* C macros generated from a variable's
# `check` attribute.  Each dict is tried in order: '_check' selects by
# variable kind and '_break' stops processing after the first match, so
# at most one specialized CHECK* form is emitted per check expression.
check_rules=[
    {
        # Common part: optional debug trace of the check expression.
        'frompyobj':{debugcapi:'\tfprintf(stderr,\"debug-capi:Checking `#check#\'\\n\");'},
        'need':'len..'
    },{
        # Non-complex scalars use CHECKSCALAR.
        'frompyobj':'\tCHECKSCALAR(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {',
        'cleanupfrompyobj':'\t} /*CHECKSCALAR(#check#)*/',
        'need':'CHECKSCALAR',
        '_check':l_and(isscalar,l_not(iscomplex)),
        '_break':''
    },{
        # Strings use CHECKSTRING.
        'frompyobj':'\tCHECKSTRING(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {',
        'cleanupfrompyobj':'\t} /*CHECKSTRING(#check#)*/',
        'need':'CHECKSTRING',
        '_check':isstring,
        '_break':''
    },{
        # Arrays use CHECKARRAY.
        'need':'CHECKARRAY',
        'frompyobj':'\tCHECKARRAY(#check#,\"#check#\",\"#nth# #varname#\") {',
        'cleanupfrompyobj':'\t} /*CHECKARRAY(#check#)*/',
        '_check':isarray,
        '_break':''
    },{
        # Fallback for everything else (e.g. complex scalars).
        'need':'CHECKGENERIC',
        'frompyobj':'\tCHECKGENERIC(#check#,\"#check#\",\"#nth# #varname#\") {',
        'cleanupfrompyobj':'\t} /*CHECKGENERIC(#check#)*/',
    }
]
########## Applying the rules. No need to modify what follows #############
#################### Build C/API module #######################
def buildmodule(m,um):
    """Generate the C/API extension-module source for module `m`.

    Parameters
    ----------
    m : dict
        Module block description produced by crackfortran.
    um : list of dict
        Descriptions of the modules referenced via `use` statements.

    Returns
    -------
    dict
        Paths of the generated files: 'csrc' (the C/API module source),
        and optionally 'fsrc' (Fortran wrapper source) and 'ltx' (LaTeX
        documentation), depending on options and wrapper needs.
    """
    global f2py_version,options
    outmess('\tBuilding module "%s"...\n'%(m['name']))
    ret = {}
    mod_rules=defmod_rules[:]
    vrd=modsign2map(m)
    rd=dictappend({'f2py_version':f2py_version},vrd)
    funcwrappers = []
    funcwrappers2 = [] # F90 codes
    # Build the C/API hooks for every interfaced routine.
    for n in m['interfaced']:
        nb=None
        for bi in m['body']:
            if not bi['block']=='interface':
                errmess('buildmodule: Expected interface block. Skipping.\n')
                continue
            for b in bi['body']:
                if b['name']==n: nb=b;break
        if not nb:
            errmess('buildmodule: Could not found the body of interfaced routine "%s". Skipping.\n'%(n))
            continue
        nb_list = [nb]
        if 'entry' in nb:
            # Each ENTRY point is wrapped as an independent routine with
            # its own name and argument list.
            for k,a in nb['entry'].items():
                nb1 = copy.deepcopy(nb)
                del nb1['entry']
                nb1['name'] = k
                nb1['args'] = a
                nb_list.append(nb1)
        for nb in nb_list:
            api,wrap=buildapi(nb)
            if wrap:
                if ismoduleroutine(nb):
                    funcwrappers2.append(wrap)
                else:
                    funcwrappers.append(wrap)
            ar=applyrules(api,vrd)
            rd=dictappend(rd,ar)
    # Construct COMMON block support
    cr,wrap = common_rules.buildhooks(m)
    if wrap:
        funcwrappers.append(wrap)
    ar=applyrules(cr,vrd)
    rd=dictappend(rd,ar)
    # Construct F90 module support
    mr,wrap = f90mod_rules.buildhooks(m)
    if wrap:
        funcwrappers2.append(wrap)
    ar=applyrules(mr,vrd)
    rd=dictappend(rd,ar)
    for u in um:
        ar=use_rules.buildusevars(u,m['use'][u['name']])
        rd=dictappend(rd,ar)
    # Resolve every accumulated 'need' key into its C code snippet,
    # searching the cfuncs registries in priority order.
    needs=cfuncs.get_needs()
    code={}
    for n in needs.keys():
        code[n]=[]
        for k in needs[n]:
            c=''
            if k in cfuncs.includes0:
                c=cfuncs.includes0[k]
            elif k in cfuncs.includes:
                c=cfuncs.includes[k]
            elif k in cfuncs.userincludes:
                c=cfuncs.userincludes[k]
            elif k in cfuncs.typedefs:
                c=cfuncs.typedefs[k]
            elif k in cfuncs.typedefs_generated:
                c=cfuncs.typedefs_generated[k]
            elif k in cfuncs.cppmacros:
                c=cfuncs.cppmacros[k]
            elif k in cfuncs.cfuncs:
                c=cfuncs.cfuncs[k]
            elif k in cfuncs.callbacks:
                c=cfuncs.callbacks[k]
            elif k in cfuncs.f90modhooks:
                c=cfuncs.f90modhooks[k]
            elif k in cfuncs.commonhooks:
                c=cfuncs.commonhooks[k]
            else:
                errmess('buildmodule: unknown need %s.\n'%(`k`));continue
            code[n].append(c)
    mod_rules.append(code)
    for r in mod_rules:
        if ('_check' in r and r['_check'](m)) or ('_check' not in r):
            ar=applyrules(r,vrd,m)
            rd=dictappend(rd,ar)
    ar=applyrules(module_rules,rd)
    # Write the C/API module source; tabs are expanded to 2 spaces.
    fn = os.path.join(options['buildpath'],vrd['modulename']+'module.c')
    ret['csrc'] = fn
    f=open(fn,'w')
    f.write(ar['modulebody'].replace('\t',2*' '))
    f.close()
    outmess('\tWrote C/API module "%s" to file "%s/%smodule.c"\n'%(m['name'],options['buildpath'],vrd['modulename']))
    if options['dorestdoc']:
        # Optional reStructuredText documentation output.
        fn = os.path.join(options['buildpath'],vrd['modulename']+'module.rest')
        f=open(fn,'w')
        f.write('.. -*- rest -*-\n')
        f.write('\n'.join(ar['restdoc']))
        f.close()
        outmess('\tReST Documentation is saved to file "%s/%smodule.rest"\n'%(options['buildpath'],vrd['modulename']))
    if options['dolatexdoc']:
        # Optional LaTeX documentation output; 'shortlatex' omits the
        # standalone document preamble/closing.
        fn = os.path.join(options['buildpath'],vrd['modulename']+'module.tex')
        ret['ltx'] = fn
        f=open(fn,'w')
        f.write('%% This file is auto-generated with f2py (version:%s)\n'%(f2py_version))
        if 'shortlatex' not in options:
            f.write('\\documentclass{article}\n\\usepackage{a4wide}\n\\begin{document}\n\\tableofcontents\n\n')
        f.write('\n'.join(ar['latexdoc']))
        if 'shortlatex' not in options:
            f.write('\\end{document}')
        f.close()
        outmess('\tDocumentation is saved to file "%s/%smodule.tex"\n'%(options['buildpath'],vrd['modulename']))
    if funcwrappers:
        # Emit Fortran 77 wrappers, folding long statement lines with
        # continuation markers (fixed-form column limits).
        wn = os.path.join(options['buildpath'],'%s-f2pywrappers.f'%(vrd['modulename']))
        ret['fsrc'] = wn
        f=open(wn,'w')
        f.write('C -*- fortran -*-\n')
        f.write('C This file is autogenerated with f2py (version:%s)\n'%(f2py_version))
        f.write('C It contains Fortran 77 wrappers to fortran functions.\n')
        lines = []
        for l in ('\n\n'.join(funcwrappers)+'\n').split('\n'):
            if l and l[0]==' ':
                while len(l)>=66:
                    lines.append(l[:66]+'\n &')
                    l = l[66:]
                lines.append(l+'\n')
            else: lines.append(l+'\n')
        lines = ''.join(lines).replace('\n &\n','\n')
        f.write(lines)
        f.close()
        outmess('\tFortran 77 wrappers are saved to "%s"\n'%(wn))
    if funcwrappers2:
        # Emit Fortran 90 wrappers (free form, '&' continuations).
        wn = os.path.join(options['buildpath'],'%s-f2pywrappers2.f90'%(vrd['modulename']))
        ret['fsrc'] = wn
        f=open(wn,'w')
        f.write('! -*- f90 -*-\n')
        f.write('! This file is autogenerated with f2py (version:%s)\n'%(f2py_version))
        f.write('! It contains Fortran 90 wrappers to fortran functions.\n')
        lines = []
        for l in ('\n\n'.join(funcwrappers2)+'\n').split('\n'):
            if len(l)>72 and l[0]==' ':
                lines.append(l[:72]+'&\n &')
                l = l[72:]
                while len(l)>66:
                    lines.append(l[:66]+'&\n &')
                    l = l[66:]
                lines.append(l+'\n')
            else: lines.append(l+'\n')
        lines = ''.join(lines).replace('\n &\n','\n')
        f.write(lines)
        f.close()
        outmess('\tFortran 90 wrappers are saved to "%s"\n'%(wn))
    return ret
################## Build C/API function #############
# English ordinal suffixes keyed by the LAST decimal digit; used below to
# build the '#nth#' substitution ("1st argument", "2nd keyword", ...).
# NOTE(review): 11/12/13 render as '11st'/'12nd'/'13rd' because only the
# last digit is consulted -- cosmetic (the text only appears in messages).
stnd={1:'st',2:'nd',3:'rd',4:'th',5:'th',6:'th',7:'th',8:'th',9:'th',0:'th'}
def buildapi(rout):
    """Apply the wrapper rules to one routine description.

    Parameters
    ----------
    rout : dict
        Routine block description produced by crackfortran.

    Returns
    -------
    (ar, wrap)
        `ar` is the applied-rules dict consumed by buildmodule; `wrap`
        is the Fortran wrapper source produced by func2subr.assubr when
        the routine needed conversion to a subroutine (empty otherwise).
    """
    rout,wrap = func2subr.assubr(rout)
    args,depargs=getargs2(rout)
    capi_maps.depargs=depargs
    var=rout['vars']
    auxvars = [a for a in var.keys() if isintent_aux(var[a])]
    if ismoduleroutine(rout):
        outmess('\t\t\tConstructing wrapper function "%s.%s"...\n'%(rout['modulename'],rout['name']))
    else:
        outmess('\t\tConstructing wrapper function "%s"...\n'%(rout['name']))
    # Routine
    vrd=routsign2map(rout)
    rd=dictappend({},vrd)
    for r in rout_rules:
        if ('_check' in r and r['_check'](rout)) or ('_check' not in r):
            ar=applyrules(r,vrd,rout)
            rd=dictappend(rd,ar)
    # Args
    nth,nthk=0,0
    savevrd={}
    # First pass: apply non-'_depend' rules to each argument in call order.
    for a in args:
        vrd=sign2map(a,var[a])
        if isintent_aux(var[a]):
            _rules = aux_rules
        else:
            _rules = arg_rules
        if not isintent_hide(var[a]):
            # '#nth#' becomes e.g. '3rd argument' for error messages.
            if not isoptional(var[a]):
                nth=nth+1
                vrd['nth']=`nth`+stnd[nth%10]+' argument'
            else:
                nthk=nthk+1
                vrd['nth']=`nthk`+stnd[nthk%10]+' keyword'
        else: vrd['nth']='hidden'
        savevrd[a]=vrd
        for r in _rules:
            if '_depend' in r:
                continue
            if ('_check' in r and r['_check'](var[a])) or ('_check' not in r):
                ar=applyrules(r,vrd,var[a])
                rd=dictappend(rd,ar)
                if '_break' in r:
                    break
    # Second pass: '_depend' rules in dependency order, then check rules.
    for a in depargs:
        if isintent_aux(var[a]):
            _rules = aux_rules
        else:
            _rules = arg_rules
        vrd=savevrd[a]
        for r in _rules:
            if '_depend' not in r:
                continue
            if ('_check' in r and r['_check'](var[a])) or ('_check' not in r):
                ar=applyrules(r,vrd,var[a])
                rd=dictappend(rd,ar)
                if '_break' in r:
                    break
        if 'check' in var[a]:
            for c in var[a]['check']:
                vrd['check']=c
                ar=applyrules(check_rules,vrd,var[a])
                rd=dictappend(rd,ar)
    # Cleanup snippets accumulate in construction order but must execute
    # in reverse (innermost allocation freed first).
    if type(rd['cleanupfrompyobj']) is types.ListType:
        rd['cleanupfrompyobj'].reverse()
    if type(rd['closepyobjfrom']) is types.ListType:
        rd['closepyobjfrom'].reverse()
    rd['docsignature']=stripcomma(replace('#docsign##docsignopt##docsignxa#',
                                          {'docsign':rd['docsign'],
                                           'docsignopt':rd['docsignopt'],
                                           'docsignxa':rd['docsignxa']}))
    optargs=stripcomma(replace('#docsignopt##docsignxa#',
                               {'docsignxa':rd['docsignxashort'],
                                'docsignopt':rd['docsignoptshort']}
                               ))
    if optargs=='':
        rd['docsignatureshort']=stripcomma(replace('#docsign#',{'docsign':rd['docsign']}))
    else:
        rd['docsignatureshort']=replace('#docsign#[#docsignopt#]',
                                        {'docsign':rd['docsign'],
                                         'docsignopt':optargs,
                                         })
    # LaTeX-escape the short signature for the documentation output.
    rd['latexdocsignatureshort']=rd['docsignatureshort'].replace('_','\\_')
    rd['latexdocsignatureshort']=rd['latexdocsignatureshort'].replace(',',', ')
    cfs=stripcomma(replace('#callfortran##callfortranappend#',{'callfortran':rd['callfortran'],'callfortranappend':rd['callfortranappend']}))
    if len(rd['callfortranappend'])>1:
        # Compaq Fortran calling convention inserts an extra 0 argument.
        rd['callcompaqfortran']=stripcomma(replace('#callfortran# 0,#callfortranappend#',{'callfortran':rd['callfortran'],'callfortranappend':rd['callfortranappend']}))
    else:
        rd['callcompaqfortran']=cfs
    rd['callfortran']=cfs
    if type(rd['docreturn'])==types.ListType:
        rd['docreturn']=stripcomma(replace('#docreturn#',{'docreturn':rd['docreturn']}))+' = '
    # Assemble plain-text and LaTeX docstring sections.
    rd['docstrsigns']=[]
    rd['latexdocstrsigns']=[]
    for k in ['docstrreq','docstropt','docstrout','docstrcbs']:
        if k in rd and type(rd[k])==types.ListType:
            rd['docstrsigns']=rd['docstrsigns']+rd[k]
        k='latex'+k
        if k in rd and type(rd[k])==types.ListType:
            rd['latexdocstrsigns']=rd['latexdocstrsigns']+rd[k][0:1]+\
                ['\\begin{description}']+rd[k][1:]+\
                ['\\end{description}']
    # Workaround for Python 2.6, 2.6.1 bug: http://bugs.python.org/issue4720
    if rd['keyformat'] or rd['xaformat']:
        argformat = rd['argformat']
        if isinstance(argformat, list):
            argformat.append('|')
        else:
            assert isinstance(argformat, str),repr((argformat, type(argformat)))
            rd['argformat'] += '|'
    ar=applyrules(routine_rules,rd)
    if ismoduleroutine(rout):
        outmess('\t\t\t %s\n'%(ar['docshort']))
    else:
        outmess('\t\t %s\n'%(ar['docshort']))
    return ar,wrap
#################### EOF rules.py #######################
|
|
#!/usr/bin/env python
""" stocks.py
=========
Author: Anthony Sutardja
Last modified: 2015-03-15
=========
Methods to fetch stock quotes from external sources.
"""
import httplib
import os
import string
import sys
import time
# Quote timestamps from the upstream feeds are US market local time;
# pin the process timezone so time.localtime()/strftime() below agree.
os.environ['TZ'] = 'US/Eastern'
time.tzset()  # need to reset for eastern timezone
def validate_stock_symbol(symbol):
    """Return *symbol* upper-cased if it contains only valid ticker characters.

    Valid characters are the letters A-Z plus '.' and '-' (e.g. 'BRK.B',
    'BRK-B').

    Raises:
        ValueError: if the symbol contains any other character.
    """
    symbol = str(symbol).upper()
    # frozenset gives O(1) membership; the old error message omitted '-'
    # even though it has always been accepted.
    allowed = frozenset(string.ascii_uppercase + ".-")
    for ch in symbol:
        if ch not in allowed:
            raise ValueError("Must contain letters A-Z and/or . and/or -")
    return symbol
def get_quote_from_google(symbol):
    """Return a (current, high, low) tuple of floats for *symbol*.

    Information is requested from Google Finance's minute-resolution
    intraday endpoint.

    Raises:
        ValueError: if the symbol is malformed or not listed.
    """
    base_url = "www.google.com"
    query_path = "/finance/getprices?i=60&p=1d&f=d,o,h,l,c,v&df=cpct&q="
    # Query must contain symbols in capital letters
    symbol = validate_stock_symbol(symbol)
    # Make request; always release the socket, even on error (the
    # original never closed the connection).
    conn = httplib.HTTPConnection(base_url)
    try:
        conn.request("GET", query_path + symbol)
        response = conn.getresponse().read().rsplit()
    finally:
        conn.close()
    # Check if symbol was valid.
    # strips away 'EXCHANGE%3D' from first line of response
    checkQuote = str(response[0][11:])
    if (checkQuote == 'UNKNOWN+EXCHANGE'):
        raise ValueError("Symbol is not listed.")
    # Last CSV row is the most recent reading; fields 1..3 are taken as
    # current/high/low -- TODO confirm against the endpoint's COLUMNS header.
    lastReading = response[-1].split(",")
    current, high, low = float(lastReading[1]), float(lastReading[2]), float(lastReading[3])
    return current, high, low
def get_change_from_google(symbol):
    """Return the day-over-day percent change for *symbol* (e.g. 1.5 for +1.5%).

    Uses Google's daily-interval endpoint and compares the last two
    closing prices.

    Raises:
        ValueError: if the symbol is malformed or not listed.
    """
    base_url = "www.google.com"
    query_path = "/finance/getprices?i=86400&p=3d&f=d,c&df=cpct&q="
    # Query must contain symbols in capital letters
    symbol = validate_stock_symbol(symbol)
    # Make request; always release the socket, even on error (the
    # original never closed the connection).
    conn = httplib.HTTPConnection(base_url)
    try:
        conn.request("GET", query_path + symbol)
        response = conn.getresponse().read().rsplit()
    finally:
        conn.close()
    # Check if symbol was valid.
    # strips away 'EXCHANGE%3D' from first line of response
    checkQuote = str(response[0][11:])
    if (checkQuote == 'UNKNOWN+EXCHANGE'):
        raise ValueError("Symbol is not listed.")
    # Compare the last two daily closes.
    twoDaysBefore = response[-2].split(",")
    old = float(twoDaysBefore[1])
    oneDayBefore = response[-1].split(",")
    current = float(oneDayBefore[1])
    return (current - old) / old * 100.0
def get_formatted_change(symbol):
    """Return the percent change formatted like '+1.23%' or '-0.45%'."""
    change = get_change_from_google(symbol)
    # Float formatting supplies the '-' itself; we only prepend the '+'.
    sign = '+' if change > 0.0 else ''
    return '{sign}{change:.2f}%'.format(sign=sign, change=change)
def get_quote_from_yahoo(symbol):
    """Return a (current, high, low) tuple of floats for *symbol*.

    Information is requested from Yahoo's 1-day chart API.

    Raises:
        ValueError: if the symbol is malformed.
    """
    symbol = validate_stock_symbol(symbol)
    base_url = "chartapi.finance.yahoo.com"
    query_path = "/instrument/1.0/" + symbol + "/chartdata;type=quote;range=1d/csv/"
    # Make request; always release the socket, even on error (the
    # original never closed the connection).
    conn = httplib.HTTPConnection(base_url)
    try:
        conn.request("GET", query_path)
        response = conn.getresponse().read().rsplit()
    finally:
        conn.close()
    # Last CSV row is the most recent reading; fields 1..3 are taken as
    # current/high/low -- TODO confirm against the endpoint's header rows.
    lastReading = response[-1].split(",")
    current, high, low = float(lastReading[1]), float(lastReading[2]), float(lastReading[3])
    return current, high, low
def get_time_series_from_yahoo(symbol, period):
    """Return a list of {'time': timestamp str, 'quote': float} points.

    Data is fetched from Yahoo's chart API for the given *period*; an
    unknown period falls back to '1d'.
    """
    base_url = "chartapi.finance.yahoo.com"
    query_path = "/instrument/1.0/{symbol}/chartdata;type=quote;range={period}/csv/"
    if period not in ('1d', '5d', '30d', '1m', '3m', '6m', '1y'):
        period = '1d'
    symbol = validate_stock_symbol(symbol)
    # Make request; always release the socket, even on error (the
    # original never closed the connection).
    conn = httplib.HTTPConnection(base_url)
    try:
        conn.request("GET", query_path.format(symbol=symbol, period=period))
        response = conn.getresponse().read().rsplit()
    finally:
        conn.close()
    # Drop header lines (those contain ':') and keep 6-column data rows.
    response = [r.split(',') for r in response if ':' not in r]
    response = [r for r in response if len(r) == 6]
    results = []
    for r in response:
        if period[-1] == 'd':
            # Day-granular periods carry epoch timestamps.
            t = time.localtime(float(r[0]))
        else:
            # Longer ranges carry YYYYMMDD dates.
            t = time.strptime(r[0], '%Y%m%d')
        # NOTE(review): the 'Z' suffix suggests UTC but the struct_time is
        # local (US/Eastern, set at module import) -- confirm consumers.
        t = time.strftime('%Y-%m-%dT%H:%M:%SZ', t)
        price = float(r[1])
        results.append({'time': t, 'quote': price})
    return results
def get_time_series_from_google(symbol, period):
    """Return a list of {'time': timestamp str, 'quote': float} points.

    Data is fetched from Google Finance; unknown periods use the
    default 1-day query.

    Raises:
        ValueError: if the symbol is malformed or not listed.
    """
    # NOTE DATA IS INCONSISTENT UPON MULTIPLE QUERIES..
    base_url = "www.google.com"
    QUERY_PATHS = {
        '1d': '/finance/getprices?i=234&p=1d&f=d,c&df=cpct&q=',
        '5d': '/finance/getprices?i=1170&p=5d&f=d,c&df=cpct&q=',
        '1m': '/finance/getprices?i=7020&p=30d&f=d,c&df=cpct&q=',
        '3m': '/finance/getprices?i=21060&p=90d&f=d,c&df=cpct&q=',
        '1y': '/finance/getprices?i=60840&p=260d&f=d,c&df=cpct&q=',
        'default': '/finance/getprices?i=234&p=1d&f=d,c&df=cpct&q=',
    }
    if period not in QUERY_PATHS:
        period = 'default'
    # Query must contain symbols in capital letters
    symbol = validate_stock_symbol(symbol)
    # Make request; always release the socket, even on error (the
    # original never closed the connection).
    conn = httplib.HTTPConnection(base_url)
    try:
        conn.request("GET", QUERY_PATHS[str(period)] + symbol)
        response = conn.getresponse().read().rsplit()
    finally:
        conn.close()
    # Check if symbol was valid.
    # strips away 'EXCHANGE%3D' from first line of response
    checkQuote = str(response[0][11:])
    if (checkQuote == 'UNKNOWN+EXCHANGE'):
        raise ValueError("Symbol is not listed.")
    # Skip the 7 header lines, then parse 'date,price' rows.
    results = response[7:]
    results = [tuple(r.split(',')) for r in results if 'TIMEZONE_OFFSET' not in r]
    formatted_results = []
    for date, price in results:
        # date[1:] strips the 'a' prefix of absolute epoch timestamps.
        # NOTE(review): in this Google format only the first data row is
        # 'a'-prefixed absolute time; later rows are interval offsets --
        # confirm the rows are all absolute before trusting these times.
        t = time.localtime(float(date[1:]))
        t = time.strftime('%Y-%m-%dT%H:%M:%SZ', t)
        res = {'time': t, 'quote': float(price)}
        formatted_results.append(res)
    return formatted_results
if __name__ == "__main__":
symbol = sys.argv[1]
latest = get_quote_from_yahoo(symbol)
print """
{symbol}
--------
Current: {curr}
High: {high}
Low: {low}
""".format(
symbol=symbol.upper(),
curr=latest[0],
high=latest[1],
low=latest[1],
)
|
|
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
J-metric density fitting
'''
import time
import copy
import tempfile
import numpy
import h5py
from pyscf import lib
from pyscf import ao2mo
from pyscf.lib import logger
from pyscf.df import incore
from pyscf.df import outcore
from pyscf.df import r_incore
from pyscf.df import addons
from pyscf.df import df_jk
from pyscf.ao2mo import _ao2mo
from pyscf.ao2mo.incore import _conc_mos, iden_coeffs
from pyscf.ao2mo.outcore import _load_from_h5g
from pyscf import __config__
class DF(lib.StreamObject):
    r'''
    Object to hold 3-index tensor

    Attributes:
        auxbasis : str or dict
            Same input format as :attr:`Mole.basis`

        auxmol : Mole object
            Read only Mole object to hold the auxiliary basis.  auxmol is
            generated automatically in the initialization step based on the
            given auxbasis.  It is used in the rest part of the code to
            determine the problem size, the integral batches etc.  This object
            should NOT be modified.

        _cderi_to_save : str
            If _cderi_to_save is specified, the DF integral tensor will be
            saved in this file.

        _cderi : str or numpy array
            If _cderi is specified, the DF integral tensor will be read from
            this HDF5 file (or numpy array). When the DF integral tensor is
            provided from the HDF5 file, it has to be stored under the dataset
            'j3c'.
            The DF integral tensor :math:`V_{x,ij}` should be a 2D array in C
            (row-major) convention, where x corresponds to index of auxiliary
            basis, and the combined index ij is the orbital pair index. The
            hermitian symmetry is assumed for the combined ij index, ie
            the elements of :math:`V_{x,i,j}` with :math:`i\geq j` are existed
            in the DF integral tensor.  Thus the shape of DF integral tensor
            is (M,N*(N+1)/2), where M is the number of auxbasis functions and
            N is the number of basis functions of the orbital basis.

        blockdim : int
            When reading DF integrals from disk the chunk size to load.  It is
            used to improve IO performance.
    '''

    # Number of auxiliary-index rows loaded per chunk when streaming the
    # integrals from disk (see the docstring above).
    blockdim = getattr(__config__, 'df_df_DF_blockdim', 240)

    # Store DF tensor in a format compatible to pyscf-1.1 - pyscf-1.6
    _compatible_format = getattr(__config__, 'df_df_DF_compatible_format', False)
    def __init__(self, mol, auxbasis=None):
        # Input options: the molecule and (optionally) the auxiliary basis.
        self.mol = mol
        self.stdout = mol.stdout
        self.verbose = mol.verbose
        self.max_memory = mol.max_memory
        self._auxbasis = auxbasis

##################################################
# Following are not input options
        self.auxmol = None
        # If _cderi_to_save is specified, the 3C-integral tensor will be saved in this file.
        self._cderi_to_save = tempfile.NamedTemporaryFile(dir=lib.param.TMPDIR)
        # If _cderi is specified, the 3C-integral tensor will be read from this file
        self._cderi = None
        # Cached J-build intermediates; built lazily elsewhere.
        self._vjopt = None
        self._rsh_df = {}  # Range separated Coulomb DF objects
        # Snapshot of known attribute names; presumably consulted by
        # StreamObject.check_sanity() to flag attribute typos -- confirm.
        self._keys = set(self.__dict__.keys())
    @property
    def auxbasis(self):
        """Auxiliary basis (same input format as Mole.basis)."""
        return self._auxbasis
    @auxbasis.setter
    def auxbasis(self, x):
        # Changing the auxiliary basis invalidates any cached integrals,
        # so reset before storing the new value.
        if self._auxbasis != x:
            self.reset()
            self._auxbasis = x
def dump_flags(self, verbose=None):
log = logger.new_logger(self, verbose)
log.info('******** %s ********', self.__class__)
if self.auxmol is None:
log.info('auxbasis = %s', self.auxbasis)
else:
log.info('auxbasis = auxmol.basis = %s', self.auxmol.basis)
log.info('max_memory = %s', self.max_memory)
if isinstance(self._cderi, str):
log.info('_cderi = %s where DF integrals are loaded (readonly).',
self._cderi)
if isinstance(self._cderi_to_save, str):
log.info('_cderi_to_save = %s', self._cderi_to_save)
else:
log.info('_cderi_to_save = %s', self._cderi_to_save.name)
return self
def build(self):
    '''Generate the 3-center DF integral tensor.

    The Cholesky-decomposed integrals are held in memory when they fit in
    ~90% of the remaining memory budget and no explicit save file was
    requested; otherwise they are written to ``self._cderi_to_save``.

    Returns:
        self, with ``self._cderi`` populated (ndarray or file name).
    '''
    # BUGFIX: time.clock() was removed in Python 3.8; time.process_time()
    # is the stdlib replacement for the CPU-time half of the timer pair.
    t0 = (time.process_time(), time.time())
    log = logger.Logger(self.stdout, self.verbose)
    self.check_sanity()
    self.dump_flags()

    mol = self.mol
    auxmol = self.auxmol = addons.make_auxmol(self.mol, self.auxbasis)
    nao = mol.nao_nr()
    naux = auxmol.nao_nr()
    nao_pair = nao*(nao+1)//2

    max_memory = self.max_memory - lib.current_memory()[0]
    int3c = mol._add_suffix('int3c2e')
    int2c = mol._add_suffix('int2c2e')
    # 8 bytes/float64; keep the tensor in memory only when it fits safely.
    if (nao_pair*naux*8/1e6 < .9*max_memory and
        not isinstance(self._cderi_to_save, str)):
        self._cderi = incore.cholesky_eri(mol, int3c=int3c, int2c=int2c,
                                          auxmol=auxmol,
                                          max_memory=max_memory, verbose=log)
    else:
        if isinstance(self._cderi_to_save, str):
            cderi = self._cderi_to_save
        else:
            cderi = self._cderi_to_save.name
        if isinstance(self._cderi, str):
            # A preset (read-only) _cderi path is superseded by the
            # requested save file.
            log.warn('Value of _cderi is ignored. DF integrals will be '
                     'saved in file %s .', cderi)

        if self._compatible_format or isinstance(self._cderi_to_save, str):
            outcore.cholesky_eri(mol, cderi, dataname='j3c',
                                 int3c=int3c, int2c=int2c, auxmol=auxmol,
                                 max_memory=max_memory, verbose=log)
        else:
            # Store DF tensor in blocks. This is to reduce the
            # initialization overhead
            outcore.cholesky_eri_b(mol, cderi, dataname='j3c',
                                   int3c=int3c, int2c=int2c, auxmol=auxmol,
                                   max_memory=max_memory, verbose=log)
        self._cderi = cderi
    log.timer_debug1('Generate density fitting integrals', *t0)
    return self
def kernel(self, *args, **kwargs):
    '''Alias of build(); any arguments are forwarded unchanged.'''
    return self.build(*args, **kwargs)
def reset(self, mol=None):
    '''Reset mol and clean up relevant attributes for scanner mode'''
    if mol is not None:
        self.mol = mol
    # Discard every cached integral/auxiliary object so the next build()
    # starts from scratch.
    self._vjopt = None
    self._rsh_df = {}
    self.auxmol = None
    self._cderi = None
    # A plain string means a user-chosen save file: keep it.  Otherwise
    # allocate a fresh temporary file for the next set of integrals.
    if not isinstance(self._cderi_to_save, str):
        self._cderi_to_save = tempfile.NamedTemporaryFile(dir=lib.param.TMPDIR)
    return self
def loop(self, blksize=None):
    '''Iterate over the DF tensor in chunks of at most blksize aux functions.

    Builds the integrals on first use.  Yields C-contiguous 2D arrays
    (rows are auxiliary functions).
    '''
    if self._cderi is None:
        self.build()
    if blksize is None:
        blksize = self.blockdim

    with addons.load(self._cderi, 'j3c') as feri:
        if isinstance(feri, numpy.ndarray):
            # In-memory tensor: plain slicing, no prefetch thread needed.
            naoaux = feri.shape[0]
            for b0, b1 in self.prange(0, naoaux, blksize):
                yield numpy.asarray(feri[b0:b1], order='C')
        else:
            if isinstance(feri, h5py.Group):
                # starting from pyscf-1.7, DF tensor may be stored in
                # block format
                naoaux = feri['0'].shape[0]
                def load(b0, b1, prefetch):
                    prefetch[0] = _load_from_h5g(feri, b0, b1)
            else:
                naoaux = feri.shape[0]
                def load(b0, b1, prefetch):
                    prefetch[0] = numpy.asarray(feri[b0:b1])

            # Double buffering: a background thread reads the next block
            # while the caller consumes the current one.  dat/prefetch are
            # one-element lists so load() can write into them by reference.
            dat = [None]
            prefetch = [None]
            with lib.call_in_background(load) as bload:
                bload(0, min(blksize, naoaux), prefetch)
                for b0, b1 in self.prange(blksize, naoaux, blksize):
                    # swap buffers, then schedule the read of [b0:b1]
                    dat, prefetch = prefetch, dat
                    bload(b0, b1, prefetch)
                    yield dat[0]
            # last block, filled by the final bload call
            yield prefetch[0]
def prange(self, start, end, step):
    '''Yield (lower, upper) index pairs covering [start, end) in strides
    of ``step``; the final pair is clipped to ``end``.'''
    return ((lower, min(lower + step, end))
            for lower in range(start, end, step))
def get_naoaux(self):
    '''Return the number of auxiliary functions in the stored DF tensor.'''
    # determine naoaux with self._cderi, because DF object may be used as CD
    # object when self._cderi is provided.
    if self._cderi is None:
        self.build()
    with addons.load(self._cderi, 'j3c') as feri:
        if isinstance(feri, h5py.Group):
            # block-format storage: row count of the first block's dataset
            return feri['0'].shape[0]
        else:
            return feri.shape[0]
def get_jk(self, dm, hermi=1, with_j=True, with_k=True,
           direct_scf_tol=getattr(__config__, 'scf_hf_SCF_direct_scf_tol', 1e-13),
           omega=None):
    '''Compute J (Coulomb) and/or K (exchange) matrices for density dm.

    When ``omega`` is given, a per-omega DF object is cached in
    self._rsh_df and used with the range-separated Coulomb operator.
    '''
    if omega is None:
        return df_jk.get_jk(self, dm, hermi, with_j, with_k, direct_scf_tol)

    # A temporary treatment for RSH-DF integrals
    key = '%.6f' % omega
    if key in self._rsh_df:
        rsh_df = self._rsh_df[key]
    else:
        # Shallow-copy then reset, so the cached object rebuilds its own
        # integrals under the range-separated operator below.
        rsh_df = self._rsh_df[key] = copy.copy(self).reset()
        logger.info(self, 'Create RSH-DF object %s for omega=%s', rsh_df, omega)

    with rsh_df.mol.with_range_coulomb(omega):
        return df_jk.get_jk(rsh_df, dm, hermi, with_j, with_k, direct_scf_tol)
def get_eri(self):
    '''Assemble the approximate 4-index AO ERIs from the DF tensor,
    returned in 8-fold permutation symmetry.'''
    nao = self.mol.nao_nr()
    nao_pair = nao * (nao+1) // 2
    ao_eri = numpy.zeros((nao_pair,nao_pair))
    for eri1 in self.loop():
        # ao_eri += eri1.T.dot(eri1), accumulated block by block
        lib.dot(eri1.T, eri1, 1, ao_eri, 1)
    # Note: ``ao2mo`` here is the module-level import, not the method below.
    return ao2mo.restore(8, ao_eri, nao)
get_ao_eri = get_eri
def ao2mo(self, mo_coeffs,
          compact=getattr(__config__, 'df_df_DF_ao2mo_compact', True)):
    '''Transform the DF ERIs to the MO basis.

    mo_coeffs may be a single 2D coefficient array (used for all four
    indices) or a sequence of four arrays (i, j, k, l).
    '''
    if isinstance(mo_coeffs, numpy.ndarray) and mo_coeffs.ndim == 2:
        mo_coeffs = (mo_coeffs,) * 4
    ijmosym, nij_pair, moij, ijslice = _conc_mos(mo_coeffs[0], mo_coeffs[1], compact)
    klmosym, nkl_pair, mokl, klslice = _conc_mos(mo_coeffs[2], mo_coeffs[3], compact)
    mo_eri = numpy.zeros((nij_pair,nkl_pair))
    # If bra and ket coefficients coincide, the kl half-transformation can
    # reuse the ij result for every block.
    sym = (iden_coeffs(mo_coeffs[0], mo_coeffs[2]) and
           iden_coeffs(mo_coeffs[1], mo_coeffs[3]))
    Lij = Lkl = None
    for eri1 in self.loop():
        Lij = _ao2mo.nr_e2(eri1, moij, ijslice, aosym='s2', mosym=ijmosym, out=Lij)
        if sym:
            Lkl = Lij
        else:
            Lkl = _ao2mo.nr_e2(eri1, mokl, klslice, aosym='s2', mosym=klmosym, out=Lkl)
        # mo_eri += Lij.T.dot(Lkl)
        lib.dot(Lij.T, Lkl, 1, mo_eri, 1)
    return mo_eri
get_mo_eri = ao2mo
# Alias of the DF class.
GDF = DF
class DF4C(DF):
    '''Relativistic 4-component density fitting.

    Holds a pair of 3-center integral tensors, built from the
    'int3c2e_spinor' and 'int3c2e_spsp1_spinor' integrals respectively.
    '''
    def build(self):
        log = logger.Logger(self.stdout, self.verbose)
        mol = self.mol
        auxmol = self.auxmol = addons.make_auxmol(self.mol, self.auxbasis)
        n2c = mol.nao_2c()
        naux = auxmol.nao_nr()
        nao_pair = n2c*(n2c+1)//2
        max_memory = (self.max_memory - lib.current_memory()[0]) * .8
        # Memory estimate in MB for the two complex tensors (16 bytes per
        # complex element) -- only the in-memory path is implemented.
        if nao_pair*naux*3*16/1e6*2 < max_memory:
            self._cderi =(r_incore.cholesky_eri(mol, auxmol=auxmol, aosym='s2',
                                                int3c='int3c2e_spinor', verbose=log),
                          r_incore.cholesky_eri(mol, auxmol=auxmol, aosym='s2',
                                                int3c='int3c2e_spsp1_spinor', verbose=log))
        else:
            raise NotImplementedError
        return self

    def loop(self, blksize=None):
        '''Yield paired blocks (erill, eriss) of at most blksize aux rows.'''
        if self._cderi is None:
            self.build()
        if blksize is None:
            blksize = self.blockdim
        with addons.load(self._cderi[0], 'j3c') as ferill:
            naoaux = ferill.shape[0]
            with addons.load(self._cderi[1], 'j3c') as feriss: # python2.6 not support multiple with
                for b0, b1 in self.prange(0, naoaux, blksize):
                    erill = numpy.asarray(ferill[b0:b1], order='C')
                    eriss = numpy.asarray(feriss[b0:b1], order='C')
                    yield erill, eriss

    def get_jk(self, dm, hermi=1, with_j=True, with_k=True,
               direct_scf_tol=getattr(__config__, 'scf_hf_SCF_direct_scf_tol', 1e-13),
               omega=None):
        '''Relativistic J/K build; mirrors DF.get_jk including the cached
        range-separated (omega) variant.'''
        if omega is None:
            return df_jk.r_get_jk(self, dm, hermi, with_j, with_k)

        # A temporary treatment for RSH-DF integrals
        key = '%.6f' % omega
        if key in self._rsh_df:
            rsh_df = self._rsh_df[key]
        else:
            rsh_df = self._rsh_df[key] = copy.copy(self).reset()
            logger.info(self, 'Create RSH-DF object %s for omega=%s', rsh_df, omega)

        with rsh_df.mol.with_range_coulomb(omega):
            return df_jk.r_get_jk(rsh_df, dm, hermi, with_j, with_k)

    def ao2mo(self, mo_coeffs):
        # MO transformation is not implemented for the 4-component case.
        raise NotImplementedError
# Alias of the DF4C class.
GDF4C = DF4C
|
|
"""
-------
Convert
-------
This module provides functions to convert
NetworkX graphs to and from other formats.
The preferred way of converting data to a NetworkX graph
is through the graph constructor. The constructor calls
the from_whatever() function which attempts to guess the
input type and convert it automatically.
Examples
--------
Create a 10 node random graph from a numpy matrix
>>> import numpy
>>> a=numpy.reshape(numpy.random.random_integers(0,1,size=100),(10,10))
>>> D=nx.DiGraph(a)
or equivalently
>>> D=nx.from_whatever(a,create_using=nx.DiGraph())
Create a graph with a single edge from a dictionary of dictionaries
>>> d={0: {1: 1}} # dict-of-dicts single edge (0,1)
>>> G=nx.Graph(d)
See Also
--------
For graphviz dot formats see networkx.drawing.nx_pygraphviz
or networkx.drawing.nx_pydot.
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
# Copyright (C) 2006-2008 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
__all__ = ['from_whatever',
'from_dict_of_dicts', 'to_dict_of_dicts',
'from_dict_of_lists', 'to_dict_of_lists',
'from_edgelist', 'to_edgelist',
'from_numpy_matrix', 'to_numpy_matrix',
'from_scipy_sparse_matrix', 'to_scipy_sparse_matrix']
import networkx
import warnings
def _prep_create_using(create_using):
"""Return a graph object ready to be populated.
If create_using is None return the default (just networkx.Graph())
If create_using.clear() works, assume it returns a graph object.
Otherwise raise an exception because create_using is not a networkx graph.
"""
if create_using is None:
G=networkx.Graph()
else:
G=create_using
try:
G.clear()
except:
raise TypeError("Input graph is not a networkx graph type")
return G
def from_whatever(thing,create_using=None,multigraph_input=False):
    """Make a NetworkX graph from an known type.

    The preferred way to call this is automatically
    from the class constructor

    >>> d={0: {1: {'weight':1}}} # dict-of-dicts single edge (0,1)
    >>> G=nx.Graph(d)

    instead of the equivalent

    >>> G=nx.from_dict_of_dicts(d)

    Parameters
    ----------
    thing : a object to be converted
       Current known types are:
         any NetworkX graph
         dict-of-dicts
         dict-of-lists
         list of edges
         numpy matrix
         numpy ndarray
         scipy sparse matrix
         pygraphviz agraph

    create_using : NetworkX graph
       Use specified graph for result.  Otherwise a new graph is created.

    multigraph_input : bool (default False)
      If True and thing is a dict_of_dicts,
      try to create a multigraph assuming dict_of_dict_of_lists.
      If thing and create_using are both multigraphs then create
      a multigraph from a multigraph.
    """
    # Each candidate type is detected by duck typing (a characteristic
    # attribute), then converted; conversion errors are re-raised as
    # NetworkXError.  NOTE(review): Python 2 raise syntax and bare excepts
    # throughout -- bare except can mask unrelated failures.
    # NX graph
    if hasattr(thing,"adj"):
        try:
            result= from_dict_of_dicts(thing.adj,\
                    create_using=create_using,\
                    multigraph_input=thing.is_multigraph())
            if hasattr(thing,'graph') and isinstance(thing.graph,dict):
                # copy graph-level attribute dict
                result.graph=thing.graph.copy()
            if hasattr(thing,'node') and isinstance(thing.node,dict):
                # fresh per-node dicts; the attribute values are shared
                result.node=dict( (n,dd.copy()) for n,dd in thing.node.iteritems() )
            return result
        except:
            raise networkx.NetworkXError,\
                "Input is not a correct NetworkX graph."

    # pygraphviz agraph
    if hasattr(thing,"is_strict"):
        try:
            return networkx.from_agraph(thing,create_using=create_using)
        except:
            raise networkx.NetworkXError,\
                "Input is not a correct pygraphviz graph."

    # dict of dicts/lists
    if isinstance(thing,dict):
        # try dict-of-dicts first, fall back to dict-of-lists
        try:
            return from_dict_of_dicts(thing,create_using=create_using,\
                    multigraph_input=multigraph_input)
        except:
            try:
                return from_dict_of_lists(thing,create_using=create_using)
            except:
                raise TypeError("Input is not known type.")

    # list or generator of edges (py2: generators expose .next)
    if isinstance(thing,list) or hasattr(thing,'next'):
        try:
            return from_edgelist(thing,create_using=create_using)
        except:
            raise networkx.NetworkXError,\
                "Input is not a valid edge list"

    # numpy matrix or ndarray
    try:
        import numpy
        if isinstance(thing,numpy.core.defmatrix.matrix) or \
               isinstance(thing,numpy.ndarray):
            try:
                return from_numpy_matrix(thing,create_using=create_using)
            except:
                raise networkx.NetworkXError,\
                    "Input is not a correct numpy matrix or array."
    except ImportError:
        warnings.warn('numpy not found, skipping conversion test.',
                      ImportWarning)

    # scipy sparse matrix - any format
    try:
        import scipy
        if hasattr(thing,"format"):
            try:
                return from_scipy_sparse_matrix(thing,create_using=create_using)
            except:
                raise networkx.NetworkXError, \
                    "Input is not a correct scipy sparse matrix type."
    except ImportError:
        warnings.warn('scipy not found, skipping conversion test.',
                      ImportWarning)

    raise networkx.NetworkXError, \
        "Input is not a known data type for conversion."

    return
def to_dict_of_lists(G,nodelist=None):
    """Return adjacency representation of graph as a dictionary of lists

    Parameters
    ----------
    G : graph
       A NetworkX graph

    nodelist : list
       Use only nodes specified in nodelist

    Notes
    -----
    Completely ignores edge data for MultiGraph and MultiDiGraph.
    """
    if nodelist is None:
        nodelist=G
    # Map every requested node to the subset of its neighbors that are
    # also in nodelist (membership on the graph itself when no nodelist).
    return dict( (node,
                  [nbr for nbr in G.neighbors(node) if nbr in nodelist])
                 for node in nodelist )
def from_dict_of_lists(d,create_using=None):
    """Return a graph from a dictionary of lists.

    Parameters
    ----------
    d : dictionary of lists
      A dictionary of lists adjacency representation.

    create_using : NetworkX graph
       Use specified graph for result.  Otherwise a new graph is created.

    Examples
    --------
    >>> dol= {0:[1]} # single edge (0,1)
    >>> G=nx.from_dict_of_lists(dol)

    or
    >>> G=nx.Graph(dol) # use Graph constructor

    """
    G=_prep_create_using(create_using)
    G.add_nodes_from(d)
    if G.is_multigraph() and not G.is_directed():
        # a dict_of_lists can't show multiedges.  BUT for undirected graphs,
        # each edge shows up twice in the dict_of_lists.
        # So we need to treat this case separately.
        seen={}
        for node,nbrlist in d.iteritems():
            for nbr in nbrlist:
                if nbr not in seen:
                    # first appearance of this edge; the reverse direction
                    # is skipped once ``node`` is marked below
                    G.add_edge(node,nbr)
            seen[node]=1 # don't allow reverse edge to show up
    else:
        G.add_edges_from( ((node,nbr) for node,nbrlist in d.iteritems()
                           for nbr in nbrlist) )
    return G
def to_dict_of_dicts(G,nodelist=None,edge_data=None):
    """Return adjacency representation of graph as a dictionary of dictionaries

    Parameters
    ----------
    G : graph
       A NetworkX graph

    nodelist : list
       Use only nodes specified in nodelist

    edge_data : list, optional
       If provided,  the value of the dictionary will be
       set to edge_data for all edges.  This is useful to make
       an adjacency matrix type representation with 1 as the edge data.
       If edge_data is None, the edge data in G is used to fill the values.
       If G is a multigraph, the edge data is a dict for each pair (u,v).
    """
    # Four cases: (nodelist given?) x (edge_data given?).
    dod={}
    if nodelist is None:
        if edge_data is None:
            for u,nbrdict in G.adjacency_iter():
                dod[u]=nbrdict.copy()
        else: # edge_data is not None
            for u,nbrdict in G.adjacency_iter():
                # same constant value for every neighbor
                dod[u]=dod.fromkeys(nbrdict, edge_data)
    else: # nodelist is not None
        if edge_data is None:
            for u in nodelist:
                dod[u]={}
                for v,data in ((v,data) for v,data in G[u].iteritems() if v in nodelist):
                    dod[u][v]=data
        else: # nodelist and edge_data are not None
            for u in nodelist:
                dod[u]={}
                for v in ( v for v in G[u] if v in nodelist):
                    dod[u][v]=edge_data
    return dod
def from_dict_of_dicts(d,create_using=None,multigraph_input=False):
    """Return a graph from a dictionary of dictionaries.

    Parameters
    ----------
    d : dictionary of dictionaries
      A dictionary of dictionaries adjacency representation.

    create_using : NetworkX graph
       Use specified graph for result.  Otherwise a new graph is created.

    multigraph_input : bool (default False)
       When True, the values of the inner dict are assumed
       to be containers of edge data for multiple edges.
       Otherwise this routine assumes the edge data are singletons.

    Examples
    --------
    >>> dod= {0: {1:{'weight':1}}} # single edge (0,1)
    >>> G=nx.from_dict_of_dicts(dod)

    or
    >>> G=nx.Graph(dod) # use Graph constructor

    """
    G=_prep_create_using(create_using)
    G.add_nodes_from(d)
    # is dict a MultiGraph or MultiDiGraph?
    if multigraph_input:
        # Inner values are {key: data} containers describing parallel edges.
        # make a copy of the list of edge data (but not the edge data)
        if G.is_directed():
            if G.is_multigraph():
                G.add_edges_from( (u,v,key,data)
                                  for u,nbrs in d.iteritems()
                                  for v,datadict in nbrs.iteritems()
                                  for key,data in datadict.items()
                                )
            else:
                # target is not a multigraph: parallel edges collapse
                G.add_edges_from( (u,v,data)
                                  for u,nbrs in d.iteritems()
                                  for v,datadict in nbrs.iteritems()
                                  for key,data in datadict.items()
                                )
        else: # Undirected
            if G.is_multigraph():
                seen=set()   # don't add both directions of undirected graph
                for u,nbrs in d.iteritems():
                    for v,datadict in nbrs.iteritems():
                        if v not in seen:
                            G.add_edges_from( (u,v,key,data)
                                              for key,data in datadict.items()
                                            )
                    seen.add(u)
            else:
                seen=set()   # don't add both directions of undirected graph
                for u,nbrs in d.iteritems():
                    for v,datadict in nbrs.iteritems():
                        if v not in seen:
                            G.add_edges_from( (u,v,data)
                                              for key,data in datadict.items() )
                    seen.add(u)

    else: # not a multigraph to multigraph transfer
        # Inner values are single edge-data dicts.
        if G.is_directed():
            G.add_edges_from( ( (u,v,data)
                                for u,nbrs in d.iteritems()
                                for v,data in nbrs.iteritems()) )
        # need this if G is multigraph and slightly faster if not multigraph
        else:
            seen=set()
            for u,nbrs in d.iteritems():
                for v,data in nbrs.iteritems():
                    if v not in seen:
                        G.add_edge(u,v,attr_dict=data)
                seen.add(u)
    return G
def to_edgelist(G,nodelist=None):
    """Return a list of edges in the graph.

    Parameters
    ----------
    G : graph
       A NetworkX graph

    nodelist : list
       Use only nodes specified in nodelist
    """
    # Guard-clause form: restrict to nodelist when one is given.
    if nodelist is not None:
        return G.edges(nodelist,data=True)
    return G.edges(data=True)
def from_edgelist(edgelist,create_using=None):
    """Return a graph from a list of edges.

    Parameters
    ----------
    edgelist : list or iterator
      Edge tuples

    create_using : NetworkX graph
       Use specified graph for result.  Otherwise a new graph is created.

    Examples
    --------
    >>> edgelist= [(0,1)] # single edge (0,1)
    >>> G=nx.from_edgelist(edgelist)

    or
    >>> G=nx.Graph(edgelist) # use Graph constructor

    """
    # Validate/prepare the target graph, then bulk-insert the edges.
    graph = _prep_create_using(create_using)
    graph.add_edges_from(edgelist)
    return graph
def to_numpy_matrix(G,nodelist=None,dtype=None,order=None):
    """Return the graph adjacency matrix as a NumPy matrix.

    Parameters
    ----------
    G : graph
        The NetworkX graph used to construct the NumPy matrix.

    nodelist : list, optional
       The rows and columns are ordered according to the nodes in `nodelist`.
       If `nodelist` is None, then the ordering is produced by G.nodes().

    dtype : NumPy data-type, optional
        A valid NumPy dtype used to initialize the array. If None, then the
        NumPy default is used.

    order : {'C', 'F'}, optional
        Whether to store multidimensional data in C- or Fortran-contiguous
        (row- or column-wise) order in memory. If None, then the NumPy default
        is used.

    Returns
    -------
    M : NumPy matrix
       Graph adjacency matrix.

    Notes
    -----
    The matrix entries are populated using the 'weight' edge attribute. When
    an edge does not have the 'weight' attribute, the value of the entry is 1.
    For multiple edges, the values of the entries are the sums of the edge
    attributes for each edge.

    When `nodelist` does not contain every node in `G`, the matrix is built
    from the subgraph of `G` that is induced by the nodes in `nodelist`.

    Examples
    --------
    >>> G = nx.MultiDiGraph()
    >>> G.add_edge(0,1,weight=2)
    >>> G.add_edge(1,0)
    >>> G.add_edge(2,2,weight=3)
    >>> G.add_edge(2,2)
    >>> nx.to_numpy_matrix(G, nodelist=[0,1,2])
    matrix([[ 0.,  2.,  0.],
            [ 1.,  0.,  0.],
            [ 0.,  0.,  4.]])
    """
    try:
        import numpy as np
    except ImportError:
        # BUGFIX: was the Python-2-only ``raise E, "msg"`` statement form;
        # the call form below is valid on both Python 2 and 3.
        raise ImportError(
            "to_numpy_matrix() requires numpy: http://scipy.org/ ")

    if nodelist is None:
        nodelist = G.nodes()

    nodeset = set(nodelist)
    if len(nodelist) != len(nodeset):
        msg = "Ambiguous ordering: `nodelist` contained duplicates."
        raise networkx.NetworkXError(msg)

    nlen=len(nodelist)
    undirected = not G.is_directed()
    index=dict(zip(nodelist,range(nlen)))

    M = np.zeros((nlen,nlen), dtype=dtype, order=order)
    for u,v,attrs in G.edges_iter(data=True):
        if (u in nodeset) and (v in nodeset):
            i,j = index[u],index[v]
            # parallel edges accumulate; missing 'weight' counts as 1
            M[i,j] += attrs.get('weight', 1)
            if undirected:
                M[j,i] = M[i,j]

    M = np.asmatrix(M)
    return M
def from_numpy_matrix(A,create_using=None):
    """Return a graph from numpy matrix adjacency list.

    Parameters
    ----------
    A : numpy matrix
      An adjacency matrix representation of a graph

    create_using : NetworkX graph
       Use specified graph for result.  The default is Graph()

    Examples
    --------
    >>> import numpy
    >>> A=numpy.matrix([[1,1],[2,1]])
    >>> G=nx.from_numpy_matrix(A)

    """
    # This should never fail if you have created a numpy matrix with numpy...
    try:
        import numpy as np
    except ImportError:
        # BUGFIX: replaced Python-2-only ``raise E, "msg"`` with the
        # call form, valid on both Python 2 and 3.
        raise ImportError(
            "from_numpy_matrix() requires numpy: http://scipy.org/ ")

    G=_prep_create_using(create_using)

    nx,ny=A.shape
    if nx!=ny:
        raise networkx.NetworkXError(
            "Adjacency matrix is not square. nx,ny=%s"%(A.shape,))

    G.add_nodes_from(range(nx)) # make sure we get isolated nodes
    # get a list of edges: every nonzero entry becomes a weighted edge
    x,y=np.asarray(A).nonzero()
    G.add_edges_from( ((u,v,{'weight':A[u,v]}) for (u,v) in zip(x,y)) )
    return G
def to_scipy_sparse_matrix(G,nodelist=None,dtype=None):
    """Return the graph adjacency matrix as a SciPy sparse matrix.

    Parameters
    ----------
    G : graph
        The NetworkX graph used to construct the NumPy matrix.

    nodelist : list, optional
       The rows and columns are ordered according to the nodes in `nodelist`.
       If `nodelist` is None, then the ordering is produced by G.nodes().

    dtype : NumPy data-type, optional
        A valid NumPy dtype used to initialize the array. If None, then the
        NumPy default is used.

    Returns
    -------
    M : SciPy sparse matrix
       Graph adjacency matrix.

    Notes
    -----
    The matrix entries are populated using the 'weight' edge attribute. When
    an edge does not have the 'weight' attribute, the value of the entry is 1.
    For multiple edges, the values of the entries are the sums of the edge
    attributes for each edge.

    When `nodelist` does not contain every node in `G`, the matrix is built
    from the subgraph of `G` that is induced by the nodes in `nodelist`.

    Uses lil_matrix format.  To convert to other formats see the documentation
    for scipy.sparse.

    Examples
    --------
    >>> G = nx.MultiDiGraph()
    >>> G.add_edge(0,1,weight=2)
    >>> G.add_edge(1,0)
    >>> G.add_edge(2,2,weight=3)
    >>> G.add_edge(2,2)
    >>> S = nx.to_scipy_sparse_matrix(G, nodelist=[0,1,2])
    >>> S.todense()
    matrix([[ 0.,  2.,  0.],
            [ 1.,  0.,  0.],
            [ 0.,  0.,  4.]])
    """
    try:
        from scipy import sparse
    except ImportError:
        # BUGFIX: was the Python-2-only ``raise E, "msg"`` statement form.
        raise ImportError(
            "to_scipy_sparse_matrix() requires scipy: http://scipy.org/ ")

    if nodelist is None:
        nodelist = G.nodes()

    nodeset = set(nodelist)
    if len(nodelist) != len(nodeset):
        msg = "Ambiguous ordering: `nodelist` contained duplicates."
        raise networkx.NetworkXError(msg)

    nlen=len(nodelist)
    undirected = not G.is_directed()
    index=dict(zip(nodelist,range(nlen)))

    M = sparse.lil_matrix((nlen,nlen), dtype=dtype)
    for u,v,attrs in G.edges_iter(data=True):
        if (u in nodeset) and (v in nodeset):
            i,j = index[u],index[v]
            # parallel edges accumulate; missing 'weight' counts as 1
            M[i,j] += attrs.get('weight', 1)
            if undirected:
                M[j,i] = M[i,j]
    return M
def from_scipy_sparse_matrix(A,create_using=None):
    """Return a graph from scipy sparse matrix adjacency list.

    Parameters
    ----------
    A : scipy sparse matrix
      An adjacency matrix representation of a graph

    create_using : NetworkX graph
       Use specified graph for result.  The default is Graph()

    Examples
    --------
    >>> import scipy.sparse
    >>> A=scipy.sparse.eye(2,2,1)
    >>> G=nx.from_scipy_sparse_matrix(A)

    """
    G=_prep_create_using(create_using)

    # convert all formats to lil - not the most efficient way
    AA=A.tolil()
    nx,ny=AA.shape

    if nx!=ny:
        # BUGFIX: was the Python-2-only ``raise E, "msg"`` statement form.
        raise networkx.NetworkXError(
            "Adjacency matrix is not square. nx,ny=%s"%(A.shape,))

    G.add_nodes_from(range(nx)) # make sure we get isolated nodes
    # lil format stores, per row, the column indices and the parallel data
    # list; walk them together to recover weighted edges.
    for i,row in enumerate(AA.rows):
        for pos,j in enumerate(row):
            G.add_edge(i,j,**{'weight':AA.data[i][pos]})
    return G
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import vm_states
from nova.conductor.tasks import live_migrate
from nova import context as nova_context
from nova import exception
from nova import objects
from nova.scheduler.client import query
from nova.scheduler.client import report
from nova.scheduler import utils as scheduler_utils
from nova import servicegroup
from nova import test
from nova.tests.unit import fake_instance
# Canned scheduler results shared by the tests below: two host/node
# Selection objects (with empty SchedulerLimits), shaped like what the
# placement/scheduler service returns.
fake_limits1 = objects.SchedulerLimits()
fake_selection1 = objects.Selection(service_host="host1", nodename="node1",
                                    cell_uuid=uuids.cell, limits=fake_limits1,
                                    compute_node_uuid=uuids.compute_node1)
fake_limits2 = objects.SchedulerLimits()
fake_selection2 = objects.Selection(service_host="host2", nodename="node2",
                                    cell_uuid=uuids.cell, limits=fake_limits2,
                                    compute_node_uuid=uuids.compute_node2)
class LiveMigrationTaskTestCase(test.NoDBTestCase):
def setUp(self):
    """Build a RUNNING/ACTIVE fake instance and a LiveMigrationTask around
    it, and stub out helpers (request-spec healing, network metadata,
    Neutron resource requests) used by most tests.
    """
    super(LiveMigrationTaskTestCase, self).setUp()
    self.context = nova_context.get_admin_context()
    self.instance_host = "host"
    self.instance_uuid = uuids.instance
    self.instance_image = "image_ref"

    db_instance = fake_instance.fake_db_instance(
        host=self.instance_host,
        uuid=self.instance_uuid,
        power_state=power_state.RUNNING,
        vm_state = vm_states.ACTIVE,
        memory_mb=512,
        image_ref=self.instance_image)
    self.instance = objects.Instance._from_db_object(
        self.context, objects.Instance(), db_instance)
    self.instance.system_metadata = {'image_hw_disk_bus': 'scsi'}
    self.instance.numa_topology = None
    self.instance.pci_requests = None
    self.instance.resources = None
    self.destination = "destination"
    self.block_migration = "bm"
    self.disk_over_commit = "doc"
    self.migration = objects.Migration()
    self.fake_spec = objects.RequestSpec()
    self._generate_task()

    # The following patches stay active for every test and are undone
    # automatically via addCleanup.
    _p = mock.patch('nova.compute.utils.heal_reqspec_is_bfv')
    self.heal_reqspec_is_bfv_mock = _p.start()
    self.addCleanup(_p.stop)

    _p = mock.patch('nova.objects.RequestSpec.ensure_network_metadata')
    self.ensure_network_metadata_mock = _p.start()
    self.addCleanup(_p.stop)

    _p = mock.patch(
        'nova.network.neutron.API.'
        'get_requested_resource_for_instance',
        return_value=[])
    self.mock_get_res_req = _p.start()
    self.addCleanup(_p.stop)
def _generate_task(self):
    # (Re)create the task under test from the current fixture attributes;
    # called again by tests that tweak e.g. self.destination first.
    self.task = live_migrate.LiveMigrationTask(self.context,
        self.instance, self.destination, self.block_migration,
        self.disk_over_commit, self.migration, compute_rpcapi.ComputeAPI(),
        servicegroup.API(), query.SchedulerQueryClient(),
        report.SchedulerReportClient(), self.fake_spec)
@mock.patch('nova.availability_zones.get_host_availability_zone',
            return_value='fake-az')
def test_execute_with_destination(self, mock_get_az):
    """Forced destination: execute() must validate both hosts, claim
    resources on the destination, fill in the migration record, and skip
    the scheduler-related request-spec mutations.
    """
    dest_node = objects.ComputeNode(hypervisor_hostname='dest_node')
    with test.nested(
        mock.patch.object(self.task, '_check_host_is_up'),
        mock.patch.object(self.task, '_check_requested_destination'),
        mock.patch.object(scheduler_utils,
                          'claim_resources_on_destination'),
        mock.patch.object(self.migration, 'save'),
        mock.patch.object(self.task.compute_rpcapi, 'live_migration'),
        mock.patch('nova.conductor.tasks.migrate.'
                   'replace_allocation_with_migration'),
        mock.patch.object(self.task, '_check_destination_is_not_source'),
        mock.patch.object(self.task,
                          '_check_destination_has_enough_memory'),
        mock.patch.object(self.task,
                          '_check_compatible_with_source_hypervisor',
                          return_value=(mock.sentinel.source_node,
                                        dest_node)),
    ) as (mock_check_up, mock_check_dest, mock_claim, mock_save, mock_mig,
          m_alloc, m_check_diff, m_check_enough_mem, m_check_compatible):
        mock_mig.return_value = "bob"
        m_alloc.return_value = (mock.MagicMock(), mock.sentinel.allocs)

        # execute() returns whatever the compute RPC call returns
        self.assertEqual("bob", self.task.execute())
        # both the source and the forced destination are health-checked
        mock_check_up.assert_has_calls([
            mock.call(self.instance_host), mock.call(self.destination)])
        mock_check_dest.assert_called_once_with()
        m_check_diff.assert_called_once()
        m_check_enough_mem.assert_called_once()
        m_check_compatible.assert_called_once()
        allocs = mock.sentinel.allocs
        mock_claim.assert_called_once_with(
            self.context, self.task.report_client,
            self.instance, mock.sentinel.source_node, dest_node,
            source_allocations=allocs, consumer_generation=None)
        mock_mig.assert_called_once_with(
            self.context,
            host=self.instance_host,
            instance=self.instance,
            dest=self.destination,
            block_migration=self.block_migration,
            migration=self.migration,
            migrate_data=None)
        self.assertTrue(mock_save.called)
        mock_get_az.assert_called_once_with(self.context, self.destination)
        self.assertEqual('fake-az', self.instance.availability_zone)
        # make sure the source/dest fields were set on the migration object
        self.assertEqual(self.instance.node, self.migration.source_node)
        self.assertEqual(dest_node.hypervisor_hostname,
                         self.migration.dest_node)
        self.assertEqual(self.task.destination,
                         self.migration.dest_compute)
        m_alloc.assert_called_once_with(self.context,
                                        self.instance,
                                        self.migration)
    # When the task is executed with a destination it means the host is
    # being forced and we don't call the scheduler, so we don't need to
    # heal the request spec.
    self.heal_reqspec_is_bfv_mock.assert_not_called()

    # When the task is executed with a destination it means the host is
    # being forced and we don't call the scheduler, so we don't need to
    # modify the request spec
    self.ensure_network_metadata_mock.assert_not_called()
@mock.patch('nova.availability_zones.get_host_availability_zone',
            return_value='nova')
def test_execute_without_destination(self, mock_get_az):
    """No destination given: execute() must ask _find_destination() for a
    host/node and record the chosen pair on the migration object.
    """
    self.destination = None
    self._generate_task()
    self.assertIsNone(self.task.destination)

    with test.nested(
        mock.patch.object(self.task, '_check_host_is_up'),
        mock.patch.object(self.task, '_find_destination'),
        mock.patch.object(self.task.compute_rpcapi, 'live_migration'),
        mock.patch.object(self.migration, 'save'),
        mock.patch('nova.conductor.tasks.migrate.'
                   'replace_allocation_with_migration'),
    ) as (mock_check, mock_find, mock_mig, mock_save, mock_alloc):
        mock_find.return_value = ("found_host", "found_node", None)
        mock_mig.return_value = "bob"
        mock_alloc.return_value = (mock.MagicMock(), mock.MagicMock())

        self.assertEqual("bob", self.task.execute())
        # only the source host is up-checked; the destination comes from
        # the scheduler and is validated there
        mock_check.assert_called_once_with(self.instance_host)
        mock_find.assert_called_once_with()
        mock_mig.assert_called_once_with(self.context,
            host=self.instance_host,
            instance=self.instance,
            dest="found_host",
            block_migration=self.block_migration,
            migration=self.migration,
            migrate_data=None)
        self.assertTrue(mock_save.called)
        mock_get_az.assert_called_once_with(self.context, 'found_host')
        self.assertEqual('found_host', self.migration.dest_compute)
        self.assertEqual('found_node', self.migration.dest_node)
        self.assertEqual(self.instance.node, self.migration.source_node)
        self.assertTrue(mock_alloc.called)
def test_check_instance_is_active_passes_when_paused(self):
    # PAUSED instances are accepted for live migration.
    self.task.instance['power_state'] = power_state.PAUSED
    self.task._check_instance_is_active()

def test_check_instance_is_active_fails_when_shutdown(self):
    # SHUTDOWN instances are rejected with InstanceInvalidState.
    self.task.instance['power_state'] = power_state.SHUTDOWN
    self.assertRaises(exception.InstanceInvalidState,
                      self.task._check_instance_is_active)
@mock.patch.object(objects.ComputeNode, 'get_by_host_and_nodename')
def test_check_instance_has_no_numa_passes_no_numa(self, mock_get):
    # No NUMA topology on the instance: the check passes even with the
    # workaround disabled.
    self.flags(enable_numa_live_migration=False, group='workarounds')
    self.task.instance.numa_topology = None
    mock_get.return_value = objects.ComputeNode(
        uuid=uuids.cn1, hypervisor_type='qemu')
    self.task._check_instance_has_no_numa()

@mock.patch.object(objects.ComputeNode, 'get_by_host_and_nodename')
def test_check_instance_has_no_numa_passes_non_kvm(self, mock_get):
    # NUMA topology present, but a non-qemu hypervisor: the check passes.
    self.flags(enable_numa_live_migration=False, group='workarounds')
    self.task.instance.numa_topology = objects.InstanceNUMATopology(cells=[
        objects.InstanceNUMACell(
            id=0, cpuset=set([0]), pcpuset=set(), memory=1024),
    ])
    mock_get.return_value = objects.ComputeNode(
        uuid=uuids.cn1, hypervisor_type='xen')
    self.task._check_instance_has_no_numa()

@mock.patch.object(objects.ComputeNode, 'get_by_host_and_nodename')
@mock.patch.object(objects.Service, 'get_minimum_version',
                   return_value=39)
def test_check_instance_has_no_numa_passes_workaround(
        self, mock_get_min_ver, mock_get):
    # NUMA topology + qemu + old service version, but the workaround flag
    # is enabled, so the check passes.
    self.flags(enable_numa_live_migration=True, group='workarounds')
    self.task.instance.numa_topology = objects.InstanceNUMATopology(cells=[
        objects.InstanceNUMACell(
            id=0, cpuset=set([0]), pcpuset=set(), memory=1024),
    ])
    mock_get.return_value = objects.ComputeNode(
        uuid=uuids.cn1, hypervisor_type='qemu')
    self.task._check_instance_has_no_numa()
    mock_get_min_ver.assert_called_once_with(self.context, 'nova-compute')

@mock.patch.object(objects.ComputeNode, 'get_by_host_and_nodename')
@mock.patch.object(objects.Service, 'get_minimum_version',
                   return_value=39)
def test_check_instance_has_no_numa_fails(self, mock_get_min_ver,
                                          mock_get):
    # NUMA topology + qemu + old service version and no workaround:
    # the pre-check must fail.
    self.flags(enable_numa_live_migration=False, group='workarounds')
    mock_get.return_value = objects.ComputeNode(
        uuid=uuids.cn1, hypervisor_type='qemu')
    self.task.instance.numa_topology = objects.InstanceNUMATopology(cells=[
        objects.InstanceNUMACell(
            id=0, cpuset=set([0]), pcpuset=set(), memory=1024),
    ])
    self.assertRaises(exception.MigrationPreCheckError,
                      self.task._check_instance_has_no_numa)
    mock_get_min_ver.assert_called_once_with(self.context, 'nova-compute')

@mock.patch.object(objects.ComputeNode, 'get_by_host_and_nodename')
@mock.patch.object(objects.Service, 'get_minimum_version',
                   return_value=40)
def test_check_instance_has_no_numa_new_svc_passes(self, mock_get_min_ver,
                                                   mock_get):
    # Same as the failing case but with service version >= 40: passes.
    self.flags(enable_numa_live_migration=False, group='workarounds')
    mock_get.return_value = objects.ComputeNode(
        uuid=uuids.cn1, hypervisor_type='qemu')
    self.task.instance.numa_topology = objects.InstanceNUMATopology(cells=[
        objects.InstanceNUMACell(
            id=0, cpuset=set([0]), pcpuset=set(), memory=1024),
    ])
    self.task._check_instance_has_no_numa()
    mock_get_min_ver.assert_called_once_with(self.context, 'nova-compute')
@mock.patch.object(objects.Service, 'get_by_compute_host')
@mock.patch.object(servicegroup.API, 'service_is_up')
def test_check_instance_host_is_up(self, mock_is_up, mock_get):
mock_get.return_value = "service"
mock_is_up.return_value = True
self.task._check_host_is_up("host")
mock_get.assert_called_once_with(self.context, "host")
mock_is_up.assert_called_once_with("service")
    @mock.patch.object(objects.Service, 'get_by_compute_host')
    @mock.patch.object(servicegroup.API, 'service_is_up')
    def test_check_instance_host_is_up_fails_if_not_up(self, mock_is_up,
                                                       mock_get):
        """ComputeServiceUnavailable is raised when the host's nova-compute
        service is reported down.
        """
        mock_get.return_value = "service"
        mock_is_up.return_value = False
        self.assertRaises(exception.ComputeServiceUnavailable,
                          self.task._check_host_is_up, "host")
        mock_get.assert_called_once_with(self.context, "host")
        mock_is_up.assert_called_once_with("service")
@mock.patch.object(objects.Service, 'get_by_compute_host',
side_effect=exception.ComputeHostNotFound(host='host'))
def test_check_instance_host_is_up_fails_if_not_found(self, mock):
self.assertRaises(exception.ComputeHostNotFound,
self.task._check_host_is_up, "host")
def test_check_destination_fails_with_same_dest(self):
self.task.destination = "same"
self.task.source = "same"
self.assertRaises(exception.UnableToMigrateToSelf,
self.task._check_destination_is_not_source)
    @mock.patch.object(objects.ComputeNode,
                       'get_first_node_by_host_for_old_compat')
    def test_check_destination_fails_with_not_enough_memory(
            self, mock_get_first):
        """Destination memory check fails once the allocation ratio is
        taken into account, even though raw free_ram looks sufficient.
        """
        mock_get_first.return_value = (
            objects.ComputeNode(free_ram_mb=513,
                                memory_mb=1024,
                                ram_allocation_ratio=0.9,))
        # free_ram is bigger than instance.ram (512) but the allocation
        # ratio reduces the total available RAM to 410MB
        # (1024 * 0.9 - (1024 - 513))
        self.assertRaises(exception.MigrationPreCheckError,
                          self.task._check_destination_has_enough_memory)
        mock_get_first.assert_called_once_with(self.context, self.destination)
    @mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info')
    def test_check_compatible_fails_with_hypervisor_diff(
            self, mock_get_info):
        """Different hypervisor types on source and destination raise
        InvalidHypervisorType.
        """
        # side_effect order: first call is the source, second the dest.
        mock_get_info.side_effect = [
            objects.ComputeNode(hypervisor_type='b'),
            objects.ComputeNode(hypervisor_type='a')]
        self.assertRaises(exception.InvalidHypervisorType,
                          self.task._check_compatible_with_source_hypervisor,
                          self.destination)
        self.assertEqual([mock.call(self.instance_host),
                          mock.call(self.destination)],
                         mock_get_info.call_args_list)
    @mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info')
    def test_check_compatible_fails_with_hypervisor_too_old(
            self, mock_get_info):
        """A destination hypervisor older than the source's raises
        DestinationHypervisorTooOld.
        """
        host1 = {'hypervisor_type': 'a', 'hypervisor_version': 7}
        host2 = {'hypervisor_type': 'a', 'hypervisor_version': 6}
        mock_get_info.side_effect = [objects.ComputeNode(**host1),
                                     objects.ComputeNode(**host2)]
        self.assertRaises(exception.DestinationHypervisorTooOld,
                          self.task._check_compatible_with_source_hypervisor,
                          self.destination)
        self.assertEqual([mock.call(self.instance_host),
                          mock.call(self.destination)],
                         mock_get_info.call_args_list)
    @mock.patch.object(compute_rpcapi.ComputeAPI,
                       'check_can_live_migrate_destination')
    def test_check_requested_destination(self, mock_check):
        """Happy path: the destination pre-check RPC result is stored on
        the task as migrate_data.
        """
        mock_check.return_value = "migrate_data"
        self.task.limits = fake_limits1
        with test.nested(
                mock.patch.object(self.task.network_api,
                                  'supports_port_binding_extension',
                                  return_value=False),
                mock.patch.object(self.task, '_check_can_migrate_pci')):
            self.assertIsNone(self.task._check_requested_destination())
        self.assertEqual("migrate_data", self.task.migrate_data)
        mock_check.assert_called_once_with(self.context, self.instance,
            self.destination, self.block_migration, self.disk_over_commit,
            self.task.migration, fake_limits1)
    @mock.patch.object(objects.Service, 'get_by_compute_host')
    @mock.patch.object(live_migrate.LiveMigrationTask, '_get_compute_info')
    @mock.patch.object(servicegroup.API, 'service_is_up')
    @mock.patch.object(compute_rpcapi.ComputeAPI,
                       'check_can_live_migrate_destination')
    @mock.patch.object(objects.HostMapping, 'get_by_host',
                       return_value=objects.HostMapping(
                           cell_mapping=objects.CellMapping(
                               uuid=uuids.different)))
    def test_check_requested_destination_fails_different_cells(
            self, mock_get_host_mapping, mock_check, mock_is_up,
            mock_get_info, mock_get_host):
        """A requested destination that lives in a different cell than the
        instance fails the pre-check with an 'across cells' error.
        """
        mock_get_host.return_value = "service"
        mock_is_up.return_value = True
        hypervisor_details = objects.ComputeNode(
            hypervisor_type="a",
            hypervisor_version=6.1,
            free_ram_mb=513,
            memory_mb=512,
            ram_allocation_ratio=1.0)
        mock_get_info.return_value = hypervisor_details
        mock_check.return_value = "migrate_data"
        with test.nested(
                mock.patch.object(self.task.network_api,
                                  'supports_port_binding_extension',
                                  return_value=False),
                mock.patch.object(self.task, '_check_can_migrate_pci')):
            ex = self.assertRaises(exception.MigrationPreCheckError,
                                   self.task._check_requested_destination)
        self.assertIn('across cells', str(ex))
    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_call_livem_checks_on_host')
    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_check_compatible_with_source_hypervisor')
    @mock.patch.object(query.SchedulerQueryClient, 'select_destinations',
                       return_value=[[fake_selection1]])
    @mock.patch.object(objects.RequestSpec, 'reset_forced_destinations')
    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    def test_find_destination_works(self, mock_setup, mock_reset, mock_select,
                                    mock_check, mock_call):
        """Happy path: _find_destination returns the scheduler's first
        (host, node, limits) selection and runs every per-host check.
        """
        self.assertEqual(("host1", "node1", fake_limits1),
                         self.task._find_destination())
        # Make sure the request_spec was updated to include the cell
        # mapping.
        self.assertIsNotNone(self.fake_spec.requested_destination.cell)
        # Make sure the spec was updated to include the project_id.
        self.assertEqual(self.fake_spec.project_id, self.instance.project_id)
        mock_setup.assert_called_once_with(self.context, self.fake_spec)
        mock_reset.assert_called_once_with()
        self.ensure_network_metadata_mock.assert_called_once_with(
            self.instance)
        self.heal_reqspec_is_bfv_mock.assert_called_once_with(
            self.context, self.fake_spec, self.instance)
        mock_select.assert_called_once_with(self.context, self.fake_spec,
            [self.instance.uuid], return_objects=True, return_alternates=False)
        mock_check.assert_called_once_with('host1')
        mock_call.assert_called_once_with('host1', {})
    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_call_livem_checks_on_host')
    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_check_compatible_with_source_hypervisor')
    @mock.patch.object(query.SchedulerQueryClient, 'select_destinations',
                       return_value=[[fake_selection1]])
    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    def test_find_destination_no_image_works(self, mock_setup, mock_select,
                                             mock_check, mock_call):
        """Scheduling a destination also works for a volume-backed instance
        with an empty image_ref.
        """
        self.instance['image_ref'] = ''
        self.assertEqual(("host1", "node1", fake_limits1),
                         self.task._find_destination())
        mock_setup.assert_called_once_with(self.context, self.fake_spec)
        mock_select.assert_called_once_with(
            self.context, self.fake_spec, [self.instance.uuid],
            return_objects=True, return_alternates=False)
        mock_check.assert_called_once_with('host1')
        mock_call.assert_called_once_with('host1', {})
    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_remove_host_allocations')
    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_call_livem_checks_on_host')
    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_check_compatible_with_source_hypervisor')
    @mock.patch.object(query.SchedulerQueryClient, 'select_destinations',
                       side_effect=[[[fake_selection1]], [[fake_selection2]]])
    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    def _test_find_destination_retry_hypervisor_raises(
            self, error, mock_setup, mock_select, mock_check, mock_call,
            mock_remove):
        """Helper: the first destination fails the hypervisor compatibility
        check with *error*; the task retries and returns the second host.
        """
        mock_check.side_effect = [error, None]
        self.assertEqual(("host2", "node2", fake_limits2),
                         self.task._find_destination())
        # Should have removed allocations for the first host.
        mock_remove.assert_called_once_with(fake_selection1.compute_node_uuid)
        mock_setup.assert_called_once_with(self.context, self.fake_spec)
        mock_select.assert_has_calls([
            mock.call(self.context, self.fake_spec, [self.instance.uuid],
                      return_objects=True, return_alternates=False),
            mock.call(self.context, self.fake_spec, [self.instance.uuid],
                      return_objects=True, return_alternates=False)])
        mock_check.assert_has_calls([mock.call('host1'), mock.call('host2')])
        mock_call.assert_called_once_with('host2', {})
    def test_find_destination_retry_with_old_hypervisor(self):
        """Retry scheduling when the first host's hypervisor is too old."""
        self._test_find_destination_retry_hypervisor_raises(
            exception.DestinationHypervisorTooOld)
    def test_find_destination_retry_with_invalid_hypervisor_type(self):
        """Retry scheduling when the first host's hypervisor type differs."""
        self._test_find_destination_retry_hypervisor_raises(
            exception.InvalidHypervisorType)
    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_remove_host_allocations')
    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_call_livem_checks_on_host')
    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_check_compatible_with_source_hypervisor')
    @mock.patch.object(query.SchedulerQueryClient, 'select_destinations',
                       side_effect=[[[fake_selection1]], [[fake_selection2]]])
    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    def test_find_destination_retry_with_invalid_livem_checks(
            self, mock_setup, mock_select, mock_check, mock_call, mock_remove):
        """An Invalid error from the live-migration checks on the first
        host triggers one retry (migrate_max_retries=1) onto host2.
        """
        self.flags(migrate_max_retries=1)
        mock_call.side_effect = [exception.Invalid(), None]
        self.assertEqual(("host2", "node2", fake_limits2),
                         self.task._find_destination())
        # Should have removed allocations for the first host.
        mock_remove.assert_called_once_with(fake_selection1.compute_node_uuid)
        mock_setup.assert_called_once_with(self.context, self.fake_spec)
        mock_select.assert_has_calls([
            mock.call(self.context, self.fake_spec, [self.instance.uuid],
                      return_objects=True, return_alternates=False),
            mock.call(self.context, self.fake_spec, [self.instance.uuid],
                      return_objects=True, return_alternates=False)])
        mock_check.assert_has_calls([mock.call('host1'), mock.call('host2')])
        mock_call.assert_has_calls(
            [mock.call('host1', {}), mock.call('host2', {})])
    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_remove_host_allocations')
    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_call_livem_checks_on_host')
    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_check_compatible_with_source_hypervisor')
    @mock.patch.object(query.SchedulerQueryClient, 'select_destinations',
                       side_effect=[[[fake_selection1]], [[fake_selection2]]])
    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    def test_find_destination_retry_with_failed_migration_pre_checks(
            self, mock_setup, mock_select, mock_check, mock_call, mock_remove):
        """A MigrationPreCheckError on the first host triggers one retry
        (migrate_max_retries=1) onto host2.
        """
        self.flags(migrate_max_retries=1)
        mock_call.side_effect = [exception.MigrationPreCheckError('reason'),
                                 None]
        self.assertEqual(("host2", "node2", fake_limits2),
                         self.task._find_destination())
        # Should have removed allocations for the first host.
        mock_remove.assert_called_once_with(fake_selection1.compute_node_uuid)
        mock_setup.assert_called_once_with(self.context, self.fake_spec)
        mock_select.assert_has_calls([
            mock.call(self.context, self.fake_spec, [self.instance.uuid],
                      return_objects=True, return_alternates=False),
            mock.call(self.context, self.fake_spec, [self.instance.uuid],
                      return_objects=True, return_alternates=False)])
        mock_check.assert_has_calls([mock.call('host1'), mock.call('host2')])
        mock_call.assert_has_calls(
            [mock.call('host1', {}), mock.call('host2', {})])
    @mock.patch.object(objects.Migration, 'save')
    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_remove_host_allocations')
    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_check_compatible_with_source_hypervisor',
                       side_effect=exception.DestinationHypervisorTooOld())
    @mock.patch.object(query.SchedulerQueryClient, 'select_destinations',
                       return_value=[[fake_selection1]])
    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    def test_find_destination_retry_exceeds_max(
            self, mock_setup, mock_select, mock_check, mock_remove, mock_save):
        """With migrate_max_retries=0 a single failed host exhausts the
        retries: MaxRetriesExceeded is raised and the migration record is
        saved as 'failed'.
        """
        self.flags(migrate_max_retries=0)
        self.assertRaises(exception.MaxRetriesExceeded,
                          self.task._find_destination)
        self.assertEqual('failed', self.task.migration.status)
        mock_save.assert_called_once_with()
        # Should have removed allocations for the first host.
        mock_remove.assert_called_once_with(fake_selection1.compute_node_uuid)
        mock_setup.assert_called_once_with(self.context, self.fake_spec)
        mock_select.assert_called_once_with(
            self.context, self.fake_spec, [self.instance.uuid],
            return_objects=True, return_alternates=False)
        mock_check.assert_called_once_with('host1')
    @mock.patch.object(query.SchedulerQueryClient, 'select_destinations',
                       side_effect=exception.NoValidHost(reason=""))
    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    def test_find_destination_when_runs_out_of_hosts(self, mock_setup,
                                                     mock_select):
        """NoValidHost from the scheduler propagates unchanged."""
        self.assertRaises(exception.NoValidHost, self.task._find_destination)
        mock_setup.assert_called_once_with(self.context, self.fake_spec)
        mock_select.assert_called_once_with(
            self.context, self.fake_spec, [self.instance.uuid],
            return_objects=True, return_alternates=False)
@mock.patch("nova.utils.get_image_from_system_metadata")
@mock.patch("nova.scheduler.utils.build_request_spec")
@mock.patch("nova.scheduler.utils.setup_instance_group")
@mock.patch("nova.objects.RequestSpec.from_primitives")
def test_find_destination_with_remoteError(self,
m_from_primitives, m_setup_instance_group,
m_build_request_spec, m_get_image_from_system_metadata):
m_get_image_from_system_metadata.return_value = {'properties': {}}
m_build_request_spec.return_value = {}
fake_spec = objects.RequestSpec()
m_from_primitives.return_value = fake_spec
with mock.patch.object(self.task.query_client,
'select_destinations') as m_select_destinations:
error = messaging.RemoteError()
m_select_destinations.side_effect = error
self.assertRaises(exception.MigrationSchedulerRPCError,
self.task._find_destination)
    def test_call_livem_checks_on_host(self):
        """A MessagingTimeout from the destination compute's pre-check RPC
        is converted to MigrationPreCheckError.
        """
        with test.nested(
                mock.patch.object(self.task.compute_rpcapi,
                                  'check_can_live_migrate_destination',
                                  side_effect=messaging.MessagingTimeout),
                mock.patch.object(self.task, '_check_can_migrate_pci')):
            self.assertRaises(exception.MigrationPreCheckError,
                              self.task._call_livem_checks_on_host, {}, {})
    @mock.patch('nova.network.neutron.API.bind_ports_to_host')
    def test_bind_ports_on_destination_merges_profiles(self, mock_bind_ports):
        """Assert that if both the migration_data and the provider mapping
        contains binding profile related information then such information is
        merged in the resulting profile.
        """
        self.task.migrate_data = objects.LibvirtLiveMigrateData(
            vifs=[
                objects.VIFMigrateData(
                    port_id=uuids.port1,
                    profile_json=jsonutils.dumps(
                        {'some-key': 'value'}))
            ])
        provider_mappings = {uuids.port1: [uuids.dest_bw_rp]}
        self.task._bind_ports_on_destination('dest-host', provider_mappings)
        # The resulting profile carries both the original key and the
        # 'allocation' key derived from the provider mapping.
        mock_bind_ports.assert_called_once_with(
            context=self.context, instance=self.instance, host='dest-host',
            vnic_types=None,
            port_profiles={uuids.port1: {'allocation': uuids.dest_bw_rp,
                                         'some-key': 'value'}})
    @mock.patch('nova.network.neutron.API.bind_ports_to_host')
    def test_bind_ports_on_destination_migration_data(self, mock_bind_ports):
        """Assert that if only the migration_data contains binding profile
        related information then that is sent to neutron.
        """
        self.task.migrate_data = objects.LibvirtLiveMigrateData(
            vifs=[
                objects.VIFMigrateData(
                    port_id=uuids.port1,
                    profile_json=jsonutils.dumps(
                        {'some-key': 'value'}))
            ])
        # Empty mapping: no 'allocation' key should be added.
        provider_mappings = {}
        self.task._bind_ports_on_destination('dest-host', provider_mappings)
        mock_bind_ports.assert_called_once_with(
            context=self.context, instance=self.instance, host='dest-host',
            vnic_types=None,
            port_profiles={uuids.port1: {'some-key': 'value'}})
    @mock.patch('nova.network.neutron.API.bind_ports_to_host')
    def test_bind_ports_on_destination_provider_mapping(self, mock_bind_ports):
        """Assert that if only the provider mapping contains binding
        profile related information then that is sent to neutron.
        """
        # No profile_json on the VIF: the profile comes solely from the
        # provider mapping.
        self.task.migrate_data = objects.LibvirtLiveMigrateData(
            vifs=[
                objects.VIFMigrateData(
                    port_id=uuids.port1)
            ])
        provider_mappings = {uuids.port1: [uuids.dest_bw_rp]}
        self.task._bind_ports_on_destination('dest-host', provider_mappings)
        mock_bind_ports.assert_called_once_with(
            context=self.context, instance=self.instance, host='dest-host',
            vnic_types=None,
            port_profiles={uuids.port1: {'allocation': uuids.dest_bw_rp}})
    @mock.patch(
        'nova.compute.utils.'
        'update_pci_request_spec_with_allocated_interface_name')
    @mock.patch('nova.scheduler.utils.fill_provider_mapping')
    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_call_livem_checks_on_host')
    @mock.patch.object(live_migrate.LiveMigrationTask,
                       '_check_compatible_with_source_hypervisor')
    @mock.patch.object(query.SchedulerQueryClient, 'select_destinations',
                       return_value=[[fake_selection1]])
    @mock.patch.object(objects.RequestSpec, 'reset_forced_destinations')
    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    def test_find_destination_with_resource_request(
            self, mock_setup, mock_reset, mock_select, mock_check, mock_call,
            mock_fill_provider_mapping, mock_update_pci_req):
        """When ports have QoS resource requests, _find_destination adds
        them to the request spec, fills the provider mapping from the
        selection, and passes the mapping to the per-host checks and the
        PCI request update.
        """
        resource_req = [objects.RequestGroup(requester_id=uuids.port_id)]
        self.mock_get_res_req.return_value = resource_req
        self.assertEqual(("host1", "node1", fake_limits1),
                         self.task._find_destination())
        # Make sure the request_spec was updated to include the cell
        # mapping.
        self.assertIsNotNone(self.fake_spec.requested_destination.cell)
        # Make sure the spec was updated to include the project_id.
        self.assertEqual(self.fake_spec.project_id, self.instance.project_id)
        # Make sure that requested_resources are added to the request spec
        self.assertEqual(
            resource_req, self.task.request_spec.requested_resources)
        mock_setup.assert_called_once_with(self.context, self.fake_spec)
        mock_reset.assert_called_once_with()
        self.ensure_network_metadata_mock.assert_called_once_with(
            self.instance)
        self.heal_reqspec_is_bfv_mock.assert_called_once_with(
            self.context, self.fake_spec, self.instance)
        mock_select.assert_called_once_with(self.context, self.fake_spec,
            [self.instance.uuid], return_objects=True, return_alternates=False)
        mock_check.assert_called_once_with('host1')
        mock_call.assert_called_once_with('host1', {uuids.port_id: []})
        mock_fill_provider_mapping.assert_called_once_with(
            self.task.request_spec, fake_selection1)
        mock_update_pci_req.assert_called_once_with(
            self.context, self.task.report_client, self.instance,
            {uuids.port_id: []})
    @mock.patch.object(objects.InstanceMapping, 'get_by_instance_uuid',
                       side_effect=exception.InstanceMappingNotFound(
                           uuid=uuids.instance))
    def test_get_source_cell_mapping_not_found(self, mock_get):
        """Negative test where InstanceMappingNotFound is raised and converted
        to MigrationPreCheckError.
        """
        # The mocked lookup raises; the task must translate the error for
        # the caller rather than leaking the mapping exception.
        self.assertRaises(exception.MigrationPreCheckError,
                          self.task._get_source_cell_mapping)
        mock_get.assert_called_once_with(
            self.task.context, self.task.instance.uuid)
    @mock.patch.object(objects.HostMapping, 'get_by_host',
                       side_effect=exception.HostMappingNotFound(
                           name='destination'))
    def test_get_destination_cell_mapping_not_found(self, mock_get):
        """Negative test where HostMappingNotFound is raised and converted
        to MigrationPreCheckError.
        """
        # Mirrors test_get_source_cell_mapping_not_found for the
        # destination-host lookup.
        self.assertRaises(exception.MigrationPreCheckError,
                          self.task._get_destination_cell_mapping)
        mock_get.assert_called_once_with(
            self.task.context, self.task.destination)
@mock.patch('nova.scheduler.client.report.SchedulerReportClient.'
'remove_provider_tree_from_instance_allocation')
def test_remove_host_allocations(self, remove_provider):
self.task._remove_host_allocations(uuids.cn)
remove_provider.assert_called_once_with(
self.task.context, self.task.instance.uuid, uuids.cn)
    def test_check_can_migrate_pci(self):
        """Tests that _check_can_migrate_pci() allows live-migration if
        instance does not contain non-network related PCI requests and
        raises MigrationPreCheckError otherwise
        """
        # Inner helper: runs the check with the given PCI requests under a
        # matrix of Neutron (port binding ext) / compute (VIF-related PCI
        # alloc) support combinations. The two trailing mock args are
        # injected by the decorators.
        @mock.patch.object(self.task.network_api,
                           'supports_port_binding_extension')
        @mock.patch.object(live_migrate,
                           'supports_vif_related_pci_allocations')
        def _test(instance_pci_reqs,
                  supp_binding_ext_retval,
                  supp_vif_related_pci_alloc_retval,
                  mock_supp_vif_related_pci_alloc,
                  mock_supp_port_binding_ext):
            mock_supp_vif_related_pci_alloc.return_value = \
                supp_vif_related_pci_alloc_retval
            mock_supp_port_binding_ext.return_value = \
                supp_binding_ext_retval
            self.task.instance.pci_requests = instance_pci_reqs
            self.task._check_can_migrate_pci("Src", "Dst")
            # In case we managed to get away without raising, check mocks.
            if instance_pci_reqs:
                mock_supp_port_binding_ext.assert_called_once_with(
                    self.context)
                self.assertTrue(mock_supp_vif_related_pci_alloc.called)
        # instance has no PCI requests
        _test(None, False, False)  # No support in Neutron and Computes
        _test(None, True, False)  # No support in Computes
        _test(None, False, True)  # No support in Neutron
        _test(None, True, True)  # Support in both Neutron and Computes
        # instance contains network related PCI requests (alias_name=None)
        pci_requests = objects.InstancePCIRequests(
            requests=[objects.InstancePCIRequest(alias_name=None)])
        self.assertRaises(exception.MigrationPreCheckError,
                          _test, pci_requests, False, False)
        self.assertRaises(exception.MigrationPreCheckError,
                          _test, pci_requests, True, False)
        self.assertRaises(exception.MigrationPreCheckError,
                          _test, pci_requests, False, True)
        _test(pci_requests, True, True)
        # instance contains Non network related PCI requests (alias_name!=None)
        # which are never live-migratable, regardless of support flags.
        pci_requests.requests.append(
            objects.InstancePCIRequest(alias_name="non-network-related-pci"))
        self.assertRaises(exception.MigrationPreCheckError,
                          _test, pci_requests, False, False)
        self.assertRaises(exception.MigrationPreCheckError,
                          _test, pci_requests, True, False)
        self.assertRaises(exception.MigrationPreCheckError,
                          _test, pci_requests, False, True)
        self.assertRaises(exception.MigrationPreCheckError,
                          _test, pci_requests, True, True)
    def test_check_can_migrate_specific_resources(self):
        """Test _check_can_migrate_specific_resources allows live migration
        with vpmem.
        """
        # Helper: run the check with the given instance resources and the
        # given driver capability for vPMEM live migration.
        @mock.patch.object(live_migrate, 'supports_vpmem_live_migration')
        def _test(resources, supp_lm_vpmem_retval, mock_support_lm_vpmem):
            self.instance.resources = resources
            mock_support_lm_vpmem.return_value = supp_lm_vpmem_retval
            self.task._check_can_migrate_specific_resources()
        vpmem_0 = objects.LibvirtVPMEMDevice(
            label='4GB', name='ns_0', devpath='/dev/dax0.0',
            size=4292870144, align=2097152)
        resource_0 = objects.Resource(
            provider_uuid=uuids.rp,
            resource_class="CUSTOM_PMEM_NAMESPACE_4GB",
            identifier='ns_0', metadata=vpmem_0)
        resources = objects.ResourceList(
            objects=[resource_0])
        # No vPMEM resources: passes regardless of capability.
        _test(None, False)
        _test(None, True)
        # vPMEM resources require the capability; otherwise pre-check fails.
        _test(resources, True)
        self.assertRaises(exception.MigrationPreCheckError,
                          _test, resources, False)
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
from telemetry import benchmark
from telemetry import record_wpr
from telemetry.core import util
from telemetry.core import wpr_modes
from telemetry import decorators
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
from telemetry.page import page_test
from telemetry.unittest_util import tab_test_case
class MockPage(page_module.Page):
  """Test double for a telemetry Page.

  Records the names of invoked lifecycle/interaction methods in
  ``func_calls`` so tests can assert which hooks ran, and in what order.
  """
  def __init__(self, page_set, url):
    super(MockPage, self).__init__(url=url,
                                   page_set=page_set,
                                   base_dir=util.GetUnittestDataDir())
    # Names of the methods called on this page, in call order.
    self.func_calls = []
  def RunNavigateSteps(self, action_runner):
    # Record the call, then perform the real navigation.
    self.func_calls.append('RunNavigateSteps')
    super(MockPage, self).RunNavigateSteps(action_runner)
  def RunPageInteractions(self, _):
    self.func_calls.append('RunPageInteractions')
  def RunSmoothness(self, _):
    self.func_calls.append('RunSmoothness')
class MockPageSet(page_set_module.PageSet):
  """Single-story PageSet wrapping one MockPage pointed at *url*."""
  def __init__(self, url=''):
    super(MockPageSet, self).__init__(
        archive_data_file='data/archive_files/test.json')
    self.AddUserStory(MockPage(self, url))
class MockPageTest(page_test.PageTest):
  """PageTest double that records which browser/page hooks were invoked.

  Each hook appends its own name to ``func_calls`` instead of doing real
  work, letting tests assert on forwarding behavior.
  """
  def __init__(self):
    super(MockPageTest, self).__init__()
    self._action_name_to_run = "RunPageInteractions"
    # Names of the hook methods called on this test, in call order.
    self.func_calls = []
  def CustomizeBrowserOptions(self, options):
    self.func_calls.append('CustomizeBrowserOptions')
  def WillNavigateToPage(self, page, tab):
    self.func_calls.append('WillNavigateToPage')
  def DidNavigateToPage(self, page, tab):
    self.func_calls.append('DidNavigateToPage')
  def ValidateAndMeasurePage(self, page, tab, results):
    self.func_calls.append('ValidateAndMeasurePage')
  def WillStartBrowser(self, platform):
    self.func_calls.append('WillStartBrowser')
  def DidStartBrowser(self, browser):
    self.func_calls.append('DidStartBrowser')
class MockBenchmark(benchmark.Benchmark):
  """Benchmark double driven by MockPageTest.

  Exposes a --mock-benchmark-url flag that selects the URL of the single
  page in the generated page set.
  """
  test = MockPageTest  # PageTest class used by this benchmark
  mock_page_set = None  # set by CreatePageSet for later inspection
  @classmethod
  def AddBenchmarkCommandLineArgs(cls, group):
    group.add_option('', '--mock-benchmark-url', action='store', type='string')
  def CreatePageSet(self, options):
    kwargs = {}
    if options.mock_benchmark_url:
      kwargs['url'] = options.mock_benchmark_url
    # Keep a reference so tests can compare against the recorded results.
    self.mock_page_set = MockPageSet(**kwargs)
    return self.mock_page_set
class RecordWprUnitTests(tab_test_case.TabTestCase):
  """Tests for record_wpr's RecorderPageTest and WprRecorder using the
  Mock* doubles above and a live test tab.
  """
  _base_dir = util.GetUnittestDataDir()
  _test_data_dir = os.path.join(util.GetUnittestDataDir(), 'page_tests')
  @classmethod
  def setUpClass(cls):
    # Make the unittest data dirs importable, then resolve the test URL.
    sys.path.extend([cls._base_dir, cls._test_data_dir])
    super(RecordWprUnitTests, cls).setUpClass()
    cls._url = cls.UrlOfUnittestFile('blank.html')
  # When the RecorderPageTest is created from a PageSet, we do not have a
  # PageTest to use. In this case, we will record every available action.
  def testRunPage_AllActions(self):
    record_page_test = record_wpr.RecorderPageTest()
    page = MockPage(page_set=MockPageSet(url=self._url), url=self._url)
    record_page_test.RunNavigateSteps(page, self._tab)
    self.assertTrue('RunNavigateSteps' in page.func_calls)
    record_page_test.RunPage(page, self._tab, results=None)
    self.assertTrue('RunPageInteractions' in page.func_calls)
  # When the RecorderPageTest is created from a Benchmark, the benchmark will
  # have a PageTest, specified by its test attribute.
  def testRunPage_OnlyRunBenchmarkAction(self):
    record_page_test = record_wpr.RecorderPageTest()
    record_page_test.page_test = MockBenchmark().test()
    page = MockPage(page_set=MockPageSet(url=self._url), url=self._url)
    record_page_test.RunPage(page, self._tab, results=None)
    # Only the benchmark's interaction runs, not other page actions.
    self.assertTrue('RunPageInteractions' in page.func_calls)
    self.assertFalse('RunSmoothness' in page.func_calls)
  def testRunPage_CallBenchmarksPageTestsFunctions(self):
    """RunPage forwards exactly one call to the benchmark's PageTest."""
    record_page_test = record_wpr.RecorderPageTest()
    record_page_test.page_test = MockBenchmark().test()
    page = MockPage(page_set=MockPageSet(url=self._url), url=self._url)
    record_page_test.RunPage(page, self._tab, results=None)
    self.assertEqual(1, len(record_page_test.page_test.func_calls))
    self.assertEqual('ValidateAndMeasurePage',
                     record_page_test.page_test.func_calls[0])
  @decorators.Disabled('chromeos') # crbug.com/404868.
  def testWprRecorderWithPageSet(self):
    """Recording a plain PageSet succeeds for every page in the set."""
    flags = ['--browser', self._browser.browser_type,
             '--device', self._device]
    mock_page_set = MockPageSet(url=self._url)
    wpr_recorder = record_wpr.WprRecorder(self._test_data_dir,
                                          mock_page_set, flags)
    results = wpr_recorder.CreateResults()
    wpr_recorder.Record(results)
    self.assertEqual(set(mock_page_set.pages), results.pages_that_succeeded)
  def testWprRecorderWithBenchmark(self):
    """Recording via a Benchmark records the benchmark's page set."""
    flags = ['--mock-benchmark-url', self._url,
             '--browser', self._browser.browser_type,
             '--device', self._device]
    mock_benchmark = MockBenchmark()
    wpr_recorder = record_wpr.WprRecorder(self._test_data_dir, mock_benchmark,
                                          flags)
    results = wpr_recorder.CreateResults()
    wpr_recorder.Record(results)
    self.assertEqual(set(mock_benchmark.mock_page_set.pages),
                     results.pages_that_succeeded)
  def testPageSetBaseDirFlag(self):
    """--page-set-base-dir overrides the recorder's base directory."""
    flags = [
        '--page-set-base-dir', self._test_data_dir,
        '--mock-benchmark-url', self._url,
        '--browser', self._browser.browser_type,
        '--device', self._device
    ]
    mock_benchmark = MockBenchmark()
    # The positional dir is bogus on purpose; the flag must win.
    wpr_recorder = record_wpr.WprRecorder(
        'non-existent-dummy-dir', mock_benchmark, flags)
    results = wpr_recorder.CreateResults()
    wpr_recorder.Record(results)
    self.assertEqual(set(mock_benchmark.mock_page_set.pages),
                     results.pages_that_succeeded)
  def testCommandLineFlags(self):
    """Flags from page_runner, the benchmark and record_wpr all parse."""
    flags = [
        '--page-repeat', '2',
        '--mock-benchmark-url', self._url,
        '--upload',
    ]
    wpr_recorder = record_wpr.WprRecorder(self._test_data_dir, MockBenchmark(),
                                          flags)
    # page_runner command-line args
    self.assertEquals(2, wpr_recorder.options.page_repeat)
    # benchmark command-line args
    self.assertEquals(self._url, wpr_recorder.options.mock_benchmark_url)
    # record_wpr command-line arg to upload to cloud-storage.
    self.assertTrue(wpr_recorder.options.upload)
    # invalid command-line args
    self.assertFalse(hasattr(wpr_recorder.options, 'not_a_real_option'))
  def testRecordingEnabled(self):
    """WprRecorder always configures the browser in WPR record mode."""
    flags = ['--mock-benchmark-url', self._url]
    wpr_recorder = record_wpr.WprRecorder(self._test_data_dir, MockBenchmark(),
                                          flags)
    self.assertEqual(wpr_modes.WPR_RECORD,
                     wpr_recorder.options.browser_options.wpr_mode)
  # When the RecorderPageTest CustomizeBrowserOptions/WillStartBrowser/
  # DidStartBrowser function is called, it forwards the call to the PageTest
  def testRecorderPageTest_BrowserMethods(self):
    flags = ['--mock-benchmark-url', self._url]
    record_page_test = record_wpr.RecorderPageTest()
    record_page_test.page_test = MockBenchmark().test()
    wpr_recorder = record_wpr.WprRecorder(self._test_data_dir, MockBenchmark(),
                                          flags)
    record_page_test.CustomizeBrowserOptions(wpr_recorder.options)
    record_page_test.WillStartBrowser(self._tab.browser.platform)
    record_page_test.DidStartBrowser(self._tab.browser)
    self.assertTrue(
        'CustomizeBrowserOptions' in record_page_test.page_test.func_calls)
    self.assertTrue('WillStartBrowser' in record_page_test.page_test.func_calls)
    self.assertTrue('DidStartBrowser' in record_page_test.page_test.func_calls)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2021 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import glob
import grp
import os
import pwd
import shutil
import tempfile

import eventlet

import confluent.noderange as noderange
import confluent.sshutil as sshutil
import confluent.util as util
def mkdirp(path):
    """Create directory *path*, including parents, tolerating existence.

    Unlike a bare os.makedirs(), a directory tree that already exists is
    not an error; any other OSError (permissions, ENOTDIR, ...) is
    re-raised.
    """
    try:
        os.makedirs(path)
    except OSError as e:
        # EEXIST (the magic number 17 previously hard-coded here) just
        # means the tree is already present.
        if e.errno != errno.EEXIST:
            raise
def get_entries(filename):
    """Yield the effective entries of a syncfile list.

    Comment ('#') and blank lines are skipped.  Section markers
    (REPLACE:/MERGE:/APPENDONCE: — REPLACE: being the initial default)
    are tracked and also passed through to the caller.  A line of the
    form '<otherfile' (optionally '<otherfile>') recursively inlines
    that file's entries and then re-yields the current section marker so
    the parent file's section state is restored after the include.
    """
    current_section = 'REPLACE:'
    filename = filename.strip()
    if filename.endswith('>'):
        filename = filename[:-1]
    with open(filename, 'r') as listfile:
        rawlines = listfile.read().split('\n')
    for line in rawlines:
        line = line.split('#', 1)[0].strip()
        if not line:
            continue
        if line in ('APPENDONCE:', 'MERGE:', 'REPLACE:'):
            current_section = line
        if line.startswith('<'):
            included = line[1:]
            if included.endswith('>'):
                included = included[:-1]
            if not included.startswith('/'):
                # Relative includes resolve against the including file.
                included = os.path.join(os.path.dirname(filename), included)
            for subentry in get_entries(included):
                yield subentry
            yield current_section
        else:
            yield line
class SyncList(object):
    """Parsed form of a syncfile for one specific node.

    Splits the entries produced by get_entries() into per-section maps of
    source-spec -> destination (destination is None when the entry has no
    ``->``), plus a per-target options map (owner/group) in ``optmap``.
    Entries gated with a ``noderange:`` prefix on the destination are kept
    only when *nodename* is in that noderange.
    """

    def __init__(self, filename, nodename, cfg):
        slist = None
        # One map per syncfile section; currmap points at whichever of these
        # is active while parsing.
        self.replacemap = {}
        # NOTE(review): no section marker ever selects appendmap below
        # ('APPEND:' would hit the unsupported-section error), so this map
        # appears to always stay empty -- confirm against callers.
        self.appendmap = {}
        self.appendoncemap = {}
        self.mergemap = {}
        # optmap: destination (or source, when no '->') path -> parsed
        # owner/group options for that file.
        self.optmap = {}
        entries = get_entries(filename)
        currmap = self.replacemap
        for ent in entries:
            # Strip any trailing comment (defensive; get_entries already
            # removes comments).
            try:
                cmtidx = ent.index('#')
                ent = ent[:cmtidx]
            except ValueError:
                pass
            # These characters are reserved so the format can grow later.
            for special in '$%^&|{}':
                if special in ent:
                    raise Exception(
                        'Special character "{}" reserved for future use'.format(special))
            ent = ent.strip()
            if not ent:
                continue
            # Section marker: switch the active map and move on.
            if ent[-1] == ':':
                if ent == 'MERGE:':
                    currmap = self.mergemap
                elif ent == 'APPENDONCE:':
                    currmap = self.appendoncemap
                elif ent == 'REPLACE:':
                    currmap = self.replacemap
                else:
                    raise Exception(
                        'Section "{}" is not currently supported in syncfiles'.format(ent[:-1]))
                continue
            if '->' in ent:
                # Explicit destination form: "src -> [noderange:]dst (opts)"
                k, v = ent.split('->')
                k = k.strip()
                v = v.strip()
                if ':' in v:
                    # Destination is gated by a noderange; skip the entry
                    # unless this node is a member.
                    nr, v = v.split(':', 1)
                    for candidate in noderange.NodeRange(nr, cfg).nodes:
                        if candidate == nodename:
                            break
                    else:
                        continue
                optparts = v.split()
                v = optparts[0]
                optparts = optparts[1:]
            else:
                # No '->': the whole entry is the source; anything from the
                # first '('-prefixed token onward is options.
                kparts = []
                optparts = []
                currparts = kparts
                for part in ent.split():
                    if part[0] == '(':
                        currparts = optparts
                    currparts.append(part)
                k = ' '.join(kparts)
                v = None
            entopts = {}
            if optparts:
                # Options must be a single parenthesized, comma-separated
                # group, e.g. "(owner=root,group=wheel)".
                if optparts[0][0] != '(' or optparts[-1][-1] != ')':
                    raise Exception("Unsupported syntax in syncfile: " + ent)
                opts = ','.join(optparts)
                opts = opts[1:-1]
                for opt in opts.split(','):
                    optname, optval = opt.split('=')
                    if optname == 'owner':
                        # Accept a user name (resolved locally via pwd) or a
                        # raw numeric uid; record whichever parts resolve.
                        try:
                            uid = pwd.getpwnam(optval).pw_uid
                        except KeyError:
                            try:
                                uid = int(optval)
                                optval = None
                            except ValueError:
                                uid = None
                        if optval:
                            optval = {'name': optval}
                        else:
                            optval = {}
                        if uid is not None:
                            optval['id'] = uid
                    elif optname == 'group':
                        # Same handling as owner, via grp.
                        try:
                            gid = grp.getgrnam(optval).gr_gid
                        except KeyError:
                            try:
                                gid = int(optval)
                                optval = None
                            except ValueError:
                                gid = None
                        if optval:
                            optval = {'name': optval}
                        else:
                            optval = {}
                        if gid is not None:
                            optval['id'] = gid
                    if optval:
                        entopts[optname] = optval
            currmap[k] = v
            # Options are keyed by the destination when one was given,
            # otherwise by the source path(s).
            targ = v if v else k
            for f in targ.split():
                self.optmap[f] = entopts
def sync_list_to_node(sl, node, suffixes):
    """Stage a SyncList into a temp tree and rsync it to *node* as root.

    ``suffixes`` maps section names ('append'/'merge'/'appendonce') to the
    subdirectory each section is staged under; leading slashes on those
    values are stripped in place (the caller's dict is mutated).  Returns a
    dict with the parsed per-file 'options' and the rsync 'output'.
    Raises on rsync failure, with a friendlier message when the failure is
    traced to unreadable staged files.
    """
    targdir = tempfile.mkdtemp('.syncto{}'.format(node))
    output = ''
    try:
        # REPLACE entries land at the root of the staged tree.
        for ent in sl.replacemap:
            stage_ent(sl.replacemap, ent, targdir)
        if 'append' in suffixes:
            while suffixes['append'] and suffixes['append'][0] == '/':
                suffixes['append'] = suffixes['append'][1:]
            for ent in sl.appendmap:
                stage_ent(sl.appendmap, ent,
                          os.path.join(targdir, suffixes['append']))
        if 'merge' in suffixes:
            while suffixes['merge'] and suffixes['merge'][0] == '/':
                suffixes['merge'] = suffixes['merge'][1:]
            for ent in sl.mergemap:
                # True -> concatenate with any already-staged file content.
                stage_ent(sl.mergemap, ent,
                          os.path.join(targdir, suffixes['merge']), True)
        if 'appendonce' in suffixes:
            while suffixes['appendonce'] and suffixes['appendonce'][0] == '/':
                suffixes['appendonce'] = suffixes['appendonce'][1:]
            for ent in sl.appendoncemap:
                stage_ent(sl.appendoncemap, ent,
                          os.path.join(targdir, suffixes['appendonce']), True)
        sshutil.prep_ssh_key('/etc/confluent/ssh/automation')
        # -L dereferences the symlinks created by mkpathorlink so real file
        # content is transferred.
        output = util.run(
            ['rsync', '-rvLD', targdir + '/', 'root@{}:/'.format(node)])[0]
    except Exception as e:
        if 'CalledProcessError' not in repr(e):
            # https://github.com/eventlet/eventlet/issues/413
            # for some reason, can't catch the calledprocesserror normally
            # for this exception, implement a hack workaround
            raise
        # rsync failed; check whether unreadable staged files explain it,
        # and if so report them instead of the raw rsync error.
        unreadablefiles = []
        for root, dirnames, filenames in os.walk(targdir):
            for filename in filenames:
                filename = os.path.join(root, filename)
                try:
                    with open(filename, 'r') as _:
                        pass
                except OSError as e:
                    unreadablefiles.append(filename.replace(targdir, ''))
        if unreadablefiles:
            raise Exception("Syncing failed due to unreadable files: " + ','.join(unreadablefiles))
        else:
            raise
    finally:
        # Always discard the staging tree, success or failure.
        shutil.rmtree(targdir)
    if not isinstance(output, str):
        output = output.decode('utf8')
    retval = {
        'options': sl.optmap,
        'output': output,
    }
    return retval  # need dictionary with output and options
def stage_ent(currmap, ent, targdir, appendexist=False):
    """Stage one syncfile entry into the staging tree rooted at *targdir*.

    ``ent`` is a whitespace-separated list of glob patterns; every pattern
    must match at least one file.  The destination is ``currmap[ent]``, or
    the source's own directory when that is None.  ``appendexist`` is passed
    through to mkpathorlink to request append-on-collision behavior.
    """
    dst = currmap[ent]
    matched = []
    for pattern in ent.split():
        hits = glob.glob(pattern)
        if not hits:
            raise Exception('No matching files for "{}"'.format(pattern))
        matched.extend(hits)
    if not matched:
        raise Exception('No matching files for "{}"'.format(ent))
    if dst is None:  # entry had no '->': mirror the source's own location
        dst = os.path.dirname(matched[0]) + '/'
    # Destinations are absolute on the target node but must be relative
    # inside the staging tree.
    dst = dst.lstrip('/')
    if len(matched) > 1 and dst[-1] != '/':
        raise Exception(
            'Multiple files match {}, {} needs a trailing slash to indicate a directory'.format(ent, dst))
    fulltarg = os.path.join(targdir, dst)
    for srcfile in matched:
        mkpathorlink(srcfile, fulltarg, appendexist)
def mkpathorlink(source, destination, appendexist=False):
    """Mirror *source* into the staging tree at *destination*.

    Directories are recreated and walked recursively; regular files become
    symlinks back to the source (rsync -L later dereferences them).  With
    ``appendexist``, an already-staged destination file is rewritten as a
    real file consisting of the old content followed by the new source's
    content (append/merge semantics).
    """
    if os.path.isdir(source):
        mkdirp(destination)
        for ent in os.listdir(source):
            currsrc = os.path.join(source, ent)
            currdst = os.path.join(destination, ent)
            # NOTE(review): appendexist is not forwarded here, so append
            # semantics only apply to files named directly, not to files
            # inside a staged directory -- confirm this is intentional.
            mkpathorlink(currsrc, currdst)
    else:
        if destination[-1] == '/':
            # Trailing slash means "into this directory": keep the source's
            # basename.
            mkdirp(destination)
            destination = os.path.join(destination, os.path.basename(source))
        else:
            mkdirp(os.path.dirname(destination))
        if appendexist and os.path.exists(destination):
            # Destination already staged: replace it with a real file that
            # is old content + new content.  Copy the old content aside
            # first (destination may itself be a symlink).
            tmphdl, tmpnam = tempfile.mkstemp()
            try:
                shutil.copy(destination, tmpnam)
            finally:
                os.close(tmphdl)
            os.remove(destination)
            with open(destination, 'w') as realdest:
                with open(tmpnam) as olddest:
                    realdest.write(olddest.read())
                with open(source) as sourcedata:
                    realdest.write(sourcedata.read())
            os.remove(tmpnam)
        else:
            # Replace any stale symlink, then link to the live source file.
            if os.path.islink(destination):
                os.remove(destination)
            os.symlink(source, destination)
# In-flight sync operations: nodename -> eventlet greenthread running
# sync_list_to_node for that node (populated by start_syncfiles, drained
# by get_syncresult).
syncrunners = {}
def start_syncfiles(nodename, cfg, suffixes):
    """Kick off a background syncfiles run for *nodename*.

    The node's profile is taken from the first non-empty of
    deployment.pendingprofile, deployment.stagedprofile, deployment.profile.
    Returns an HTTP-style status string: '200 OK' when there is nothing to
    sync, '202 Queued' when a greenthread was spawned.  Raises when no
    profile is assigned at all.
    """
    deployinfo = cfg.get_node_attributes(
        nodename, ('deployment.*',))
    deployinfo = deployinfo.get(nodename, {})
    profile = ''
    for attrname in ('deployment.pendingprofile',
                     'deployment.stagedprofile',
                     'deployment.profile'):
        profile = deployinfo.get(attrname, {}).get('value', '')
        if profile:
            break
    if not profile:
        raise Exception('Cannot perform syncfiles without profile assigned')
    synclist = '/var/lib/confluent/public/os/{}/syncfiles'.format(profile)
    if not os.path.exists(synclist):
        return '200 OK'  # profile ships no syncfiles; nothing to run
    sl = SyncList(synclist, nodename, cfg)
    if not (sl.appendmap or sl.mergemap or sl.replacemap or sl.appendoncemap):
        return '200 OK'  # syncfile parsed to zero entries for this node
    syncrunners[nodename] = eventlet.spawn(
        sync_list_to_node, sl, nodename, suffixes)
    return '202 Queued'  # work continues in the background greenthread
def get_syncresult(nodename):
    """Poll the background sync for *nodename*.

    Returns (status, result): '204 Not Running' when no sync was started,
    '200 OK' with an empty result while it is still in progress, and
    '200 OK' with the finished sync's return value once done (at which
    point the runner entry is discarded).
    """
    runner = syncrunners.get(nodename)
    if runner is None:
        return ('204 Not Running', '')
    if not runner.dead:
        return ('200 OK', '')
    result = runner.wait()
    del syncrunners[nodename]
    return ('200 OK', result)
|
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: make WelcomeEmailRecipient.recipient unique.

    Adds (forwards) / drops (backwards) a DB-level unique constraint on the
    recipient_id column so each user can receive at most one welcome email.
    """

    def forwards(self, orm):
        # Adding unique constraint on 'WelcomeEmailRecipient', fields ['recipient']
        db.create_unique('canvas_welcomeemailrecipient', ['recipient_id'])

    def backwards(self, orm):
        # Removing unique constraint on 'WelcomeEmailRecipient', fields ['recipient']
        db.delete_unique('canvas_welcomeemailrecipient', ['recipient_id'])

    # Frozen ORM snapshot generated by South; it is used to build the fake
    # `orm` object handed to forwards()/backwards().  Auto-generated -- do
    # not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '254', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'canvas.apiapp': {
            'Meta': {'object_name': 'APIApp'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
        },
        'canvas.apiauthtoken': {
            'Meta': {'unique_together': "(('user', 'app'),)", 'object_name': 'APIAuthToken'},
            'app': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.APIApp']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'token': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'canvas.bestof': {
            'Meta': {'object_name': 'BestOf'},
            'category': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'best_of'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Category']"}),
            'chosen_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'best_of'", 'to': "orm['canvas.Comment']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'timestamp': ('canvas.util.UnixTimestampField', [], {})
        },
        'canvas.category': {
            'Meta': {'object_name': 'Category'},
            'allow_textonlyop': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
            'disable_remix': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'founded': ('django.db.models.fields.FloatField', [], {'default': '1298956320'}),
            'founder': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'founded_groups'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'moderators': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'moderated_categories'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
            'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'canvas.comment': {
            'Meta': {'object_name': 'Comment'},
            'anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'author': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
            'category': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Category']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
            'judged': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'ot_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'replies'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Comment']"}),
            'parent_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'to': "orm['canvas.Content']"}),
            'replied_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['canvas.Comment']", 'null': 'True', 'blank': 'True'}),
            'reply_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'used_in_comments'", 'null': 'True', 'to': "orm['canvas.Content']"}),
            'reply_text': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
            'score': ('django.db.models.fields.FloatField', [], {'default': '0', 'db_index': 'True'}),
            'timestamp': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
            'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'canvas.commentflag': {
            'Meta': {'object_name': 'CommentFlag'},
            'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flags'", 'to': "orm['canvas.Comment']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
            'timestamp': ('canvas.util.UnixTimestampField', [], {}),
            'type_id': ('django.db.models.fields.IntegerField', [], {}),
            'undone': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flags'", 'to': "orm['auth.User']"})
        },
        'canvas.commentmoderationlog': {
            'Meta': {'object_name': 'CommentModerationLog'},
            'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Comment']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'moderator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
            'note': ('django.db.models.fields.TextField', [], {}),
            'timestamp': ('canvas.util.UnixTimestampField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'moderated_comments_log'", 'to': "orm['auth.User']"}),
            'visibility': ('django.db.models.fields.IntegerField', [], {})
        },
        'canvas.commentpin': {
            'Meta': {'object_name': 'CommentPin'},
            'auto': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Comment']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'timestamp': ('canvas.util.UnixTimestampField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'canvas.commentsticker': {
            'Meta': {'object_name': 'CommentSticker'},
            'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stickers'", 'to': "orm['canvas.Comment']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
            'timestamp': ('canvas.util.UnixTimestampField', [], {}),
            'type_id': ('django.db.models.fields.IntegerField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'canvas.content': {
            'Meta': {'object_name': 'Content'},
            'alpha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'animated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
            'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
            'remix_of': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'remixes'", 'null': 'True', 'to': "orm['canvas.Content']"}),
            'remix_text': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
            'source_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '4000', 'blank': 'True'}),
            'stamps_used': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'used_as_stamp'", 'blank': 'True', 'to': "orm['canvas.Content']"}),
            'timestamp': ('canvas.util.UnixTimestampField', [], {}),
            'url_mapping': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.ContentUrlMapping']", 'null': 'True', 'blank': 'True'}),
            'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'canvas.contenturlmapping': {
            'Meta': {'object_name': 'ContentUrlMapping'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'canvas.emailunsubscribe': {
            'Meta': {'object_name': 'EmailUnsubscribe'},
            'email': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'canvas.externalcontent': {
            'Meta': {'object_name': 'ExternalContent'},
            '_data': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
            'content_type': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'external_content'", 'to': "orm['canvas.Comment']"}),
            'source_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '4000', 'null': 'True', 'blank': 'True'})
        },
        'canvas.facebookinvite': {
            'Meta': {'object_name': 'FacebookInvite'},
            'fb_message_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'invited_fbid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'invitee': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'facebook_invited_from'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
            'inviter': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'facebook_sent_invites'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"})
        },
        'canvas.facebookuser': {
            'Meta': {'object_name': 'FacebookUser'},
            'email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'fb_uid': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'gender': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_invited': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'})
        },
        'canvas.followcategory': {
            'Meta': {'unique_together': "(('user', 'category'),)", 'object_name': 'FollowCategory'},
            'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'followers'", 'to': "orm['canvas.Category']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'following'", 'to': "orm['auth.User']"})
        },
        'canvas.invitecode': {
            'Meta': {'object_name': 'InviteCode'},
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'invitee': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'invited_from'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
            'inviter': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'sent_invites'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"})
        },
        'canvas.remixplugin': {
            'Meta': {'object_name': 'RemixPlugin'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            's3md5': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'timestamp': ('canvas.util.UnixTimestampField', [], {'default': '0'})
        },
        'canvas.stashcontent': {
            'Meta': {'object_name': 'StashContent'},
            'content': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Content']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'canvas.userinfo': {
            'Meta': {'object_name': 'UserInfo'},
            'free_invites': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'invite_bypass': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
            'is_qa': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'post_anonymously': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'power_level': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
        },
        'canvas.usermoderationlog': {
            'Meta': {'object_name': 'UserModerationLog'},
            'action': ('django.db.models.fields.IntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'moderator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
            'note': ('django.db.models.fields.TextField', [], {}),
            'timestamp': ('canvas.util.UnixTimestampField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'moderation_log'", 'to': "orm['auth.User']"})
        },
        'canvas.userwarning': {
            'Meta': {'object_name': 'UserWarning'},
            'comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['canvas.Comment']", 'null': 'True', 'blank': 'True'}),
            'confirmed': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
            'custom_message': ('django.db.models.fields.TextField', [], {}),
            'disable_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'issued': ('canvas.util.UnixTimestampField', [], {}),
            'stock_message': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_warnings'", 'to': "orm['auth.User']"}),
            'viewed': ('canvas.util.UnixTimestampField', [], {'default': '0'})
        },
        'canvas.welcomeemailrecipient': {
            'Meta': {'object_name': 'WelcomeEmailRecipient'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'recipient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
        },
        'canvas_auth.user': {
            'Meta': {'object_name': 'User', 'db_table': "'auth_user'", '_ormbases': ['auth.User'], 'proxy': 'True'}
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    # Apps whose frozen state this migration completes.
    complete_apps = ['canvas']
|
|
#!/usr/bin/env python
#
# switch_tests.py: testing `svn switch'.
#
# Subversion is a tool for revision control.
# See http://subversion.apache.org for more information.
#
# ====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
######################################################################
# General modules
import shutil, re, os
# Our testing module
import svntest
from svntest import verify, actions, main
# (abbreviation)
# Short aliases for the svntest decorators and the working-copy state-item
# helper used throughout this test file.
Skip = svntest.testcase.Skip_deco
SkipUnless = svntest.testcase.SkipUnless_deco
XFail = svntest.testcase.XFail_deco
Issues = svntest.testcase.Issues_deco
Issue = svntest.testcase.Issue_deco
Wimp = svntest.testcase.Wimp_deco
Item = svntest.wc.StateItem
from svntest.main import SVN_PROP_MERGEINFO, server_has_mergeinfo
from externals_tests import change_external
from switch_tests import do_routine_switching
#----------------------------------------------------------------------
def relocate_deleted_missing_copied(sbox):
  "relocate with deleted, missing and copied entries"
  # Exercises 'switch --relocate' on a working copy that simultaneously
  # contains a committed deletion, a missing directory, and a scheduled
  # copy with deletions inside it, then verifies update and commit still
  # behave correctly against the relocated repository.

  sbox.build()
  wc_dir = sbox.wc_dir

  # Delete A/mu to create a deleted entry for mu in A/.svn/entries
  mu_path = os.path.join(wc_dir, 'A', 'mu')
  svntest.actions.run_and_verify_svn(None, None, [], 'rm', mu_path)
  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
  expected_status.remove('A/mu')
  expected_output = svntest.wc.State(wc_dir, {
    'A/mu' : Item(verb='Deleting'),
    })
  svntest.actions.run_and_verify_commit(wc_dir,
                                        expected_output,
                                        expected_status,
                                        None, wc_dir)

  # Remove A/B/F to create a missing entry
  svntest.main.safe_rmtree(os.path.join(wc_dir, 'A', 'B', 'F'))

  # Copy A/D to A/D2
  D_path = os.path.join(wc_dir, 'A', 'D')
  D2_path = os.path.join(wc_dir, 'A', 'D2')
  svntest.actions.run_and_verify_svn(None, None, [], 'copy',
                                     D_path, D2_path)

  # Delete within the copy
  D2G_path = os.path.join(wc_dir, 'A', 'D2', 'G')
  svntest.actions.run_and_verify_svn(None, None, [], 'rm', D2G_path)

  # The copy is scheduled for addition (copied='+', wc_rev='-'), with the
  # deleted subtree marked 'D' inside it.
  expected_status.add({
    'A/D2' : Item(status='A ', wc_rev='-', copied='+'),
    'A/D2/gamma' : Item(status=' ', wc_rev='-', copied='+'),
    'A/D2/G' : Item(status='D ', wc_rev='-', copied='+'),
    'A/D2/G/pi' : Item(status='D ', wc_rev='-', copied='+'),
    'A/D2/G/rho' : Item(status='D ', wc_rev='-', copied='+'),
    'A/D2/G/tau' : Item(status='D ', wc_rev='-', copied='+'),
    'A/D2/H' : Item(status=' ', wc_rev='-', copied='+'),
    'A/D2/H/chi' : Item(status=' ', wc_rev='-', copied='+'),
    'A/D2/H/omega' : Item(status=' ', wc_rev='-', copied='+'),
    'A/D2/H/psi' : Item(status=' ', wc_rev='-', copied='+'),
    })
  expected_status.tweak('A/B/F', status='! ', wc_rev='1')
  svntest.actions.run_and_verify_status(wc_dir, expected_status)

  # Relocate: move the repository aside, delete the original, and point
  # the working copy at the copy.
  repo_dir = sbox.repo_dir
  repo_url = sbox.repo_url
  other_repo_dir, other_repo_url = sbox.add_repo_path('other')
  svntest.main.copy_repos(repo_dir, other_repo_dir, 2, 0)
  svntest.main.safe_rmtree(repo_dir, 1)
  svntest.actions.run_and_verify_svn(None, None, [], 'switch', '--relocate',
                                     repo_url, other_repo_url, wc_dir)

  # Deleted and missing entries should be preserved, so update should
  # show only A/B/F being reinstated
  expected_output = svntest.wc.State(wc_dir, {
    'A/B/F' : Item(verb='Restored'),
    })
  expected_disk = svntest.main.greek_state.copy()
  expected_disk.remove('A/mu')
  expected_disk.add({
    'A/D2' : Item(),
    'A/D2/gamma' : Item("This is the file 'gamma'.\n"),
    'A/D2/H' : Item(),
    'A/D2/H/chi' : Item("This is the file 'chi'.\n"),
    'A/D2/H/omega' : Item("This is the file 'omega'.\n"),
    'A/D2/H/psi' : Item("This is the file 'psi'.\n"),
    })
  expected_status.add({
    'A/B/F' : Item(status=' ', wc_rev='2'),
    })
  expected_status.tweak(wc_rev=2)
  expected_status.tweak('A/D2', 'A/D2/gamma',
                        'A/D2/H', 'A/D2/H/chi', 'A/D2/H/omega', 'A/D2/H/psi',
                        wc_rev='-')
  expected_status.tweak('A/D2/G', 'A/D2/G/pi', 'A/D2/G/rho', 'A/D2/G/tau',
                        copied='+', wc_rev='-')
  svntest.actions.run_and_verify_update(wc_dir,
                                        expected_output,
                                        expected_disk,
                                        expected_status)

  # Commit to verify that copyfrom URLs have been relocated
  expected_output = svntest.wc.State(wc_dir, {
    'A/D2' : Item(verb='Adding'),
    'A/D2/G' : Item(verb='Deleting'),
    })
  expected_status.tweak('A/D2', 'A/D2/gamma',
                        'A/D2/H', 'A/D2/H/chi', 'A/D2/H/omega', 'A/D2/H/psi',
                        status=' ', wc_rev='3', copied=None)
  expected_status.remove('A/D2/G', 'A/D2/G/pi', 'A/D2/G/rho', 'A/D2/G/tau')
  svntest.actions.run_and_verify_commit(wc_dir,
                                        expected_output, expected_status,
                                        None, wc_dir)
#----------------------------------------------------------------------
@Issue(2380)
def relocate_beyond_repos_root(sbox):
  "relocate with prefixes longer than repo root"
  # Relocation must only rewrite the repository-root portion of the URL;
  # attempts that would change the in-repository path are rejected.

  sbox.build(read_only=True, create_wc=False)
  # NOTE(review): wc_backup is assigned but never used below -- presumably
  # a leftover; confirm before removing.
  wc_backup = sbox.add_wc_path('backup')
  wc_dir = sbox.wc_dir
  repo_dir = sbox.repo_dir
  repo_url = sbox.repo_url
  other_repo_dir, other_repo_url = sbox.add_repo_path('other')
  A_url = repo_url + "/A"
  A_wc_dir = wc_dir
  other_A_url = other_repo_url + "/A"
  other_B_url = other_repo_url + "/B"
  # Check out only the A subtree, then clone the repository.
  svntest.main.safe_rmtree(wc_dir, 1)
  svntest.actions.run_and_verify_svn(None, None, [], 'checkout',
                                     repo_url + '/A', wc_dir)
  svntest.main.copy_repos(repo_dir, other_repo_dir, 1, 0)

  # A relocate that changes the repo path part of the URL shouldn't work.
  # This tests for issue #2380.
  svntest.actions.run_and_verify_svn(None, None,
                                     ".*Invalid relocation destination.*",
                                     'relocate',
                                     A_url, other_B_url, A_wc_dir)

  # Another way of trying to change the fs path, leading to an invalid
  # repository root.
  svntest.actions.run_and_verify_svn(None, None,
                                     ".*is not the root.*",
                                     'relocate',
                                     repo_url, other_B_url, A_wc_dir)

  # A same-path relocation to the cloned repository is valid.
  svntest.actions.run_and_verify_svn(None, None, [],
                                     'relocate',
                                     A_url, other_A_url, A_wc_dir)

  # Check that we can contact the repository, meaning that the
  # relocate actually changed the URI.  Escape the expected URI to
  # avoid problems from any regex meta-characters it may contain
  # (e.g. '+').
  expected_infos = [
    { 'URL' : re.escape(other_A_url) + '$',
      'Path' : '.+',
      'Repository UUID' : '.+',
      'Revision' : '.+',
      'Node Kind' : '.+',
      'Last Changed Date' : '.+' },
  ]
  svntest.actions.run_and_verify_info(expected_infos, A_wc_dir, '-rHEAD')
#----------------------------------------------------------------------
# Issue 2578.
def relocate_and_propset(sbox):
  "out of date propset should fail after a relocate"

  # Create virgin repos and working copy
  svntest.main.safe_rmtree(sbox.repo_dir, 1)
  svntest.main.create_repos(sbox.repo_dir)

  wc_dir = sbox.wc_dir
  repo_dir = sbox.repo_dir
  repo_url = sbox.repo_url

  # import the greek tree
  svntest.main.greek_state.write_to_disk(svntest.main.greek_dump_dir)
  exit_code, output, errput = svntest.main.run_svn(
    None, 'import', '-m', 'Log message for revision 1.',
    svntest.main.greek_dump_dir, sbox.repo_url)

  # checkout
  svntest.main.safe_rmtree(wc_dir, 1)
  svntest.actions.run_and_verify_svn(None,
                                     None, [],
                                     'checkout',
                                     repo_url, wc_dir)

  # Relocate to a copy of the repository, removing the original so the
  # old URL is guaranteed dead.
  other_repo_dir, other_repo_url = sbox.add_repo_path('other')
  svntest.main.copy_repos(repo_dir, other_repo_dir, 1, 0)
  svntest.main.safe_rmtree(repo_dir, 1)
  svntest.actions.run_and_verify_svn(None, None, [], 'relocate',
                                     repo_url, other_repo_url, wc_dir)

  # Remove gamma from the working copy.
  D_path = os.path.join(wc_dir, 'A', 'D')
  gamma_path = os.path.join(wc_dir, 'A', 'D', 'gamma')
  svntest.main.run_svn(None, 'rm', gamma_path)

  # Create expected commit output.
  expected_output = svntest.wc.State(wc_dir, {
    'A/D/gamma' : Item(verb='Deleting'),
    })

  # After committing, status should show no sign of gamma.
  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
  expected_status.remove('A/D/gamma')

  # Commit the deletion of gamma and verify.
  svntest.actions.run_and_verify_commit(wc_dir,
                                        expected_output,
                                        expected_status,
                                        None, wc_dir)

  # Now gamma should be marked as `deleted' under the hood, at
  # revision 2.  Meanwhile, A/D is still lagging at revision 1.

  # Make a propchange on A/D
  svntest.main.run_svn(None, 'ps', 'foo', 'bar', D_path)

  # Commit and *expect* a repository Merge failure:
  svntest.actions.run_and_verify_commit(wc_dir,
                                        None,
                                        None,
                                        "[Oo]ut.of.date",
                                        wc_dir)
#----------------------------------------------------------------------
def single_file_relocate(sbox):
  "relocate a single file"

  # Create virgin repos and working copy
  svntest.main.safe_rmtree(sbox.repo_dir, 1)
  svntest.main.create_repos(sbox.repo_dir)

  wc_dir = sbox.wc_dir
  iota_path = os.path.join(sbox.wc_dir, 'iota')
  repo_dir = sbox.repo_dir
  repo_url = sbox.repo_url
  iota_url = repo_url + '/iota'

  # import the greek tree
  svntest.main.greek_state.write_to_disk(svntest.main.greek_dump_dir)
  exit_code, output, errput = svntest.main.run_svn(
    None, 'import', '-m', 'Log message for revision 1.',
    svntest.main.greek_dump_dir, sbox.repo_url)

  # checkout
  svntest.main.safe_rmtree(wc_dir, 1)
  svntest.actions.run_and_verify_svn(None,
                                     None, [],
                                     'checkout',
                                     repo_url, wc_dir)

  # Relocate
  other_repo_dir, other_repo_url = sbox.add_repo_path('other')
  other_iota_url = other_repo_url + '/iota'
  svntest.main.copy_repos(repo_dir, other_repo_dir, 1, 0)
  svntest.main.safe_rmtree(repo_dir, 1)

  # Relocating a single file (rather than a whole working-copy
  # directory) is expected to be rejected.
  svntest.actions.run_and_verify_svn(None, None,
                                     ".*Cannot relocate.*",
                                     'relocate',
                                     iota_url, other_iota_url, iota_path)
#----------------------------------------------------------------------
def relocate_with_switched_children(sbox):
  "relocate a directory with switched children"
  sbox.build()

  wc_dir = sbox.wc_dir

  # Setup (and verify) some switched things
  do_routine_switching(sbox.wc_dir, sbox.repo_url, False)

  # Relocate
  repo_dir = sbox.repo_dir
  repo_url = sbox.repo_url
  other_repo_dir, other_repo_url = sbox.add_repo_path('other')
  svntest.main.copy_repos(repo_dir, other_repo_dir, 1, 0)
  svntest.main.safe_rmtree(repo_dir, 1)

  # Do the switch and check the results in three ways.
  svntest.actions.run_and_verify_svn(None, None, [], 'relocate',
                                     repo_url, other_repo_url, wc_dir)

  # Attempt to commit changes and examine results
  expected_output = svntest.wc.State(wc_dir, { })
  expected_status = svntest.actions.get_virginal_state(wc_dir, 1)
  # 'A/B' and 'iota' were switched by do_routine_switching() above.
  expected_status.tweak('A/B', 'iota',
                        switched='S')
  expected_status.remove('A/B/E', 'A/B/F', 'A/B/E/alpha', 'A/B/E/beta',
                         'A/B/lambda')
  expected_status.add({
    'A/B/pi'  : Item(status='  ', wc_rev='1'),
    'A/B/rho' : Item(status='  ', wc_rev='1'),
    'A/B/tau' : Item(status='  ', wc_rev='1'),
    })

  # This won't actually do a commit, because nothing should be modified.
  svntest.actions.run_and_verify_commit(wc_dir,
                                        expected_output, expected_status,
                                        None, wc_dir)

  # Check the URLs of various nodes.
  info_output = {
        wc_dir:                                '.*.other$',
        os.path.join(wc_dir, 'iota'):          '.*.other/A/D/gamma$',
        os.path.join(wc_dir, 'A', 'B'):        '.*.other/A/D/G$',
        os.path.join(wc_dir, 'A', 'B', 'pi'):  '.*.other/A/D/G/pi$',
    }

  for path, pattern in info_output.items():
    expected_info = { 'URL' : pattern }
    svntest.actions.run_and_verify_info([expected_info], path)
#----------------------------------------------------------------------

### regression test for issue #3597
@Issue(3597)
def relocate_with_relative_externals(sbox):
  "relocate a directory containing relative externals"

  sbox.build()
  wc_dir = sbox.wc_dir

  # Add a relative external (one repo-root-relative '^/', one
  # parent-directory-relative '../').
  change_external(os.path.join(wc_dir, 'A', 'B'),
                  "^/A/D/G G-ext\n../D/H H-ext", commit=True)
  svntest.actions.run_and_verify_svn(None, None, [], 'update', wc_dir)

  # Move our repository to another location.
  repo_dir = sbox.repo_dir
  repo_url = sbox.repo_url
  other_repo_dir, other_repo_url = sbox.add_repo_path('other')
  svntest.main.copy_repos(repo_dir, other_repo_dir, 2, 0)
  svntest.main.safe_rmtree(repo_dir, 1)

  # Now relocate our working copy.
  svntest.actions.run_and_verify_svn(None, None, [], 'relocate',
                                     repo_url, other_repo_url, wc_dir)

  # Check the URLs of the externals -- were they updated to point to the
  # .other repository URL?
  svntest.actions.run_and_verify_info([{ 'URL' : '.*.other/A/D/G$' }],
                                      os.path.join(wc_dir, 'A', 'B', 'G-ext'))
  svntest.actions.run_and_verify_info([{ 'URL' : '.*.other/A/D/H$' }],
                                      os.path.join(wc_dir, 'A', 'B', 'H-ext'))
########################################################################
# Run the tests

# list all tests here, starting with None:
test_list = [ None,
              relocate_deleted_missing_copied,
              relocate_beyond_repos_root,
              relocate_and_propset,
              single_file_relocate,
              relocate_with_switched_children,
              relocate_with_relative_externals,
             ]

if __name__ == '__main__':
  # Hand the list to the svntest harness; it parses argv, runs the
  # selected tests and exits with the appropriate status code.
  svntest.main.run_tests(test_list)
  # NOTREACHED


### End of file.
|
|
'''connection.py: a framework for defining/managing database connections'''
import abc
import importlib.util
import logging
#use NullHandler to avoid "'NoneType' object is not subscriptable" exceptions
DEFAULT_LOGGER = logging.getLogger('NULL')
DEFAULT_LOGGER.addHandler(logging.NullHandler())
from plumbum import local
import pandas
import prosper.warehouse.Utilities as table_utils #TODO, required?
class TableType:
    '''enumeration for tabletypes'''
    MySQL = 'MySQL'
    Postgres = 'Postgres'
    NOTDEFINED = 'NOTDEFINED'

    def set_table_type(self, string_enum):
        '''roll enum from string (case-insensitive); unknown -> NOTDEFINED'''
        normalized = string_enum.lower()
        if normalized == 'mysql':
            return self.MySQL
        if normalized == 'postgres':
            return self.Postgres
        return self.NOTDEFINED
class Database(metaclass=abc.ABCMeta):
    '''parent class for holding database connection info

    Abstract base: subclasses supply path/key discovery, table typing,
    query and write behavior through the abstract methods below.
    '''
    # class-level defaults; overwritten per-instance in __init__
    _debug = False
    _logger = None

    def __init__(self, datasource_name, debug=False, loging_handle=DEFAULT_LOGGER):
        '''basic info about all databases

        Args:
            datasource_name (str): identifier for this datasource
            debug (bool): enable debug behavior in helpers
            loging_handle (logging.Logger): logger to report through
                (parameter name is a typo of "logging_handle", kept
                as-is for caller compatibility)

        Raises:
            Exception: re-raises whatever test_table() raises after
                logging it.
        '''
        self._debug = debug
        self._logger = loging_handle
        self._logger.info(
            'Database __init__(' +
            '\r\tdatasouce_name={0},'.format(datasource_name) +
            '\r\tdebug={0},'.format(str(debug)) +
            '\r\tlogger={0})'.format(str(loging_handle))
        )
        self._logger.debug('-- Global Setup')
        self.datasource_name = datasource_name
        self.local_path = self.set_local_path()
        # NOTE(review): index_key is None here and is appended to all_keys
        # below, so all_keys[0] may be None unless a subclass assigns
        # index_key earlier (e.g. inside its get_keys()) -- confirm.
        self.index_key = None
        self.primary_keys, self.data_keys = self.get_keys()
        self.all_keys=[]
        self.all_keys.append(self.index_key)
        self.all_keys.extend(self.primary_keys)
        self.all_keys.extend(self.data_keys)
        self._logger.info('--DATABASE: got keys from config')

        self.table_type = self._define_table_type()

        # Validate the datasource before handing the object back.
        try:
            self.test_table()
        except Exception as error_msg:
            self._logger.error(
                'EXCEPTION: test_table failed',
                exc_info=True
            )
            raise error_msg

    def __str__(self):
        return self.datasource_name

    @abc.abstractmethod
    def set_local_path(self):
        '''set CWD path for sourcing files'''
        #TODO: is level correct/required?
        pass

    @abc.abstractmethod
    def _define_table_type(self):
        '''helps define technology for class definition'''
        #TODO: is level correct/required?
        pass

    @abc.abstractmethod
    def get_keys(self):
        '''get primary/data keys for query manipulation'''
        #TODO: move config parser up to higher layer?
        pass

    @abc.abstractmethod
    def _set_info(self):
        '''save useful info about table/datasource'''
        pass

    #@abc.abstractmethod
    #def get_connection(self):
    #    '''get con[nection] for database handles'''
    #    pass

    @abc.abstractmethod
    def get_data(
            self,
            datetime_start,
            datetime_end=None, limit=None, kwargs_passthrough=None,
            *args, **kwargs
    ):
        '''process queries to fetch data'''
        #**kwargs: filter query keys
        #*args: data keys to return
        pass

    @abc.abstractmethod
    def put_data(self, payload):
        '''save data to table (if allowed)'''
        pass

    @abc.abstractmethod
    def test_table(self):
        '''validate that datasource exists, and is ready for data'''
        pass

    @abc.abstractmethod
    def _direct_query(self, query_str):
        '''some tests require direct SQL execution.  Support those calls internally ONLY'''
        pass

    @abc.abstractmethod
    def latest_entry(self, **kwargs):
        '''get the latest instance of a filter to avoid overwrites'''
        pass
class SQLTable(Database):
    '''child class for handling TimeSeries databases

    Concrete SQL plumbing on top of Database: direct query execution,
    table create/validate helpers, and pandas-based read/write.
    Subclasses still provide get_connection()/get_table_create_string()
    plus the remaining Database abstract methods.
    '''
    def __init__(self, datasource_name, debug=False, loging_handle=DEFAULT_LOGGER):
        '''Traditional SQL-style hook setup

        Connection/cursor and table/schema names are resolved BEFORE
        Database.__init__ runs, because the parent __init__ calls
        test_table(), which needs them.
        '''
        self._logger = loging_handle
        self._logger.info('SQLTable __init__()')
        self._connection, self._cursor = self.get_connection()
        self.table_name, self.schema_name = self._set_info()
        super().__init__(datasource_name, debug, loging_handle)

    @abc.abstractmethod
    def get_connection(self):
        '''get con[nection] for database handles'''
        pass

    @abc.abstractmethod
    def get_table_create_string(self):
        '''get/parse table-create file'''
        pass

    def _direct_query(self, query_str):
        '''direct query for SQL tables

        Args:
            query_str (str): raw SQL to execute

        Returns:
            query result rows (driver-native fetchall() shape)

        Raises:
            NotImplementedError: Postgres not supported yet
            UnsupportedTableType: unknown self.table_type
        '''
        #TODO: if/else check for every query seems wasteful, rework?
        self._logger.info('--_direct_query')
        #FIXME vvv do different connections need different execute/fetch cmds?
        if self.table_type == TableType.MySQL:
            #MYSQL EXECUTE
            try:
                self._cursor.execute(query_str)
                query_result = self._cursor.fetchall()
            except Exception as error_msg:
                #log error one step up
                raise error_msg
            return query_result
        elif self.table_type == TableType.Postgres:
            #POSTGRESS EXECUTE
            raise NotImplementedError('Postgres not supported yet')
        else:
            raise UnsupportedTableType(
                'unsupported table type: ' + str(self.table_type),
                self.table_name
            )

    def _create_table(self, full_create_string):
        '''handles executing table-create query

        Splits the create script on ';' and executes each statement,
        skipping comment lines and blank fragments; commits at the end.
        '''
        self._logger.info('--_create_table')
        command_list = full_create_string.split(';')
        for command in command_list:
            self._logger.debug('-- `{0}`'.format(command))
            if command.startswith('--') or \
               command == '\n':
                #don't execute comments or blank lines
                #FIXME hacky as fuck
                continue
            self._cursor.execute(command)
        self._connection.commit()

    def test_table_exists(
            self,
            table_name,
            schema_name
    ):
        '''basic test for table existing

        Queries the server for the table; if it is missing, attempts to
        create it from get_table_create_string().

        Raises:
            UnsupportedTableType: unknown self.table_type
            Exception: re-raised query/create failures (logged first)
        '''
        self._logger.info(
            '-- test_table_exists(' +
            '\r\ttable_name={0},'.format(table_name) +
            '\r\tschema_name={0})'.format(schema_name)
        )
        exists_query = ''
        exists_result = False   #TODO: remove?
        if self.table_type == TableType.MySQL:
            exists_query = \
                '''SHOW TABLES LIKE \'{table_name}\''''.\
                format(
                    table_name=table_name
                )
        else:
            raise UnsupportedTableType(
                'unsupported table type: ' + str(self.table_type),
                table_name
            )
        self._logger.debug(exists_query)
        try:
            exists_result = self._direct_query(exists_query)
        except Exception as error_msg:
            self._logger.error(
                'EXCEPTION query failed:' +
                '\r\ttable_type={0},'.format(str(self.table_type)) +
                '\r\tquery={0}'.format(exists_query),
                exc_info=True
            )
            raise error_msg

        if len(exists_result) != 1:
            # Table missing (or ambiguous match): try to create it from
            # the subclass-provided CREATE script.
            # (removed an unused `warning_str` local that was built here
            # but never logged)
            self._logger.warning(
                '-- WARNING: Table not found. Attempting to create' + \
                '\r\ttable_name={0}.{1}'.format(schema_name, table_name)
            )
            try:
                self._create_table(self.get_table_create_string())
            except Exception as error_msg:
                self._logger.error(
                    'EXCEPTION: Unable to create table' +
                    '\r\ttable_name={0}.{1}'.format(schema_name, table_name) +
                    '\r\ttable_type={0}'.format(self.table_type) +
                    '\r\tcreate_table_string={0}'.format(self.get_table_create_string()),
                    exc_info=True
                )
                raise error_msg
            self._logger.info('-- Created Table: {0}.{1}'.format(schema_name, table_name))
        else:
            self._logger.info('-- Table Already Exists: {0}.{1}'.format(schema_name, table_name))

    def test_table_headers(
            self,
            table_name,
            schema_name,
            defined_headers
    ):
        '''test if headers are correctly covered by cfg

        Fetches the live column list from INFORMATION_SCHEMA and compares
        it against the configured headers.

        Raises:
            UnsupportedTableType: unknown self.table_type
            MismatchedHeaders: live columns do not match defined_headers
        '''
        self._logger.info(
            'test_table_headers(' +
            '\r\ttable_name={0}'.format(table_name) +
            '\r\tschema_name={0}'.format(schema_name) +
            '\r\tdefined_headers={0}'.format(defined_headers)
        )
        header_query = ''
        if self.table_type == TableType.MySQL:
            header_query = \
                '''SELECT `COLUMN_NAME`
                FROM `INFORMATION_SCHEMA`.`COLUMNS`
                WHERE `TABLE_SCHEMA`=\'{schema_name}\'
                AND `TABLE_NAME`=\'{table_name}\''''.\
                format(
                    schema_name=schema_name,
                    table_name =table_name
                )
        else:
            raise UnsupportedTableType(
                'unsupported table type: ' + str(self.table_type),
                table_name
            )
        self._logger.debug('-- header_query={0}'.format(header_query))
        try:
            headers = self._direct_query(header_query)
        except Exception as error_msg:
            self._logger.error(
                'EXCEPTION: query failed:' +
                '\r\ttable_type={0}'.format(self.table_type) +
                '\r\tquery={0}'.format(header_query),
                exc_info=True
            )
            raise error_msg
        #TODO mysql specific? vvv
        headers = table_utils.mysql_cleanup_results(headers)
        self._logger.debug('-- headers={0}'.format(','.join(headers)))
        #FIXME vvv bool_test_headers return values are weird
        if not table_utils.bool_test_headers(
                headers,
                defined_headers,
                debug=self._debug,
                logger=self._logger #TODO
        ):
            self._logger.warning('WARNING: Table headers not equivalent')
            # BUGFIX: this used to raise with `error_msg`, a name that is
            # only bound inside the except-block above and is deleted when
            # that block exits (PEP 3110), so this path crashed with a
            # NameError instead of MismatchedHeaders.
            raise MismatchedHeaders(
                'table headers do not match defined headers',
                table_name
            )

    def get_data(
            self,
            datetime_start,
            *args,
            datetime_end=None,
            limit=None,
            kwargs_passthrough=None,
            **kwargs
    ):
        '''process queries to fetch data

        Args:
            datetime_start (datetime or int): lower bound on index_key;
                an int is treated as "last N days"
            *args: data keys to return (default: all data_keys)
            datetime_end (datetime, optional): upper bound on index_key
            limit (int, optional): LIMIT clause value (abs() applied)
            kwargs_passthrough (dict, optional): overrides **kwargs
            **kwargs: filter query keys (must be primary_keys)

        Returns:
            pandas.DataFrame of the query result

        Raises:
            InvalidQueryKeys / InvalidDataKeys / BadQueryModifier
        '''
        self._logger.info('get_data()')
        #TODO: self._logger.debug(args)
        if kwargs_passthrough:
            self._logger.debug(
                '-- received override kwargs: {0}'.format(','.join(kwargs_passthrough.keys()))
            )
            kwargs = kwargs_passthrough
        if isinstance(datetime_start, int):
            #assume "last x days"
            self._logger.debug('-- type(datetime_start)=INT. Converting to datetime')
            datetime_start = table_utils.convert_days_to_datetime(datetime_start)

        ## Test argument contents before executing ##
        try:
            table_utils.test_kwargs_headers(self.primary_keys, kwargs)
        except Exception as error_msg:
            self._logger.error(
                'EXCEPTION: query/kwarg keys invalid' +
                'kwargs.keys={0} '.format(','.join(kwargs.keys())) +
                'primary_keys={0}'.format(','.join(self.primary_keys)),
                exc_info=True
            )
            raise InvalidQueryKeys(error_msg, self.table_name)
        try:
            table_utils.test_args_headers(self.data_keys, args)
        except Exception as error_msg:
            self._logger.error(
                'EXCEPTION data/args keys invalid ' +
                'args={0} '.format(','.join(args)) +
                'data_keys={0}'.format(','.join(self.data_keys)),
                exc_info=True
            )
            raise InvalidDataKeys(error_msg, self.table_name)
        if isinstance(limit, int):
            limit = abs(limit)
        elif limit is not None: #<--FIXME: logic is kinda shitty
            raise BadQueryModifier(
                'limit badType: ' + str(type(limit)),
                self.table_name
            )
        #TODO: test datetimes

        ## Let's Build A Query! ##
        # NOTE(review): values are interpolated into the SQL string rather
        # than parameterized -- acceptable only for trusted internal input.
        query_header_string = ','.join(args) if args else ','.join(self.data_keys)
        max_date_filter = ''
        if datetime_end:
            max_date_filter = 'AND {index_key} < \'{datetime_string}\''.\
                format(
                    index_key=self.index_key,
                    datetime_string=str(datetime_end)
                )
        limit_filter = ''
        if limit:
            limit_filter = 'LIMIT {limit}'.format(limit=limit)
        query_general_filter = \
            '''{index_key} > \'{datetime_string}\'
            {max_date_filter}'''.\
            format(
                index_key=self.index_key,
                datetime_string=str(datetime_start),
                max_date_filter=max_date_filter
            )
        query_specific_filter = table_utils.format_kwargs(kwargs)
        query_keys = ''
        if self.primary_keys[0]:
            query_keys = ','.join(self.primary_keys) + ','  #for tables without query keys
        query_string = '''
            SELECT {index_key},{query_keys}{query_header_string}
            FROM {table_name}
            WHERE {query_general_filter}
            {query_specific_filter}
            ORDER BY {index_key} DESC
            {limit_filter}'''.\
            format(
                query_header_string=query_header_string,
                query_keys=query_keys,
                table_name=self.table_name,
                query_general_filter=query_general_filter,
                query_specific_filter=query_specific_filter,
                index_key=self.index_key,
                limit_filter=limit_filter
            )
        self._logger.debug(query_string)
        pandas_dataframe = pandas.read_sql(
            query_string,
            self._connection
        )
        self._logger.debug(str(pandas_dataframe))
        return pandas_dataframe

    def put_data(self, payload):
        '''tests and pushes data to datastore

        Args:
            payload (pandas.DataFrame): rows to append; columns must match
                self.all_keys

        Raises:
            NotImplementedError: payload is not a DataFrame
            MismatchedHeaders: column mismatch vs configured keys
            UnableToWriteToDatastore: write failure (logged first)
        '''
        self._logger.info('put_data()')
        if not isinstance(payload, pandas.DataFrame):
            raise NotImplementedError(
                'put_data() requires Pandas.DataFrame. No conversion implemented'
            )
        test_result = table_utils.bool_test_headers(
            list(payload.columns.values),
            self.all_keys,
            self._debug,
            self._logger
        )
        if not payload.index.name:
            self._logger.info('-- setting payload.index to {0}'.format(self.index_key))
            payload.set_index(
                keys=self.index_key,
                drop=True,
                inplace=True
            )
        #FIXME vvv return types are weird without ConnectionExceptions being passed down
        if isinstance(test_result, str):
            raise MismatchedHeaders(test_result, self.table_name)

        try:
            #FIXME vvv to_sql is a problem
            payload.to_sql(
                name=self.table_name,
                con=self._connection,
                schema=self.schema_name,
                flavor='mysql',
                if_exists='append'
            )
        except Exception as error_msg:
            self._logger.error(
                'EXCEPTION: Unable to write to table' +
                '\r\ttable_name={0}.{1}'.format(self.schema_name, self.table_name),
                exc_info=True
            )
            raise UnableToWriteToDatastore(error_msg, self.table_name)

    def __del__(self):
        '''release connection/cursor'''
        #__del__ needs to be in lowest-child to execute:
        #http://www.electricmonk.nl/log/2008/07/07/python-destructor-and-garbage-collection-notes/
        #FIXME vvv need to close cursor?
        #self._cursor.close()
        self._connection.close()
class ConnectionException(Exception):
    '''base class for table-connection exceptions'''
    def __init__(self, message, tablename):
        self.message = message
        self.tablename = tablename

    def __str__(self):
        return 'CONNECTION EXCEPTION: {tablename}-{message}'.format(
            tablename=self.tablename,
            message=self.message
        )
class CreateTableError(ConnectionException):
    '''raised when a missing table could not be created'''
class TableKeysMissing(ConnectionException):
    '''raised when keys are missing from config.  They could be rebuilt
    from the db connection, but a manual listing keeps the code simple
    for now.'''
class UnsupportedTableType(ConnectionException):
    '''raised when a command cannot run against this table technology'''
class MismatchedHeaders(ConnectionException):
    '''raised when configured headers and live table headers disagree'''
class InvalidQueryKeys(ConnectionException):
    '''raised when a query tries to pivot on unsupported keys'''
class InvalidDataKeys(ConnectionException):
    '''raised when a query tries to filter on unsupported keys'''
class BadQueryModifier(ConnectionException):
    '''raised for an unsupported modifier type / isinstance() failure'''
class UnableToWriteToDatastore(ConnectionException):
    '''raised when writing to the datastore fails'''
|
|
#!/usr/bin/python2
import dicom, cv2, re, sys
import os, fnmatch, shutil, subprocess
import numpy as np
sys.path.append('..')
import SETTINGS as c
from PIL import Image
from heart import getAlignImg;
import h5py
import dsb_utils as du;
"""
This script processes (crop from center, rotate, etc..) all
the sunny-brook dataset images and the hand-labeled images,
and put them into a single hdf5 file for the CNNs to load and train later
"""
# Deterministic shuffling for reproducible dataset splits.
np.random.seed(5678);
# Output image side length, taken from the command line.
SZ = int(sys.argv[1]);
noc = 'L'; #useless, fixed

# Maps each Sunnybrook case id to the DICOM series number that holds its
# short-axis (SAX) images.
SAX_SERIES = {
    # challenge training
    "SC-HF-I-1": "0004",
    "SC-HF-I-2": "0106",
    "SC-HF-I-4": "0116",
    "SC-HF-I-10": "0024",
    "SC-HF-I-40": "0134",
    "SC-HF-NI-3": "0379",
    "SC-HF-NI-4": "0501",
    "SC-HF-NI-34": "0446",
    "SC-HF-NI-36": "0474",
    "SC-HYP-1": "0550",
    "SC-HYP-3": "0650",
    "SC-HYP-38": "0734",
    "SC-HYP-40": "0755",
    "SC-N-2": "0898",
    "SC-N-3": "0915",
    "SC-N-40": "0944",
    "SC-HF-I-5": "0156",
    "SC-HF-I-6": "0180",
    "SC-HF-I-7": "0209",
    "SC-HF-I-8": "0226",
    "SC-HF-NI-11": "0270",
    "SC-HF-NI-31": "0401",
    "SC-HF-NI-33":"0424",
    "SC-HF-NI-7": "0523",
    "SC-HYP-37": "0702",
    "SC-HYP-6": "0767",
    "SC-HYP-7": "0007",
    "SC-HYP-8": "0796",
    "SC-N-5": "0963",
    "SC-N-6": "0981",
    "SC-N-7": "1009",
    "SC-HF-I-11": "0043",
    "SC-HF-I-12": "0062",
    "SC-HF-I-9": "0241",
    "SC-HF-NI-12": "0286",
    "SC-HF-NI-13": "0304",
    "SC-HF-NI-14": "0331",
    "SC-HF-NI-15": "0359",
    "SC-HYP-10": "0579",
    "SC-HYP-11": "0601",
    "SC-HYP-12": "0629",
    "SC-HYP-9": "0003",
    "SC-N-10": "0851",
    "SC-N-11": "0878",
    "SC-N-9": "1031"
}

# Contour (ground-truth) and image directories for the three Sunnybrook
# challenge splits; roots come from the project SETTINGS module (c).
TRAIN_CONTOUR_PATH = os.path.join(c.data_sunnybrook,
                   "Sunnybrook Cardiac MR Database ContoursPart3",
                   "TrainingDataContours")
ONLINE_CONTOUR_PATH = os.path.join(c.data_sunnybrook,
                   "Sunnybrook Cardiac MR Database ContoursPart1",
                   "OnlineDataContours")
VAL_CONTOUR_PATH = os.path.join(c.data_sunnybrook,
                   "Sunnybrook Cardiac MR Database ContoursPart2",
                   "ValidationDataContours")
TRAIN_IMG_PATH = os.path.join(c.data_sunnybrook,
                   "challenge_training")
VAL_IMG_PATH = os.path.join(c.data_sunnybrook,
                   "challenge_validation")
ONLINE_IMG_PATH = os.path.join(c.data_sunnybrook,
                   "challenge_online", "challenge_online")
# creates an hdf5 file from a dataset given a split in the form {'train':(0,n)}, etc
# appears to save in unpredictable order, so order must be verified after creation
def save_hd5py(dataset_dict, destfile, indices_dict_or_numfolds):
    """Write named arrays to an HDF5 file with a fuel-style split attribute.

    dataset_dict: {name: numpy array} to store as datasets.
    destfile: output .hdf5 path (overwritten).
    indices_dict_or_numfolds: either an explicit {split_name: (start, stop)}
    mapping, or an int N meaning "divide evenly into N folds".
    """
    indices_dict = indices_dict_or_numfolds
    if isinstance(indices_dict, int):
        # Build equal-size folds named fold_0..fold_{N-1}; any remainder
        # rows past folds*fold_n are left out of every fold.
        folds = indices_dict
        n = max(len(it) for it in dataset_dict.values())
        fold_n = n // folds
        indices_dict = dict(('fold_{}'.format(i), (i*fold_n, (i+1)*fold_n)) \
                for i in range(folds))
        print indices_dict
    f = h5py.File(destfile, mode='w')
    for name, dataset in dataset_dict.iteritems():
        dat = f.create_dataset(name, dataset.shape, dtype=str(dataset.dtype))
        dat[...] = dataset
    # Every split covers every dataset with the same index range.
    split_dict = dict((k, dict((name, v) for name in dataset_dict.iterkeys()))
            for k,v in indices_dict.iteritems())
    from fuel.datasets.hdf5 import H5PYDataset
    f.attrs['split'] = H5PYDataset.create_split_array(split_dict)
    f.flush()
    f.close()
def shrink_case(case):
    """Canonicalize a Sunnybrook case id: numeric tokens lose leading zeros.

    e.g. 'SC-HF-I-01' -> 'SC-HF-I-1'; non-numeric tokens pass through.
    """
    def _canonical(token):
        try:
            return str(int(token))
        except ValueError:
            return token
    return "-".join(_canonical(token) for token in case.split("-"))
class Contour(object):
    """One manually drawn endocardial contour file, identified by its path."""
    def __init__(self, ctr_path):
        self.ctr_path = ctr_path
        # Pull the case directory name and 4-digit image number out of the path.
        pattern = r"/([^/]*)/contours-manual/IRCCI-expert/IM-0001-(\d{4})-icontour-manual.txt"
        match = re.search(pattern, ctr_path)
        self.case = shrink_case(match.group(1))
        self.img_no = int(match.group(2))

    def __str__(self):
        return "<Contour for case %s, image %d>" % (self.case, self.img_no)

    __repr__ = __str__
def load_contour(contour, img_path):
    """Load one DICOM image + its contour mask, center-crop on the mask, resize to SZ.

    Returns (image, label) both resized to (SZ, SZ); label pixels are 0/255.
    """
    filename = "IM-%s-%04d.dcm" % (SAX_SERIES[contour.case], contour.img_no)
    full_path = os.path.join(img_path, contour.case, filename)
    f = dicom.read_file(full_path)
    # Rasterize the contour polygon into a binary (0/255) mask.
    ctrs = np.loadtxt(contour.ctr_path, delimiter=" ").astype(np.int)
    label = np.zeros(f.pixel_array.shape, dtype=np.uint8)
    cv2.fillPoly(label, [ctrs], 255)
    # Align image+mask to a common orientation (project helper; returns square arrays).
    img,lab = getAlignImg(f,label);
    lx,ly = img.shape;
    assert(lx==ly);
    # Crop center: use the mask centroid, falling back to the image center
    # when the mask is (nearly) empty.
    xm,ym = np.where(lab>127);
    if xm.size<30:
        xm,ym = lx//2,ly//2;
    xm = np.mean(xm);
    ym = np.mean(ym);
    delta = int(lx*0.62)//2;#cut middle 160x160 from 256x256 for sunny brook data
    assert(delta<xm and delta<ym);
    xm,ym,delta = int(xm),int(ym),int(delta);
    img = img[xm-delta:xm+delta,ym-delta:ym+delta];
    lab = lab[xm-delta:xm+delta,ym-delta:ym+delta];
    return cv2.resize(img, (SZ,SZ)), cv2.resize(lab, (SZ,SZ))
def get_all_contours(contour_path):
    """Find, shuffle and wrap every manual i-contour file under contour_path.

    Walks the tree for files named 'IM-0001-*-icontour-manual.txt', shuffles
    them (np.random, seeded at module import) and returns a list of Contour
    objects.
    """
    contours = [os.path.join(dirpath, f)
        for dirpath, dirnames, files in os.walk(contour_path)
        for f in fnmatch.filter(files, 'IM-0001-*-icontour-manual.txt')]
    np.random.shuffle(contours)
    print("Number of examples: {:d}".format(len(contours)))
    # BUGFIX/portability: materialize as a list.  Under Python 3, bare map()
    # returns a one-shot iterator, which breaks len()/indexing/re-iteration in
    # downstream code such as process_contours(); the comprehension behaves
    # identically under Python 2.
    extracted = [Contour(path) for path in contours]
    return extracted
def process_contours(contours, img_path):
    """Load every contour/image pair and stack them into (N,1,SZ,SZ) uint8 arrays."""
    count = len(contours)
    imgs_arr = np.empty((count, 1, SZ, SZ), dtype=np.uint8)
    labels_arr = np.empty((count, 1, SZ, SZ), dtype=np.uint8)
    pos = 0
    for ctr in contours:
        image, mask = load_contour(ctr, img_path)
        imgs_arr[pos, 0] = image
        labels_arr[pos, 0] = mask
        pos += 1
    return imgs_arr, labels_arr
contour_paths = [TRAIN_CONTOUR_PATH, VAL_CONTOUR_PATH, ONLINE_CONTOUR_PATH]
image_paths = [TRAIN_IMG_PATH, VAL_IMG_PATH, ONLINE_IMG_PATH]
# List of [image_batch, label_batch] pairs; concatenated at the end.
all_data = []

##!!!!!!NOTICE: train label all has range 0-255!!!
if __name__=='__main__':
    aug_contour_path = os.path.join(c.data_aug_contours, 'contours')
    aug_image_path = os.path.join(c.data_aug_contours, 'images')
    # add in manually segmented images first so they're in training set
    # labels are zero or one, not 255
    for contour_img in filter(lambda x: 'jpg' in x, os.listdir(aug_contour_path)):
        if 'auto' in contour_img:
            continue
        img = cv2.imread(os.path.join(aug_image_path, contour_img), 0)
        lfile = os.path.join(aug_contour_path, contour_img);
        if 'c_' in contour_img:#labeled image by qi
            label = cv2.imread(lfile[:-3]+'png',0);
            _,label = cv2.threshold(label, 1,255,cv2.THRESH_BINARY_INV)
        else:
            label = cv2.imread(lfile, 0)
            _,label = cv2.threshold(label, 127,255,cv2.THRESH_BINARY_INV)
        #did not rotate for these cases, it might be fine since these align well with my direction
        lx,ly = img.shape;
        assert(lx==ly);
        # Crop center = mask centroid, falling back to image center when
        # the mask is (nearly) empty.
        xm,ym = np.where(label>127);
        if xm.size<30:
            xm,ym = lx//2,ly//2;
        xm = np.mean(xm);
        ym = np.mean(ym);
        delta = int(lx*0.75)//2;#cut middle ~160x160 from 246x246 for Tencia's image
        if not (delta<xm and delta<ym):
            print(xm,ym,lx,"not in the middle: ",contour_img);
            delta = min(xm,ym);
        xm,ym,delta = int(xm),int(ym),int(delta)
        img = img[xm-delta:xm+delta,ym-delta:ym+delta];
        label = label[xm-delta:xm+delta,ym-delta:ym+delta];
        img = cv2.resize(img, (SZ,SZ)).reshape(1,1,SZ,SZ)
        label = cv2.resize(label, (SZ,SZ))
        label = label.reshape(1,1,SZ,SZ);##train label all has range 0-255!!!
        all_data.append([img,label])

    ##add in no contour images!!
    if noc == 'L':
        #with open('nocontour_{}.csv'.format(noc)) as f:
        with open(c.manual_data_root + '/nocontour.csv') as f:
            # All "no contour" frames share an all-zero label.
            label = np.zeros((1,1,SZ,SZ),dtype=np.uint8);
            for l in f:
                # CSV row layout: case_id, then alternating (slice, time) pairs.
                row = [int(x) for x in l.split(',')];
                case = row[0];
                s = row[1::2];
                t = row[2::2];
                assert(len(s)==len(t));
                dset = du.CNN_Dataset(case,img_size = SZ);
                n = len(s);
                print("add case {} no contour imgs".format(case))
                for i in range(n):
                    img = dset.images[s[i],t[i],0].reshape(1,1,SZ,SZ);
                    all_data.append([img,label]);

    #if noc == 'auto':
    #    import pickle
    #    ddir = c.data_auto_contours+'/size_{}'.format(SZ);
    #    for img_con in filter(lambda x: 'pkl' in x, os.listdir(ddir)):
    #        with open(ddir +'/'+ img_con,'rb') as f:
    #            x = pickle.load(f)
    #        img,label = x[0],x[1]
    #        img = img.reshape(1,1,SZ,SZ);
    #        label = label.reshape(1,1,SZ,SZ);
    #        all_data.append([img,label]);

    # Sunnybrook challenge splits: load DICOMs + contours and append.
    for cpath, ipath in zip(contour_paths, image_paths):
        train_ctrs = get_all_contours(cpath)
        imgs, labels = process_contours(train_ctrs, ipath)
        all_data.append([imgs,labels])
    all_imgs = np.concatenate([a[0] for a in all_data], axis=0)
    all_labels = np.concatenate([a[1] for a in all_data], axis=0)
    #shuffle
    idx = np.arange(all_imgs.shape[0]);
    np.random.shuffle(idx)
    all_imgs = all_imgs[idx];
    all_labels = all_labels[idx];
    n = all_imgs.shape[0]
    print("total number :",n);
    # Persist images+labels with a 5-fold split attribute.
    fn = os.path.join(c.data_sunnybrook, 'scd_seg_noc{}_{}.hdf5'.format(noc,SZ))
    save_hd5py({'images': all_imgs, 'labels': all_labels}, fn, 5)
|
|
"""Simple word2vec implementation on Tensorflow
"""
# Origin: https://github.com/tensorflow/blob/r0.9/tensorflow/examples/tutorials/word2vec/word2vec_basic.py
# Coding: utf-8
# Filename: word2vec_basic.py - Python 2.7
# Created: 2016-06-27 v0.0
# Description:
## v0.0: Basic skip-gram model with Tensorflow
## v0.1: Refactorize code
from __future__ import division
from __future__ import print_function
# high performance containers: namedtuples, defaultdict, OrderedDict
import collections
import math
import os
import random
import zipfile
import numpy as np
from six.moves import urllib, xrange
import tensorflow as tf
__author__ = "Hoang NT"

# Step 1: Download data and save to datapath folder
url = 'http://mattmahoney.net/dc/'   # corpus host (Matt Mahoney's text8 archive)
datapath = './data/'                 # local cache directory for downloads
def maybe_download(filename, expected_bytes):
    """
    Check and download data file.

    Parameters
    ----------
    filename: Name of the data file
              If file is not found in the datapath folder, it
              will be downloaded there.
    expected_bytes: Expected data file size
              After downloading, the file will be checked
              if it matches the expected size.

    Example
    -------
    Download the words corpus from provided url
    >>> maybe_download('./data/text8.zip', 31344016)
    Found and verified ./data/text8.zip
    """
    filepath = datapath + filename
    if not os.path.exists(filepath):
        # urlretrieve returns (saved_filepath, headers); keep only the path
        filepath, _ = urllib.request.urlretrieve(url + filename, filepath)
    statinfo = os.stat(filepath)
    if statinfo.st_size != expected_bytes:
        print(statinfo.st_size)
        raise Exception(
            'Failed to verify ' + filepath + '. Can you get to it with a browser?')
    print('Found and verified', filepath)
    return filepath
# Fetch (or reuse a cached copy of) the ~31 MB text8 corpus archive.
filepath = maybe_download('text8.zip', 31344016)

# Read data into a list of strings
def read_data(filename):
    """
    Extract the file and read as list of words.

    Parameter
    ---------
    filename: Name/location of the zipfile
              filename is the location of the zip file that
              will be extraced and read.
    """
    # with-as guarantees the archive is closed after use
    with zipfile.ZipFile(filename) as archive:
        first_member = archive.namelist()[0]
        # tf.compat.as_str: bytes->str conversion, PY2/PY3 compatible
        raw_text = tf.compat.as_str(archive.read(first_member))
    # whitespace-split into the word list
    return raw_text.split()
words = read_data(filepath)
print('Data size', len(words))

# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000
def build_dataset(words, vocabulary_size=50000):
    """
    Rebuild dataset as: 2 dictionaries mapping from index to
    word and vice versa, data that contain the original text
    but in index form, and a count that maps from word to its
    count in the corpus.

    Parameters
    ----------
    words: Input text
           List of words from the input data.
    vocabulary_size: Vocabulary cap (default 50000)
           Keep only the (vocabulary_size - 1) most frequent words;
           everything else is mapped to the 'UNK' token at index 0.
           (Generalized from a module-level constant; the default
           preserves the original behavior.)

    Returns
    -------
    data: corpus as a list of word indices
    count: [word, count] / (word, count) pairs, 'UNK' first
    dictionary: word -> index
    reverse_dictionary: index -> word
    """
    # count stores list of pairs: (word, count); UNK's count filled in below
    count = [['UNK', -1]]
    # most_common(.): n most frequent (word, count) tuples
    count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
    dictionary = dict()
    # map each kept word to its frequency rank (UNK -> 0, most common -> 1, ...)
    for word, _ in count:
        dictionary[word] = len(dictionary)
    # translate the corpus into indices, counting discarded (UNK) words
    data = list()
    unk_count = 0
    for word in words:
        if word in dictionary:
            index = dictionary[word]
        else:
            index = 0  # dictionary['UNK']
            unk_count += 1
        data.append(index)
    count[0][1] = unk_count
    # reverse mapping: index -> word
    reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
    return data, count, dictionary, reverse_dictionary
data, count, dictionary, reverse_dictionary = build_dataset(words)
del words  # reduce memory
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])

# Global cursor into `data`, advanced by generate_batch() below.
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
    """
    Generate data for training.
    Parameters
    ----------
    batch_size: Number of samples
        Number of samples in the generated batch. Each sample
        is a pair of words that is close to each other.
    num_skips: Number of skips in skipgram model
        Number of word pairs generated per target word.
    skip_window: Skipgram model window
        This window defines the range (to the left and right) at
        which words are selected randomly for the target word in
        the center of the skip_window.
    Example
    -------
    >>> generate_batch(20, 4, 5)
    Return 2 arrays each has 20 elements of word index:
        batch: 20 word indices of target words
        labels: 20 word indices of context words
    """
    # global keyword gives this function access to global variable data_index,
    # so the corpus cursor persists across calls.
    global data_index
    # Each target word emits exactly num_skips pairs, so batch_size must divide evenly.
    assert batch_size % num_skips == 0
    # There are only 2*skip_window context positions to draw from per target.
    assert num_skips <= 2 * skip_window
    batch = np.ndarray(shape=(batch_size), dtype=np.int32)
    labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
    # Window span: skip_window words on each side plus the target itself.
    span = 2 * skip_window + 1
    # Create a double-ended queue (both stack and queue) for word buffer
    # maxlen - keeping a fixed sliding window
    buffer = collections.deque(maxlen=span)
    # Pre-fill the sliding window with the first `span` word indices.
    for _ in range(span):
        # Shift the skipgram window to the left by 1
        buffer.append(data[data_index])
        # Increase data_index for next shift
        data_index = (data_index + 1) % len(data)
    for i in range(batch_size // num_skips):
        # target label at the center of the buffer
        target = skip_window
        # avoid the target word and later selected words
        targets_to_avoid = [ skip_window ]
        for j in range(num_skips):
            # Rejection-sample a context position not yet used for this target.
            while target in targets_to_avoid:
                target = random.randint(0, span - 1)
            targets_to_avoid.append(target)
            # batch is the same word for current num_skip
            batch[i * num_skips + j] = buffer[skip_window]
            labels[i * num_skips + j, 0] = buffer[target]
        # Slide the window one word right; the deque drops the oldest entry.
        buffer.append(data[data_index])
        data_index = (data_index + 1) % len(data)
    # NOTE(review): data_index ends `span` words ahead of the last target, so a
    # few words at the window edge are skipped between consecutive calls; the
    # upstream tutorial later rewound data_index here — confirm if that matters.
    return batch, labels
# Create batch of 8 and print its data
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
    print(batch[i], reverse_dictionary[batch[i]],
          '->', labels[i,0], reverse_dictionary[labels[i,0]])
# Step 4: Build and train a skip-gram model
# Size of training dataset
batch_size = 128
embedding_size = 128  # dimensionality of the learned word vectors
skip_window = 1
num_skips = 2
# Random validation size
valid_size = 16
valid_window = 100  # validation words are drawn from the 100 most frequent
# Choose valid_size elements from array arange(valid_window)
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
num_sampled = 64  # number of negative samples for NCE
# Create a tensorflow graph instance
graph = tf.Graph()
# Add nodes to the created graph
# NOTE(review): TF1-style graph/session code; several APIs used below were
# deprecated/renamed in later TF releases — confirm the pinned TF version.
with graph.as_default():
    # Placeholder for inputs and labels
    train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
    train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
    # Constant valid set
    valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
    with tf.device('/cpu:0'):
        # Input matrix init with uniform random vals minval = -1.0, maxval = 1.0
        embeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
        # Look up [train_inputs] in a list of [embeddings tensors].
        # Return the tensor of embeddings of train_input.
        embed = tf.nn.embedding_lookup(embeddings, train_inputs)
        nce_weights = tf.Variable(tf.truncated_normal([vocabulary_size, embedding_size],
                                                      stddev=1.0 / math.sqrt(embedding_size)))
        nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
    # Noise contrastive estimation loss function
    # tf.nn.nce_loss takes nce_weights multiply with embed then pluses nce_biases
    # then cross sigmoid entropy with train_labels. It also generates negative
    # samples using vocabulary_size for each train_labels.
    loss = tf.reduce_mean(tf.nn.nce_loss(nce_weights, nce_biases, embed, train_labels,
                                         num_sampled, vocabulary_size))
    optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
    # Normalize embeddings (L2 norm along the embedding dimension)
    norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
    normalized_embeddings = embeddings / norm
    # Compute valid set embeddings
    valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset)
    # Compute dot products (cosine similarity, since rows are normalized)
    # for each word in valid set against all normalized embeddings
    similarity = tf.matmul(valid_embeddings, normalized_embeddings, transpose_b=True)
    init = tf.initialize_all_variables()
num_steps = 100001
# NOTE(review): `xrange` below is Python 2 only — this script cannot run on
# Python 3 as written; confirm the target interpreter.
with tf.Session(graph=graph) as session:
    init.run()
    print("Initialized")
    average_loss = 0
    # For each step, generate a training batch
    for step in xrange(num_steps):
        batch_inputs, batch_labels = generate_batch(batch_size, num_skips, skip_window)
        feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}
        # Run optimizer and loss
        _, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
        average_loss += loss_val
        # Compute average loss at every 2000 steps
        if step % 2000 == 0:
            if step > 0:
                average_loss /= 2000
            print("Average loss at step ", step, ": ", average_loss)
            average_loss = 0
        # Print top similar words for each word in valid set
        if step % 10000 == 0:
            sim = similarity.eval()
            for i in xrange(valid_size):
                valid_word = reverse_dictionary[valid_examples[i]]
                top_k = 8
                # argsort of negated similarities = descending order; index 0 is
                # the word itself, so take entries 1..top_k.
                nearest = (-sim[i, :]).argsort()[1:top_k+1]
                log_str = "Nearest to %s:" % valid_word
                for k in xrange(top_k):
                    close_word = reverse_dictionary[nearest[k]]
                    log_str = "%s %s," % (log_str, close_word)
                print(log_str)
    final_embeddings = normalized_embeddings.eval()
def plot_with_labels(low_dim_embs, labels, filename='tsne.png'):
    """
    Plot word embeddings in 2D and save the figure to disk.

    Parameters
    ----------
    low_dim_embs: array of shape (n, 2)
        2D representation of word embeddings for the plot. This data is
        usually obtained from running t-SNE on the learned embeddings.
    labels: list of str
        One label per row of low_dim_embs.
    filename: str
        Output filename to write to disk.
    """
    assert low_dim_embs.shape[0] >= len(labels), "More labels than embeddings"
    plt.figure(figsize=(18,18))
    for i, label in enumerate(labels):
        x, y = low_dim_embs[i,:]
        plt.scatter(x,y)
        # BUG FIX: annotate each point with its own word `label`; the original
        # passed the whole `labels` list, stamping the entire list's repr on
        # every point.
        plt.annotate(label,
                     xy=(x,y),
                     xytext=(5,2),
                     textcoords='offset points',
                     ha='right',
                     va='bottom')
    plt.savefig(filename)
# Step 6 (optional): visualize the embeddings with t-SNE; best-effort only.
try:
    from sklearn.manifold import TSNE
    import matplotlib.pyplot as plt
    # Project the learned embeddings down to 2 dimensions for plotting.
    tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
    # Only plot first 500
    plot_only = 500
    low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only,:])
    labels = [reverse_dictionary[i] for i in xrange(plot_only)]
    plot_with_labels(low_dim_embs, labels)
except ImportError:
    # Visualization libraries are optional; skip with a hint instead of crashing.
    print("Install sklearn and matplotlib.")
# Kept so the module's functions can be imported by tests without running it.
def main():
    """Entry point used when the module is executed directly."""
    message = 'This is main!'
    print(message)


if __name__ == "__main__":
    main()
|
|
# Generated by Django 2.2.2 on 2019-07-24 05:46
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import phonenumber_field.modelfields
class Migration(migrations.Migration):
    """Auto-generated migration (Django 2.2.2): creates ``InvoiceHistory``.

    The model is a per-change snapshot of an invoice, linking back to
    ``invoices.Invoice`` (CASCADE) and to address/user rows (SET_NULL so
    history survives deletion of the referenced records).
    """
    dependencies = [
        ("common", "0017_auto_20190722_1443"),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ("invoices", "0004_auto_20190603_1844"),
    ]
    operations = [
        migrations.CreateModel(
            name="InvoiceHistory",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "invoice_title",
                    models.CharField(max_length=50, verbose_name="Invoice Title"),
                ),
                (
                    "invoice_number",
                    models.CharField(max_length=50, verbose_name="Invoice Number"),
                ),
                ("name", models.CharField(max_length=100, verbose_name="Name")),
                ("email", models.EmailField(max_length=254, verbose_name="Email")),
                ("quantity", models.PositiveIntegerField(default=0)),
                (
                    "rate",
                    models.DecimalField(decimal_places=2, default=0, max_digits=12),
                ),
                (
                    "total_amount",
                    models.DecimalField(
                        blank=True, decimal_places=2, max_digits=12, null=True
                    ),
                ),
                (
                    # ISO-4217-style currency code; display strings pair the
                    # code with the currency name.
                    "currency",
                    models.CharField(
                        blank=True,
                        choices=[
                            ("AED", "AED, Dirham"),
                            ("AFN", "AFN, Afghani"),
                            ("ALL", "ALL, Lek"),
                            ("AMD", "AMD, Dram"),
                            ("ANG", "ANG, Guilder"),
                            ("AOA", "AOA, Kwanza"),
                            ("ARS", "ARS, Peso"),
                            ("AUD", "AUD, Dollar"),
                            ("AWG", "AWG, Guilder"),
                            ("AZN", "AZN, Manat"),
                            ("BAM", "BAM, Marka"),
                            ("BBD", "BBD, Dollar"),
                            ("BDT", "BDT, Taka"),
                            ("BGN", "BGN, Lev"),
                            ("BHD", "BHD, Dinar"),
                            ("BIF", "BIF, Franc"),
                            ("BMD", "BMD, Dollar"),
                            ("BND", "BND, Dollar"),
                            ("BOB", "BOB, Boliviano"),
                            ("BRL", "BRL, Real"),
                            ("BSD", "BSD, Dollar"),
                            ("BTN", "BTN, Ngultrum"),
                            ("BWP", "BWP, Pula"),
                            ("BYR", "BYR, Ruble"),
                            ("BZD", "BZD, Dollar"),
                            ("CAD", "CAD, Dollar"),
                            ("CDF", "CDF, Franc"),
                            ("CHF", "CHF, Franc"),
                            ("CLP", "CLP, Peso"),
                            ("CNY", "CNY, Yuan Renminbi"),
                            ("COP", "COP, Peso"),
                            ("CRC", "CRC, Colon"),
                            ("CUP", "CUP, Peso"),
                            ("CVE", "CVE, Escudo"),
                            ("CZK", "CZK, Koruna"),
                            ("DJF", "DJF, Franc"),
                            ("DKK", "DKK, Krone"),
                            ("DOP", "DOP, Peso"),
                            ("DZD", "DZD, Dinar"),
                            ("EGP", "EGP, Pound"),
                            ("ERN", "ERN, Nakfa"),
                            ("ETB", "ETB, Birr"),
                            ("EUR", "EUR, Euro"),
                            ("FJD", "FJD, Dollar"),
                            ("FKP", "FKP, Pound"),
                            ("GBP", "GBP, Pound"),
                            ("GEL", "GEL, Lari"),
                            ("GHS", "GHS, Cedi"),
                            ("GIP", "GIP, Pound"),
                            ("GMD", "GMD, Dalasi"),
                            ("GNF", "GNF, Franc"),
                            ("GTQ", "GTQ, Quetzal"),
                            ("GYD", "GYD, Dollar"),
                            ("HKD", "HKD, Dollar"),
                            ("HNL", "HNL, Lempira"),
                            ("HRK", "HRK, Kuna"),
                            ("HTG", "HTG, Gourde"),
                            ("HUF", "HUF, Forint"),
                            ("IDR", "IDR, Rupiah"),
                            ("ILS", "ILS, Shekel"),
                            ("INR", "INR, Rupee"),
                            ("IQD", "IQD, Dinar"),
                            ("IRR", "IRR, Rial"),
                            ("ISK", "ISK, Krona"),
                            ("JMD", "JMD, Dollar"),
                            ("JOD", "JOD, Dinar"),
                            ("JPY", "JPY, Yen"),
                            ("KES", "KES, Shilling"),
                            ("KGS", "KGS, Som"),
                            ("KHR", "KHR, Riels"),
                            ("KMF", "KMF, Franc"),
                            ("KPW", "KPW, Won"),
                            ("KRW", "KRW, Won"),
                            ("KWD", "KWD, Dinar"),
                            ("KYD", "KYD, Dollar"),
                            ("KZT", "KZT, Tenge"),
                            ("LAK", "LAK, Kip"),
                            ("LBP", "LBP, Pound"),
                            ("LKR", "LKR, Rupee"),
                            ("LRD", "LRD, Dollar"),
                            ("LSL", "LSL, Loti"),
                            ("LTL", "LTL, Litas"),
                            ("LVL", "LVL, Lat"),
                            ("LYD", "LYD, Dinar"),
                            ("MAD", "MAD, Dirham"),
                            ("MDL", "MDL, Leu"),
                            ("MGA", "MGA, Ariary"),
                            ("MKD", "MKD, Denar"),
                            ("MMK", "MMK, Kyat"),
                            ("MNT", "MNT, Tugrik"),
                            ("MOP", "MOP, Pataca"),
                            ("MRO", "MRO, Ouguiya"),
                            ("MUR", "MUR, Rupee"),
                            ("MVR", "MVR, Rufiyaa"),
                            ("MWK", "MWK, Kwacha"),
                            ("MXN", "MXN, Peso"),
                            ("MYR", "MYR, Ringgit"),
                            ("MZN", "MZN, Metical"),
                            ("NAD", "NAD, Dollar"),
                            ("NGN", "NGN, Naira"),
                            ("NIO", "NIO, Cordoba"),
                            ("NOK", "NOK, Krone"),
                            ("NPR", "NPR, Rupee"),
                            ("NZD", "NZD, Dollar"),
                            ("OMR", "OMR, Rial"),
                            ("PAB", "PAB, Balboa"),
                            ("PEN", "PEN, Sol"),
                            ("PGK", "PGK, Kina"),
                            ("PHP", "PHP, Peso"),
                            ("PKR", "PKR, Rupee"),
                            ("PLN", "PLN, Zloty"),
                            ("PYG", "PYG, Guarani"),
                            ("QAR", "QAR, Rial"),
                            ("RON", "RON, Leu"),
                            ("RSD", "RSD, Dinar"),
                            ("RUB", "RUB, Ruble"),
                            ("RWF", "RWF, Franc"),
                            ("SAR", "SAR, Rial"),
                            ("SBD", "SBD, Dollar"),
                            ("SCR", "SCR, Rupee"),
                            ("SDG", "SDG, Pound"),
                            ("SEK", "SEK, Krona"),
                            ("SGD", "SGD, Dollar"),
                            ("SHP", "SHP, Pound"),
                            ("SLL", "SLL, Leone"),
                            ("SOS", "SOS, Shilling"),
                            ("SRD", "SRD, Dollar"),
                            ("SSP", "SSP, Pound"),
                            ("STD", "STD, Dobra"),
                            ("SYP", "SYP, Pound"),
                            ("SZL", "SZL, Lilangeni"),
                            ("THB", "THB, Baht"),
                            ("TJS", "TJS, Somoni"),
                            ("TMT", "TMT, Manat"),
                            ("TND", "TND, Dinar"),
                            ("TOP", "TOP, Paanga"),
                            ("TRY", "TRY, Lira"),
                            ("TTD", "TTD, Dollar"),
                            ("TWD", "TWD, Dollar"),
                            ("TZS", "TZS, Shilling"),
                            ("UAH", "UAH, Hryvnia"),
                            ("UGX", "UGX, Shilling"),
                            # NOTE(review): display label deviates from the
                            # "CODE, Name" pattern used by every other entry.
                            ("USD", "$, Dollar"),
                            ("UYU", "UYU, Peso"),
                            ("UZS", "UZS, Som"),
                            ("VEF", "VEF, Bolivar"),
                            ("VND", "VND, Dong"),
                            ("VUV", "VUV, Vatu"),
                            ("WST", "WST, Tala"),
                            ("XAF", "XAF, Franc"),
                            ("XCD", "XCD, Dollar"),
                            ("XOF", "XOF, Franc"),
                            ("XPF", "XPF, Franc"),
                            ("YER", "YER, Rial"),
                            ("ZAR", "ZAR, Rand"),
                            ("ZMK", "ZMK, Kwacha"),
                            ("ZWL", "ZWL, Dollar"),
                        ],
                        max_length=3,
                        null=True,
                    ),
                ),
                (
                    "phone",
                    phonenumber_field.modelfields.PhoneNumberField(
                        blank=True, max_length=128, null=True
                    ),
                ),
                ("created_on", models.DateTimeField(auto_now_add=True)),
                (
                    "amount_due",
                    models.DecimalField(
                        blank=True, decimal_places=2, max_digits=12, null=True
                    ),
                ),
                (
                    "amount_paid",
                    models.DecimalField(
                        blank=True, decimal_places=2, max_digits=12, null=True
                    ),
                ),
                ("is_email_sent", models.BooleanField(default=False)),
                (
                    "status",
                    models.CharField(
                        choices=[
                            ("Draft", "Draft"),
                            ("Sent", "Sent"),
                            ("Paid", "Paid"),
                            ("Pending", "Pending"),
                            # NOTE(review): stored value "Cancelled" but display
                            # label "Cancel" — inconsistent with the other
                            # choices; confirm whether intentional (cannot be
                            # changed here without a new migration).
                            ("Cancelled", "Cancel"),
                        ],
                        default="Draft",
                        max_length=15,
                    ),
                ),
                (
                    "details",
                    models.TextField(blank=True, null=True, verbose_name="Details"),
                ),
                ("due_date", models.DateField(blank=True, null=True)),
                (
                    "assigned_to",
                    models.ManyToManyField(
                        related_name="invoice_history_assigned_to",
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
                (
                    "from_address",
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="invoice_history_from_address",
                        to="common.Address",
                    ),
                ),
                (
                    "invoice",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="invoice_history",
                        to="invoices.Invoice",
                    ),
                ),
                (
                    "to_address",
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="invoice_history_to_address",
                        to="common.Address",
                    ),
                ),
                (
                    # NOTE(review): related_name says "created_by" for a field
                    # named "updated_by" — likely a copy-paste slip, but fixed
                    # only via a follow-up migration, not by editing this one.
                    "updated_by",
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="invoice_history_created_by",
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
        ),
    ]
|
|
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
UCASSCF (CASSCF without spin-degeneracy between alpha and beta orbitals)
1-step optimization algorithm
'''
import time
import copy
from functools import reduce
import numpy
import scipy.linalg
import pyscf.gto
import pyscf.scf
from pyscf.lib import logger
from pyscf.mcscf import ucasci
from pyscf.mcscf.mc1step import expmat, rotate_orb_cc
from pyscf.mcscf import umc_ao2mo
from pyscf.mcscf import chkfile
from pyscf import __config__
#FIXME: when the number of core orbitals are different for alpha and beta,
# the convergence are very unstable and slow
# gradients, hessian operator and hessian diagonal
def gen_g_hop(casscf, mo, u, casdm1s, casdm2s, eris):
    """Build the orbital-rotation gradient and Hessian for 1-step UCASSCF.

    Parameters
    ----------
    casscf : UCASSCF object (provides ncas, ncore, pack/unpack helpers).
    mo : (mo_alpha, mo_beta) MO coefficient matrices.
    u : current orbital-rotation matrix (used only by gorb_update's caller).
    casdm1s : (dm1a, dm1b) active-space 1-particle density matrices.
    casdm2s : (dm2aa, dm2ab, dm2bb) active-space 2-particle density matrices.
    eris : transformed two-electron integrals (aapp, appa, AApp, ... blocks;
        lowercase/uppercase pairs denote alpha/beta — see ao2mo below).

    Returns
    -------
    g_orb : packed orbital gradient vector.
    gorb_update : callable(u, fcivec) -> gradient at rotated orbitals.
    h_op : callable(x) -> Hessian-vector product in the packed representation.
    h_diag : packed diagonal of the Hessian (for preconditioning).
    """
    ncas = casscf.ncas
    ncore = casscf.ncore
    # Per-spin occupied boundary: ncore[s]..nocc[s] is the active block.
    nocc = (ncas + ncore[0], ncas + ncore[1])
    nmo = casscf.mo_coeff[0].shape[1]
    # Full-space 1-RDM per spin: identity on core, casdm1s on active block.
    dm1 = numpy.zeros((2,nmo,nmo))
    idx = numpy.arange(ncore[0])
    dm1[0,idx,idx] = 1
    idx = numpy.arange(ncore[1])
    dm1[1,idx,idx] = 1
    dm1[0,ncore[0]:nocc[0],ncore[0]:nocc[0]] = casdm1s[0]
    dm1[1,ncore[1]:nocc[1],ncore[1]:nocc[1]] = casdm1s[1]
    # part2, part3
    vhf_c = eris.vhf_c
    # Effective Fock-like potential: core vhf plus active-space contributions.
    vhf_ca = (vhf_c[0] + numpy.einsum('uvpq,uv->pq', eris.aapp, casdm1s[0]) \
              - numpy.einsum('upqv,uv->pq', eris.appa, casdm1s[0]) \
              + numpy.einsum('uvpq,uv->pq', eris.AApp, casdm1s[1]),
              vhf_c[1] + numpy.einsum('uvpq,uv->pq', eris.aaPP, casdm1s[0]) \
              + numpy.einsum('uvpq,uv->pq', eris.AAPP, casdm1s[1]) \
              - numpy.einsum('upqv,uv->pq', eris.APPA, casdm1s[1]),)
    ################# gradient #################
    # 2-RDM contracted with integrals; same-spin plus opposite-spin parts.
    hdm2 = [ numpy.einsum('tuvw,vwpq->tupq', casdm2s[0], eris.aapp) \
            + numpy.einsum('tuvw,vwpq->tupq', casdm2s[1], eris.AApp),
             numpy.einsum('vwtu,vwpq->tupq', casdm2s[1], eris.aaPP) \
            + numpy.einsum('tuvw,vwpq->tupq', casdm2s[2], eris.AAPP)]
    hcore = casscf.get_hcore()
    # One-electron Hamiltonian in the MO basis, per spin.
    h1e_mo = (reduce(numpy.dot, (mo[0].T, hcore[0], mo[0])),
              reduce(numpy.dot, (mo[1].T, hcore[1], mo[1])))
    g = [numpy.dot(h1e_mo[0], dm1[0]),
         numpy.dot(h1e_mo[1], dm1[1])]
    def gpart(m):
        # Add two-electron terms to the gradient matrix of spin m (in place).
        g[m][:,:ncore[m]] += vhf_ca[m][:,:ncore[m]]
        g[m][:,ncore[m]:nocc[m]] += \
                numpy.einsum('vuuq->qv', hdm2[m][:,:,ncore[m]:nocc[m]]) \
              + numpy.dot(vhf_c[m][:,ncore[m]:nocc[m]], casdm1s[m])
    gpart(0)
    gpart(1)
    def gorb_update(u, fcivec):
        # First-order gradient update: g(u) ~ g + H * r0.  Note g_orb and
        # h_op are closure references bound below, after this def (late
        # binding makes that legal).
        r0 = casscf.pack_uniq_var(u)
        return g_orb + h_op(r0)
    ############## hessian, diagonal ###########
    # part1
    tmp = casdm2s[0].transpose(1,2,0,3) + casdm2s[0].transpose(0,2,1,3)
    hdm2apap = numpy.einsum('uvtw,tpqw->upvq', tmp, eris.appa)
    hdm2apap += hdm2[0].transpose(0,2,1,3)
    hdm2[0] = hdm2apap
    tmp = casdm2s[1].transpose(1,2,0,3) + casdm2s[1].transpose(0,2,1,3)
    # (jp|RK) *[e(jq,SK) + e(jq,LS)] => qSpR
    hdm2apAP = numpy.einsum('uvtw,tpqw->upvq', tmp, eris.apPA)
    # (JP|rk) *[e(sk,JQ) + e(ls,JQ)] => QsPr
    #hdm2APap = hdm2apAP.transpose(2,3,0,1)
    tmp = casdm2s[2].transpose(1,2,0,3) + casdm2s[2].transpose(0,2,1,3)
    hdm2APAP = numpy.einsum('uvtw,tpqw->upvq', tmp, eris.APPA)
    hdm2APAP += hdm2[1].transpose(0,2,1,3)
    hdm2[1] = hdm2APAP
    # part7
    # h_diag[0] ~ alpha-alpha
    h_diag = [numpy.einsum('ii,jj->ij', h1e_mo[0], dm1[0]) - h1e_mo[0] * dm1[0],
              numpy.einsum('ii,jj->ij', h1e_mo[1], dm1[1]) - h1e_mo[1] * dm1[1]]
    h_diag[0] = h_diag[0] + h_diag[0].T
    h_diag[1] = h_diag[1] + h_diag[1].T
    # part8
    idx = numpy.arange(nmo)
    g_diag = g[0].diagonal()
    h_diag[0] -= g_diag + g_diag.reshape(-1,1)
    h_diag[0][idx,idx] += g_diag * 2
    g_diag = g[1].diagonal()
    h_diag[1] -= g_diag + g_diag.reshape(-1,1)
    h_diag[1][idx,idx] += g_diag * 2
    # part2, part3
    def fpart2(m):
        v_diag = vhf_ca[m].diagonal() # (pr|kl) * e(sq,lk)
        h_diag[m][:,:ncore[m]] += v_diag.reshape(-1,1)
        h_diag[m][:ncore[m]] += v_diag
        idx = numpy.arange(ncore[m])
        # (V_{qr} delta_{ps} + V_{ps} delta_{qr}) delta_{pr} delta_{sq}
        h_diag[m][idx,idx] -= v_diag[:ncore[m]] * 2
    fpart2(0)
    fpart2(1)
    def fpart3(m):
        # V_{pr} e_{sq}
        tmp = numpy.einsum('ii,jj->ij', vhf_c[m], casdm1s[m])
        h_diag[m][:,ncore[m]:nocc[m]] += tmp
        h_diag[m][ncore[m]:nocc[m],:] += tmp.T
        tmp = -vhf_c[m][ncore[m]:nocc[m],ncore[m]:nocc[m]] * casdm1s[m]
        h_diag[m][ncore[m]:nocc[m],ncore[m]:nocc[m]] += tmp + tmp.T
    fpart3(0)
    fpart3(1)
    # part4
    def fpart4(jkcpp, m):
        # (qp|rs)-(pr|sq) rp in core
        tmp = -numpy.einsum('cpp->cp', jkcpp)
        # (qp|sr) - (qr|sp) rp in core => 0
        h_diag[m][:ncore[m],:] += tmp
        h_diag[m][:,:ncore[m]] += tmp.T
        h_diag[m][:ncore[m],:ncore[m]] -= tmp[:,:ncore[m]] * 2
    fpart4(eris.jkcpp, 0)
    fpart4(eris.jkcPP, 1)
    # part5 and part6 diag
    #+(qr|kp) e_s^k  p in core, sk in active
    #+(qr|sl) e_l^p  s in core, pl in active
    #-(qj|sr) e_j^p  s in core, jp in active
    #-(qp|kr) e_s^k  p in core, sk in active
    #+(qj|rs) e_j^p  s in core, jp in active
    #+(qp|rl) e_l^s  p in core, ls in active
    #-(qs|rl) e_l^p  s in core, lp in active
    #-(qj|rp) e_j^s  p in core, js in active
    def fpart5(jkcpp, m):
        jkcaa = jkcpp[:,ncore[m]:nocc[m],ncore[m]:nocc[m]]
        tmp = -2 * numpy.einsum('jik,ik->ji', jkcaa, casdm1s[m])
        h_diag[m][:ncore[m],ncore[m]:nocc[m]] -= tmp
        h_diag[m][ncore[m]:nocc[m],:ncore[m]] -= tmp.T
    fpart5(eris.jkcpp, 0)
    fpart5(eris.jkcPP, 1)
    def fpart1(m):
        v_diag = numpy.einsum('ijij->ij', hdm2[m])
        h_diag[m][ncore[m]:nocc[m],:] += v_diag
        h_diag[m][:,ncore[m]:nocc[m]] += v_diag.T
    fpart1(0)
    fpart1(1)
    # Pack the antisymmetric gradient and the diagonal into vectors over the
    # unique rotation parameters only.
    g_orb = casscf.pack_uniq_var((g[0]-g[0].T, g[1]-g[1].T))
    h_diag = casscf.pack_uniq_var(h_diag)
    def h_op(x):
        # Hessian-vector product: unpack x into per-spin rotation matrices,
        # accumulate all Hessian terms, antisymmetrize, repack.
        x1a, x1b = casscf.unpack_uniq_var(x)
        xa_cu = x1a[:ncore[0],ncore[0]:]
        xa_av = x1a[ncore[0]:nocc[0],nocc[0]:]
        xa_ac = x1a[ncore[0]:nocc[0],:ncore[0]]
        xb_cu = x1b[:ncore[1],ncore[1]:]
        xb_av = x1b[ncore[1]:nocc[1],nocc[1]:]
        xb_ac = x1b[ncore[1]:nocc[1],:ncore[1]]
        # part7
        x2a = reduce(numpy.dot, (h1e_mo[0], x1a, dm1[0]))
        x2b = reduce(numpy.dot, (h1e_mo[1], x1b, dm1[1]))
        # part8, the hessian gives
        #x2a -= numpy.dot(g[0], x1a)
        #x2b -= numpy.dot(g[1], x1b)
        # it may ruin the hermitian of hessian unless g == g.T. So symmetrize it
        # x_{pq} -= g_{pr} \delta_{qs} x_{rs} * .5
        # x_{rs} -= g_{rp} \delta_{sq} x_{pq} * .5
        x2a -= numpy.dot(g[0].T, x1a)
        x2b -= numpy.dot(g[1].T, x1b)
        # part2
        x2a[:ncore[0]] += numpy.dot(xa_cu, vhf_ca[0][ncore[0]:])
        x2b[:ncore[1]] += numpy.dot(xb_cu, vhf_ca[1][ncore[1]:])
        # part3
        def fpart3(m, x2, x_av, x_ac):
            x2[ncore[m]:nocc[m]] += reduce(numpy.dot, (casdm1s[m], x_av, vhf_c[m][nocc[m]:])) \
                                  + reduce(numpy.dot, (casdm1s[m], x_ac, vhf_c[m][:ncore[m]]))
        fpart3(0, x2a, xa_av, xa_ac)
        fpart3(1, x2b, xb_av, xb_ac)
        # part1
        x2a[ncore[0]:nocc[0]] += numpy.einsum('upvr,vr->up', hdm2apap, x1a[ncore[0]:nocc[0]])
        x2a[ncore[0]:nocc[0]] += numpy.einsum('upvr,vr->up', hdm2apAP, x1b[ncore[1]:nocc[1]])
        x2b[ncore[1]:nocc[1]] += numpy.einsum('vrup,vr->up', hdm2apAP, x1a[ncore[0]:nocc[0]])
        x2b[ncore[1]:nocc[1]] += numpy.einsum('upvr,vr->up', hdm2APAP, x1b[ncore[1]:nocc[1]])
        # part4, part5, part6
        if ncore[0] > 0 or ncore[1] > 0:
            va, vc = casscf.update_jk_in_ah(mo, (x1a,x1b), casdm1s, eris)
            x2a[ncore[0]:nocc[0]] += va[0]
            x2b[ncore[1]:nocc[1]] += va[1]
            x2a[:ncore[0],ncore[0]:] += vc[0]
            x2b[:ncore[1],ncore[1]:] += vc[1]
        x2a = x2a - x2a.T
        x2b = x2b - x2b.T
        return casscf.pack_uniq_var((x2a,x2b))
    return g_orb, gorb_update, h_op, h_diag
def kernel(casscf, mo_coeff, tol=1e-7, conv_tol_grad=None,
           ci0=None, callback=None, verbose=None, dump_chk=True):
    """Drive the 1-step UCASSCF macro/micro iteration loop.

    Parameters
    ----------
    casscf : UCASSCF object supplying ao2mo, casci, rotate_orb_cc, etc.
    mo_coeff : (mo_alpha, mo_beta) starting MO coefficients.
    tol : energy convergence threshold.
    conv_tol_grad : orbital-gradient threshold; defaults to sqrt(tol).
    ci0 : initial CI vector guess (optional).
    callback : callable invoked with locals() after each micro/macro step.
    verbose : log level; defaults to casscf.verbose.
    dump_chk : write a checkpoint after every macro iteration.

    Returns
    -------
    (conv, e_tot, e_cas, fcivec, mo) — convergence flag, total and CAS-space
    energies, final CI vector and final MO coefficients.
    """
    if verbose is None:
        verbose = casscf.verbose
    log = logger.Logger(casscf.stdout, verbose)
    # NOTE(review): time.clock() was removed in Python 3.8 — this line fails
    # on modern interpreters; confirm the supported Python range.
    cput0 = (time.clock(), time.time())
    log.debug('Start 1-step CASSCF')
    mo = mo_coeff
    nmo = mo[0].shape[1]
    #TODO: lazy evaluate eris, to leave enough memory for FCI solver
    eris = casscf.ao2mo(mo)
    # Initial CASCI at the starting orbitals.
    e_tot, e_cas, fcivec = casscf.casci(mo, ci0, eris, log, locals())
    # Nothing to optimize when every orbital is active and internal rotations
    # are disabled.
    if casscf.ncas == nmo and not casscf.internal_rotation:
        return True, e_tot, e_cas, fcivec, mo
    if conv_tol_grad is None:
        conv_tol_grad = numpy.sqrt(tol)
        logger.info(casscf, 'Set conv_tol_grad to %g', conv_tol_grad)
    conv_tol_ddm = conv_tol_grad * 3
    conv = False
    totmicro = totinner = 0
    norm_gorb = norm_gci = 0
    de, elast = e_tot, e_tot
    r0 = None
    t1m = log.timer('Initializing 1-step CASSCF', *cput0)
    casdm1, casdm2 = casscf.fcisolver.make_rdm12s(fcivec, casscf.ncas, casscf.nelecas)
    norm_ddm = 1e2
    casdm1_last = casdm1
    t3m = t2m = log.timer('CAS DM', *t1m)
    imacro = 0
    # Macro loop: orbital micro-iterations followed by a full CASCI solve.
    while not conv and imacro < casscf.max_cycle_macro:
        imacro += 1
        max_cycle_micro = casscf.micro_cycle_scheduler(locals())
        max_stepsize = casscf.max_stepsize_scheduler(locals())
        imicro = 0
        # Orbital-rotation generator; lambdas give it lazy access to the
        # current CI vector and density matrices.
        rota = casscf.rotate_orb_cc(mo, lambda:fcivec, lambda:casdm1, lambda:casdm2,
                                    eris, r0, conv_tol_grad*.3, max_stepsize, log)
        for u, g_orb, njk, r0 in rota:
            imicro += 1
            norm_gorb = numpy.linalg.norm(g_orb)
            if imicro == 1:
                norm_gorb0 = norm_gorb
            norm_t = numpy.linalg.norm(u-numpy.eye(nmo))
            if imicro >= max_cycle_micro:
                log.debug('micro %d  |u-1|=%5.3g  |g[o]|=%5.3g  ',
                          imicro, norm_t, norm_gorb)
                break
            # Approximate CI/DM response to the orbital step (no full CASCI).
            casdm1, casdm2, gci, fcivec = casscf.update_casdm(mo, u, fcivec, e_cas, eris)
            norm_ddm =(numpy.linalg.norm(casdm1[0] - casdm1_last[0])
                     + numpy.linalg.norm(casdm1[1] - casdm1_last[1]))
            t3m = log.timer('update CAS DM', *t3m)
            if isinstance(gci, numpy.ndarray):
                norm_gci = numpy.linalg.norm(gci)
                log.debug('micro %d  |u-1|=%5.3g  |g[o]|=%5.3g  |g[c]|=%5.3g  |ddm|=%5.3g',
                          imicro, norm_t, norm_gorb, norm_gci, norm_ddm)
            else:
                norm_gci = None
                log.debug('micro %d  |u-1|=%5.3g  |g[o]|=%5.3g  |g[c]|=%s  |ddm|=%5.3g',
                          imicro, norm_t, norm_gorb, norm_gci, norm_ddm)
            if callable(callback):
                callback(locals())
            t3m = log.timer('micro iter %d'%imicro, *t3m)
            # Stop micro-iterating once the step or the gradients are small.
            if (norm_t < 1e-4 or
                (norm_gorb < conv_tol_grad*.5 and norm_ddm < conv_tol_ddm*.4)):
                break
        rota.close()
        rota = None
        totmicro += imicro
        totinner += njk
        # Free the old integrals before transforming the new ones.
        eris = None
        u = copy.copy(u)
        g_orb = copy.copy(g_orb)
        mo = casscf.rotate_mo(mo, u, log)
        eris = casscf.ao2mo(mo)
        t2m = log.timer('update eri', *t3m)
        # Exact CASCI at the rotated orbitals.
        e_tot, e_cas, fcivec = casscf.casci(mo, fcivec, eris, log, locals())
        casdm1, casdm2 = casscf.fcisolver.make_rdm12s(fcivec, casscf.ncas, casscf.nelecas)
        norm_ddm =(numpy.linalg.norm(casdm1[0] - casdm1_last[0])
                 + numpy.linalg.norm(casdm1[1] - casdm1_last[1]))
        casdm1_last = casdm1
        log.timer('CASCI solver', *t2m)
        t2m = t1m = log.timer('macro iter %d'%imacro, *t1m)
        de, elast = e_tot - elast, e_tot
        # Converged when energy change, orbital gradient and DM change are
        # all below their thresholds.
        if (abs(de) < tol
            and (norm_gorb0 < conv_tol_grad and norm_ddm < conv_tol_ddm)):
            conv = True
        if dump_chk:
            casscf.dump_chk(locals())
        if callable(callback):
            callback(locals())
    if conv:
        log.info('1-step CASSCF converged in %d macro (%d JK %d micro) steps',
                 imacro+1, totinner, totmicro)
    else:
        log.info('1-step CASSCF not converged, %d macro (%d JK %d micro) steps',
                 imacro+1, totinner, totmicro)
    log.timer('1-step CASSCF', *cput0)
    return conv, e_tot, e_cas, fcivec, mo
class UCASSCF(ucasci.UCASCI):
max_stepsize = getattr(__config__, 'mcscf_umc1step_UCASSCF_max_stepsize', .02)
max_cycle_macro = getattr(__config__, 'mcscf_umc1step_UCASSCF_max_cycle_macro', 50)
max_cycle_micro = getattr(__config__, 'mcscf_umc1step_UCASSCF_max_cycle_micro', 4)
conv_tol = getattr(__config__, 'mcscf_umc1step_UCASSCF_conv_tol', 1e-7)
conv_tol_grad = getattr(__config__, 'mcscf_umc1step_UCASSCF_conv_tol_grad', None)
# for augmented hessian
ah_level_shift = getattr(__config__, 'mcscf_umc1step_UCASSCF_ah_level_shift', 1e-8)
ah_conv_tol = getattr(__config__, 'mcscf_umc1step_UCASSCF_ah_conv_tol', 1e-12)
ah_max_cycle = getattr(__config__, 'mcscf_umc1step_UCASSCF_ah_max_cycle', 30)
ah_lindep = getattr(__config__, 'mcscf_umc1step_UCASSCF_ah_lindep', 1e-14)
ah_start_tol = getattr(__config__, 'mcscf_umc1step_UCASSCF_ah_start_tol', 2.5)
ah_start_cycle = getattr(__config__, 'mcscf_umc1step_UCASSCF_ah_start_cycle', 3)
ah_grad_trust_region = getattr(__config__, 'mcscf_umc1step_UCASSCF_ah_grad_trust_region', 3.0)
internal_rotation = getattr(__config__, 'mcscf_umc1step_UCASSCF_internal_rotation', False)
ci_response_space = getattr(__config__, 'mcscf_umc1step_UCASSCF_ci_response_space', 4)
with_dep4 = getattr(__config__, 'mcscf_umc1step_UCASSCF_with_dep4', False)
chk_ci = getattr(__config__, 'mcscf_umc1step_UCASSCF_chk_ci', False)
kf_interval = getattr(__config__, 'mcscf_umc1step_UCASSCF_kf_interval', 4)
kf_trust_region = getattr(__config__, 'mcscf_umc1step_UCASSCF_kf_trust_region', 3.0)
natorb = getattr(__config__, 'mcscf_umc1step_UCASSCF_natorb', False)
#canonicalization = getattr(__config__, 'mcscf_umc1step_UCASSCF_canonicalization', True)
#sorting_mo_energy = getattr(__config__, 'mcscf_umc1step_UCASSCF_sorting_mo_energy', False)
    def __init__(self, mf_or_mol, ncas, nelecas, ncore=None, frozen=None):
        """Set up a UCASSCF calculation.

        Parameters
        ----------
        mf_or_mol : mean-field object or Mole (forwarded to ucasci.UCASCI).
        ncas : number of active orbitals.
        nelecas : number of active electrons (alpha, beta).
        ncore : number of core orbitals per spin; see UCASCI for the default.
        frozen : orbitals excluded from rotation (int prefix or index list).
        """
        ucasci.UCASCI.__init__(self, mf_or_mol, ncas, nelecas, ncore)
        self.frozen = frozen
        self.callback = None
        self.chkfile = self._scf.chkfile
        self.fcisolver.max_cycle = getattr(__config__,
                                           'mcscf_umc1step_UCASSCF_fcisolver_max_cycle', 50)
        self.fcisolver.conv_tol = getattr(__config__,
                                          'mcscf_umc1step_UCASSCF_fcisolver_conv_tol', 1e-8)
##################################################
# don't modify the following attributes, they are not input options
        self.e_tot = None
        self.e_cas = None
        self.ci = None
        self.mo_coeff = self._scf.mo_coeff
        self.converged = False
        self._max_stepsize = None
        # Attribute whitelist used by check_sanity to catch typos in settings.
        keys = set(('max_stepsize', 'max_cycle_macro', 'max_cycle_micro',
                    'conv_tol', 'conv_tol_grad', 'ah_level_shift',
                    'ah_conv_tol', 'ah_max_cycle', 'ah_lindep',
                    'ah_start_tol', 'ah_start_cycle', 'ah_grad_trust_region',
                    'internal_rotation', 'ci_response_space',
                    'with_dep4', 'chk_ci',
                    'kf_interval', 'kf_trust_region', 'fcisolver_max_cycle',
                    'fcisolver_conv_tol', 'natorb', 'canonicalization',
                    'sorting_mo_energy'))
        self._keys = set(self.__dict__.keys()).union(keys)
def dump_flags(self, verbose=None):
log = logger.new_logger(self, verbose)
log.info('')
log.info('******** UHF-CASSCF flags ********')
nmo = self.mo_coeff[0].shape[1]
ncore = self.ncore
ncas = self.ncas
nvir_alpha = nmo - ncore[0] - ncas
nvir_beta = nmo - ncore[1] - ncas
log.info('CAS (%de+%de, %do), ncore = [%d+%d], nvir = [%d+%d]',
self.nelecas[0], self.nelecas[1], ncas,
ncore[0], ncore[1], nvir_alpha, nvir_beta)
if ncore[0] != ncore[1]:
log.warn('converge might be slow since num alpha core %d != num beta core %d',
ncore[0], ncore[1])
if self.frozen is not None:
log.info('frozen orbitals %s', str(self.frozen))
log.info('max. macro cycles = %d', self.max_cycle_macro)
log.info('max. micro cycles = %d', self.max_cycle_micro)
log.info('conv_tol = %g', self.conv_tol)
log.info('conv_tol_grad = %s', self.conv_tol_grad)
log.info('max. orb step = %g', self.max_stepsize)
log.info('augmented hessian max_cycle = %d', self.ah_max_cycle)
log.info('augmented hessian conv_tol = %g', self.ah_conv_tol)
log.info('augmented hessian linear dependence = %g', self.ah_lindep)
log.info('augmented hessian level shift = %d', self.ah_level_shift)
log.info('augmented hessian start_tol = %g', self.ah_start_tol)
log.info('augmented hessian start_cycle = %d', self.ah_start_cycle)
log.info('augmented hessian grad_trust_region = %g', self.ah_grad_trust_region)
log.info('kf_trust_region = %g', self.kf_trust_region)
log.info('kf_interval = %d', self.kf_interval)
log.info('ci_response_space = %d', self.ci_response_space)
#log.info('diis = %s', self.diis)
log.info('chkfile = %s', self.chkfile)
#log.info('natorb = %s', self.natorb)
log.info('max_memory %d MB (current use %d MB)',
self.max_memory, pyscf.lib.current_memory()[0])
log.info('internal_rotation = %s', self.internal_rotation)
try:
self.fcisolver.dump_flags(self.verbose)
except AttributeError:
pass
    def kernel(self, mo_coeff=None, ci0=None, callback=None, _kern=kernel):
        """Run the UCASSCF optimization and store the results on self.

        Parameters
        ----------
        mo_coeff : starting (alpha, beta) MOs; defaults to self.mo_coeff.
        ci0 : initial CI vector guess.
        callback : callable invoked with locals() during the iterations.
        _kern : driver function; defaults to the module-level 1-step kernel
            (bound at class-creation time).  mc2step() passes a different
            kernel through this hook.

        Returns
        -------
        (e_tot, e_cas, ci, mo_coeff)
        """
        if mo_coeff is None:
            mo_coeff = self.mo_coeff
        else:
            self.mo_coeff = mo_coeff
        if callback is None: callback = self.callback
        if self.verbose >= logger.WARN:
            self.check_sanity()
        self.dump_flags()
        self.converged, self.e_tot, self.e_cas, self.ci, self.mo_coeff = \
                _kern(self, mo_coeff,
                      tol=self.conv_tol, conv_tol_grad=self.conv_tol_grad,
                      ci0=ci0, callback=callback, verbose=self.verbose)
        logger.note(self, 'UCASSCF energy = %.15g', self.e_tot)
        #if self.verbose >= logger.INFO:
        #    self.analyze(mo_coeff, self.ci, verbose=self.verbose)
        self._finalize()
        return self.e_tot, self.e_cas, self.ci, self.mo_coeff
    def mc1step(self, mo_coeff=None, ci0=None, callback=None):
        """Alias of kernel(): run the 1-step (coupled orbital/CI) optimizer."""
        return self.kernel(mo_coeff, ci0, callback)
    def mc2step(self, mo_coeff=None, ci0=None, callback=None):
        """Run UCASSCF with the 2-step algorithm by passing umc2step.kernel
        into self.kernel() through its `_kern` hook."""
        from pyscf.mcscf import umc2step
        return self.kernel(mo_coeff, ci0, callback, umc2step.kernel)
    def get_h2eff(self, mo_coeff=None):
        '''Computing active space two-particle Hamiltonian.

        Delegates to get_h2cas().
        '''
        return self.get_h2cas(mo_coeff)
    def get_h2cas(self, mo_coeff=None):
        '''Active-space two-electron integrals via the UCASCI base-class
        ao2mo (called unbound on purpose: UCASSCF.ao2mo is overridden).'''
        return ucasci.UCASCI.ao2mo(self, mo_coeff)
    def casci(self, mo_coeff, ci0=None, eris=None, verbose=None, envs=None):
        """Solve the CASCI problem at the given orbitals.

        Parameters
        ----------
        mo_coeff : (alpha, beta) MO coefficients.
        ci0 : initial CI vector guess.
        eris : precomputed MO integrals; when given, a lightweight fake
            Hamiltonian object reusing them is built instead of a fresh ao2mo.
        verbose : log level override.
        envs : caller's locals() dict, used only to decorate log output with
            iteration counters when called from within the CASSCF loop.

        Returns
        -------
        (e_tot, e_cas, fcivec)
        """
        if eris is None:
            # Shallow copy so the redirected ao2mo does not mutate self.
            fcasci = copy.copy(self)
            fcasci.ao2mo = self.get_h2cas
        else:
            fcasci = _fake_h_for_fast_casci(self, mo_coeff, eris)
        log = logger.new_logger(self, verbose)
        e_tot, e_cas, fcivec = ucasci.kernel(fcasci, mo_coeff, ci0, log)
        if envs is not None and log.verbose >= logger.INFO:
            log.debug('CAS space CI energy = %.15g', e_cas)
            if 'imicro' in envs:  # Within CASSCF iteration
                log.info('macro iter %d (%d JK  %d micro), '
                         'UCASSCF E = %.15g  dE = %.8g',
                         envs['imacro'], envs['njk'], envs['imicro'],
                         e_tot, e_tot-envs['elast'])
                if 'norm_gci' in envs:
                    log.info('               |grad[o]|=%5.3g  '
                             '|grad[c]|= %s  |ddm|=%5.3g',
                             envs['norm_gorb0'],
                             envs['norm_gci'], envs['norm_ddm'])
                else:
                    log.info('               |grad[o]|=%5.3g  |ddm|=%5.3g',
                             envs['norm_gorb0'], envs['norm_ddm'])
            else:  # Initialization step
                log.info('UCASCI E = %.15g', e_tot)
        return e_tot, e_cas, fcivec
def uniq_var_indices(self, nmo, ncore, ncas, frozen):
nocc = ncore + ncas
mask = numpy.zeros((nmo,nmo),dtype=bool)
mask[ncore:nocc,:ncore] = True
mask[nocc:,:nocc] = True
if self.internal_rotation:
raise NotImplementedError('internal_rotation')
if frozen is not None:
if isinstance(frozen, (int, numpy.integer)):
mask[:frozen] = mask[:,:frozen] = False
else:
mask[frozen] = mask[:,frozen] = False
return mask
def pack_uniq_var(self, mat):
nmo = self.mo_coeff[0].shape[1]
ncore = self.ncore
ncas = self.ncas
idxa = self.uniq_var_indices(nmo, ncore[0], ncas, self.frozen)
idxb = self.uniq_var_indices(nmo, ncore[1], ncas, self.frozen)
return numpy.hstack((mat[0][idxa], mat[1][idxb]))
# to anti symmetric matrix
def unpack_uniq_var(self, v):
nmo = self.mo_coeff[0].shape[1]
ncore = self.ncore
ncas = self.ncas
idx = numpy.empty((2,nmo,nmo), dtype=bool)
idx[0] = self.uniq_var_indices(nmo, ncore[0], ncas, self.frozen)
idx[1] = self.uniq_var_indices(nmo, ncore[1], ncas, self.frozen)
mat = numpy.zeros((2,nmo,nmo))
mat[idx] = v
mat[0] = mat[0] - mat[0].T
mat[1] = mat[1] - mat[1].T
return mat
def update_rotate_matrix(self, dx, u0=1):
if isinstance(u0, int) and u0 == 1:
u0 = (1,1)
dr = self.unpack_uniq_var(dx)
ua = numpy.dot(u0[0], expmat(dr[0]))
ub = numpy.dot(u0[1], expmat(dr[1]))
return (ua, ub)
    def gen_g_hop(self, *args):
        """Delegate to the module-level gen_g_hop (orbital gradient/Hessian)."""
        return gen_g_hop(self, *args)
    # Reuse the mc1step micro-iteration orbital optimizer (imported above).
    rotate_orb_cc = rotate_orb_cc
def ao2mo(self, mo_coeff=None):
if mo_coeff is None: mo_coeff = self.mo_coeff
# nmo = mo[0].shape[1]
# ncore = self.ncore
# ncas = self.ncas
# nocc = (ncas + ncore[0], ncas + ncore[1])
# eriaa = pyscf.ao2mo.incore.full(self._scf._eri, mo[0])
# eriab = pyscf.ao2mo.incore.general(self._scf._eri, (mo[0],mo[0],mo[1],mo[1]))
# eribb = pyscf.ao2mo.incore.full(self._scf._eri, mo[1])
# eriaa = pyscf.ao2mo.restore(1, eriaa, nmo)
# eriab = pyscf.ao2mo.restore(1, eriab, nmo)
# eribb = pyscf.ao2mo.restore(1, eribb, nmo)
# eris = lambda:None
# eris.jkcpp = numpy.einsum('iipq->ipq', eriaa[:ncore[0],:ncore[0],:,:]) \
# - numpy.einsum('ipqi->ipq', eriaa[:ncore[0],:,:,:ncore[0]])
# eris.jkcPP = numpy.einsum('iipq->ipq', eribb[:ncore[1],:ncore[1],:,:]) \
# - numpy.einsum('ipqi->ipq', eribb[:ncore[1],:,:,:ncore[1]])
# eris.jC_pp = numpy.einsum('pqii->pq', eriab[:,:,:ncore[1],:ncore[1]])
# eris.jc_PP = numpy.einsum('iipq->pq', eriab[:ncore[0],:ncore[0],:,:])
# eris.aapp = numpy.copy(eriaa[ncore[0]:nocc[0],ncore[0]:nocc[0],:,:])
# eris.aaPP = numpy.copy(eriab[ncore[0]:nocc[0],ncore[0]:nocc[0],:,:])
# eris.AApp = numpy.copy(eriab[:,:,ncore[1]:nocc[1],ncore[1]:nocc[1]].transpose(2,3,0,1))
# eris.AAPP = numpy.copy(eribb[ncore[1]:nocc[1],ncore[1]:nocc[1],:,:])
# eris.appa = numpy.copy(eriaa[ncore[0]:nocc[0],:,:,ncore[0]:nocc[0]])
# eris.apPA = numpy.copy(eriab[ncore[0]:nocc[0],:,:,ncore[1]:nocc[1]])
# eris.APPA = numpy.copy(eribb[ncore[1]:nocc[1],:,:,ncore[1]:nocc[1]])
#
# eris.cvCV = numpy.copy(eriab[:ncore[0],ncore[0]:,:ncore[1],ncore[1]:])
# eris.Icvcv = eriaa[:ncore[0],ncore[0]:,:ncore[0],ncore[0]:] * 2\
# - eriaa[:ncore[0],:ncore[0],ncore[0]:,ncore[0]:].transpose(0,3,1,2) \
# - eriaa[:ncore[0],ncore[0]:,:ncore[0],ncore[0]:].transpose(0,3,2,1)
# eris.ICVCV = eribb[:ncore[1],ncore[1]:,:ncore[1],ncore[1]:] * 2\
# - eribb[:ncore[1],:ncore[1],ncore[1]:,ncore[1]:].transpose(0,3,1,2) \
# - eribb[:ncore[1],ncore[1]:,:ncore[1],ncore[1]:].transpose(0,3,2,1)
#
# eris.Iapcv = eriaa[ncore[0]:nocc[0],:,:ncore[0],ncore[0]:] * 2 \
# - eriaa[:,ncore[0]:,:ncore[0],ncore[0]:nocc[0]].transpose(3,0,2,1) \
# - eriaa[:,:ncore[0],ncore[0]:,ncore[0]:nocc[0]].transpose(3,0,1,2)
# eris.IAPCV = eribb[ncore[1]:nocc[1],:,:ncore[1],ncore[1]:] * 2 \
# - eribb[:,ncore[1]:,:ncore[1],ncore[1]:nocc[1]].transpose(3,0,2,1) \
# - eribb[:,:ncore[1],ncore[1]:,ncore[1]:nocc[1]].transpose(3,0,1,2)
# eris.apCV = numpy.copy(eriab[ncore[0]:nocc[0],:,:ncore[1],ncore[1]:])
# eris.APcv = numpy.copy(eriab[:ncore[0],ncore[0]:,ncore[1]:nocc[1],:].transpose(2,3,0,1))
# return eris
return umc_ao2mo._ERIS(self, mo_coeff)
    def update_jk_in_ah(self, mo, r, casdm1s, eris):
        '''JK-like contributions to the augmented-Hessian matrix-vector
        product for an orbital-rotation step ``r = (ra, rb)``.

        Contracts the core-virtual (cv/CV) and active-general (ap/AP)
        integral blocks stored on ``eris`` with the rotation amplitudes
        and the active 1-RDMs ``casdm1s``.  Returns (va, vc): the
        (alpha, beta) active- and core-orbital response potentials.
        '''
        ncas = self.ncas
        ncore = self.ncore
        nocc = (ncas + ncore[0], ncas + ncore[1])
        ra, rb = r
        # Core-virtual response.  The cross-spin cvCV terms carry a
        # factor 2 (presumably the opposite-spin Coulomb counting used
        # throughout gen_g_hop -- TODO confirm against umc1step).
        vhf3ca = numpy.einsum('srqp,sr->qp', eris.Icvcv, ra[:ncore[0],ncore[0]:])
        vhf3ca += numpy.einsum('qpsr,sr->qp', eris.cvCV, rb[:ncore[1],ncore[1]:]) * 2
        vhf3cb = numpy.einsum('srqp,sr->qp', eris.ICVCV, rb[:ncore[1],ncore[1]:])
        vhf3cb += numpy.einsum('srqp,sr->qp', eris.cvCV, ra[:ncore[0],ncore[0]:]) * 2
        # Active-general response.
        vhf3aa = numpy.einsum('kpsr,sr->kp', eris.Iapcv, ra[:ncore[0],ncore[0]:])
        vhf3aa += numpy.einsum('kpsr,sr->kp', eris.apCV, rb[:ncore[1],ncore[1]:]) * 2
        vhf3ab = numpy.einsum('kpsr,sr->kp', eris.IAPCV, rb[:ncore[1],ncore[1]:])
        vhf3ab += numpy.einsum('kpsr,sr->kp', eris.APcv, ra[:ncore[0],ncore[0]:]) * 2
        # Active rows of the step, weighted by the active 1-RDMs.
        dm4 = (numpy.dot(casdm1s[0], ra[ncore[0]:nocc[0]]),
               numpy.dot(casdm1s[1], rb[ncore[1]:nocc[1]]))
        vhf4a = numpy.einsum('krqp,kr->qp', eris.Iapcv, dm4[0])
        vhf4a += numpy.einsum('krqp,kr->qp', eris.APcv, dm4[1]) * 2
        vhf4b = numpy.einsum('krqp,kr->qp', eris.IAPCV, dm4[1])
        vhf4b += numpy.einsum('krqp,kr->qp', eris.apCV, dm4[0]) * 2
        va = (numpy.dot(casdm1s[0], vhf3aa), numpy.dot(casdm1s[1], vhf3ab))
        vc = (vhf3ca + vhf4a, vhf3cb + vhf4b)
        return va, vc
def update_casdm(self, mo, u, fcivec, e_cas, eris):
ecore, h1cas, h2cas = self.approx_cas_integral(mo, u, eris)
ci1, g = self.solve_approx_ci(h1cas, h2cas, fcivec, ecore, e_cas)
casdm1, casdm2 = self.fcisolver.make_rdm12s(ci1, self.ncas, self.nelecas)
return casdm1, casdm2, g, ci1
    def approx_cas_integral(self, mo, u, eris):
        '''First-order (in the rotation ``rmat = u - 1``) approximation of
        the core energy and the active-space one- and two-electron
        integrals after rotating the orbitals by ``u``.

        Returns (ecore, (h1casa, h1casb), (aaaa, aaAA, AAAA)).
        '''
        ncas = self.ncas
        nelecas = self.nelecas  # NOTE(review): unused in this method
        ncore = self.ncore
        nocc = (ncas + ncore[0], ncas + ncore[1])
        nmo = mo[0].shape[1]
        rmat = u - numpy.eye(nmo)
        mocas = (mo[0][:,ncore[0]:nocc[0]], mo[1][:,ncore[1]:nocc[1]])
        hcore = self.get_hcore()
        # Response of the occupied-block core Hamiltonian, symmetrized.
        h1effa = reduce(numpy.dot, (rmat[0][:,:nocc[0]].T, mo[0].T,
                                    hcore[0], mo[0][:,:nocc[0]]))
        h1effb = reduce(numpy.dot, (rmat[1][:,:nocc[1]].T, mo[1].T,
                                    hcore[1], mo[1][:,:nocc[1]]))
        h1effa = h1effa + h1effa.T
        h1effb = h1effb + h1effb.T
        # Views of the integral blocks with one core / one active index.
        # NOTE(review): AApc/AAap slice the alpha-general axes of AApp with
        # the beta core counts ncore[1]; confirm against upstream umc1step.
        aapc = eris.aapp[:,:,:,:ncore[0]]
        aaPC = eris.aaPP[:,:,:,:ncore[1]]
        AApc = eris.AApp[:,:,:,:ncore[0]]
        AAPC = eris.AAPP[:,:,:,:ncore[1]]
        apca = eris.appa[:,:,:ncore[0],:]
        APCA = eris.APPA[:,:,:ncore[1],:]
        # Effective JK potential of the (rotated) core, alpha channel.
        jka = numpy.einsum('iup->up', eris.jkcpp[:,:nocc[0]]) + eris.jC_pp[:nocc[0]]
        v1a =(numpy.einsum('up,pv->uv', jka[ncore[0]:], rmat[0][:,ncore[0]:nocc[0]])
            + numpy.einsum('uvpi,pi->uv', aapc-apca.transpose(0,3,1,2), rmat[0][:,:ncore[0]])
            + numpy.einsum('uvpi,pi->uv', aaPC, rmat[1][:,:ncore[1]]))
        # Same for the beta channel.
        jkb = numpy.einsum('iup->up', eris.jkcPP[:,:nocc[1]]) + eris.jc_PP[:nocc[1]]
        v1b =(numpy.einsum('up,pv->uv', jkb[ncore[1]:], rmat[1][:,ncore[1]:nocc[1]])
            + numpy.einsum('uvpi,pi->uv', AApc, rmat[0][:,:ncore[0]])
            + numpy.einsum('uvpi,pi->uv', AAPC-APCA.transpose(0,3,1,2), rmat[1][:,:ncore[1]]))
        # Active-space effective one-electron Hamiltonians.
        h1casa =(h1effa[ncore[0]:,ncore[0]:] + (v1a + v1a.T)
                 + reduce(numpy.dot, (mocas[0].T, hcore[0], mocas[0]))
                 + eris.vhf_c[0][ncore[0]:nocc[0],ncore[0]:nocc[0]])
        h1casb =(h1effb[ncore[1]:,ncore[1]:] + (v1b + v1b.T)
                 + reduce(numpy.dot, (mocas[1].T, hcore[1], mocas[1]))
                 + eris.vhf_c[1][ncore[1]:nocc[1],ncore[1]:nocc[1]])
        h1cas = (h1casa, h1casb)
        # First-order update of the active-space 2e integrals; the
        # transpose sums restore the (tu|vw) permutation symmetry.
        aaap = eris.aapp[:,:,ncore[0]:nocc[0],:]
        aaAP = eris.aaPP[:,:,ncore[1]:nocc[1],:]
        AAap = eris.AApp[:,:,ncore[1]:nocc[1],:]
        AAAP = eris.AAPP[:,:,ncore[1]:nocc[1],:]
        aaaa = numpy.einsum('tuvp,pw->tuvw', aaap, rmat[0][:,ncore[0]:nocc[0]])
        aaaa = aaaa + aaaa.transpose(0,1,3,2)
        aaaa = aaaa + aaaa.transpose(2,3,0,1)
        aaaa += aaap[:,:,:,ncore[0]:nocc[0]]
        AAAA = numpy.einsum('tuvp,pw->tuvw', AAAP, rmat[1][:,ncore[1]:nocc[1]])
        AAAA = AAAA + AAAA.transpose(0,1,3,2)
        AAAA = AAAA + AAAA.transpose(2,3,0,1)
        AAAA += AAAP[:,:,:,ncore[1]:nocc[1]]
        tmp = (numpy.einsum('vwtp,pu->tuvw', AAap, rmat[0][:,ncore[0]:nocc[0]]),
               numpy.einsum('tuvp,pw->tuvw', aaAP, rmat[1][:,ncore[1]:nocc[1]]))
        aaAA =(tmp[0] + tmp[0].transpose(1,0,2,3)
               + tmp[1] + tmp[1].transpose(0,1,3,2))
        aaAA += aaAP[:,:,:,ncore[1]:nocc[1]]
        # pure core response
        ecore =(h1effa[:ncore[0]].trace() + h1effb[:ncore[1]].trace()
                + numpy.einsum('jp,pj->', jka[:ncore[0]], rmat[0][:,:ncore[0]])*2
                + numpy.einsum('jp,pj->', jkb[:ncore[1]], rmat[1][:,:ncore[1]])*2)
        return ecore, h1cas, (aaaa, aaAA, AAAA)
    def solve_approx_ci(self, h1, h2, ci0, ecore, e_cas):
        ''' Solve CI eigenvalue/response problem approximately.

        Either delegates to the FCI solver's approx_kernel, performs a
        full re-diagonalization (ci_response_space > 6), or builds a small
        Krylov-like subspace and solves the generalized eigenproblem in it.
        Returns (ci1, g) where g is the CI gradient (None when
        approx_kernel was used).
        '''
        ncas = self.ncas
        nelecas = self.nelecas
        ncore = self.ncore
        nocc = (ncas + ncore[0], ncas + ncore[1])  # NOTE(review): unused
        if getattr(self.fcisolver, 'approx_kernel', None):
            ci1 = self.fcisolver.approx_kernel(h1, h2, ncas, nelecas, ci0=ci0)[1]
            return ci1, None
        h2eff = self.fcisolver.absorb_h1e(h1, h2, ncas, nelecas, .5)
        hc = self.fcisolver.contract_2e(h2eff, ci0, ncas, nelecas).ravel()
        # CI gradient: residual of ci0 w.r.t. the CAS-space eigenproblem.
        g = hc - (e_cas-ecore) * ci0.ravel()
        if self.ci_response_space > 6:
            logger.debug(self, 'CI step by full response')
            # full response
            e, ci1 = self.fcisolver.kernel(h1, h2, ncas, nelecas, ci0=ci0,
                                           max_memory=self.max_memory)
        else:
            nd = min(max(self.ci_response_space, 2), ci0.size)
            logger.debug(self, 'CI step by %dD subspace response', nd)
            # Build nd subspace vectors starting from ci0; heff/seff are the
            # projected Hamiltonian and overlap.
            xs = [ci0.ravel()]
            ax = [hc]
            heff = numpy.empty((nd,nd))
            seff = numpy.empty((nd,nd))
            heff[0,0] = numpy.dot(xs[0], ax[0])
            seff[0,0] = 1
            for i in range(1, nd):
                xs.append(ax[i-1] - xs[i-1] * e_cas)
                ax.append(self.fcisolver.contract_2e(h2eff, xs[i], ncas,
                                                     nelecas).ravel())
                for j in range(i+1):
                    heff[i,j] = heff[j,i] = numpy.dot(xs[i], ax[j])
                    seff[i,j] = seff[j,i] = numpy.dot(xs[i], xs[j])
            # safe_eigh discards near-singular overlap directions.
            e, v = pyscf.lib.safe_eigh(heff, seff)[:2]
            # Lowest subspace eigenvector, expanded back to CI space.
            ci1 = 0
            for i in range(nd):
                ci1 += xs[i] * v[i,0]
        return ci1, g
def dump_chk(self, envs):
if not self.chkfile:
return self
if self.chk_ci:
civec = envs['fcivec']
else:
civec = None
ncore = self.ncore
ncas = self.ncas
nocca = ncore[0] + ncas
noccb = ncore[1] + ncas
if 'mo' in envs:
mo_coeff = envs['mo']
else:
mo_coeff = envs['mo']
mo_occ = numpy.zeros((2,envs['mo'][0].shape[1]))
mo_occ[0,:ncore[0]] = 1
mo_occ[1,:ncore[1]] = 1
if self.natorb:
occa, ucas = self._eig(-envs['casdm1'][0], ncore[0], nocca)
occb, ucas = self._eig(-envs['casdm1'][1], ncore[1], noccb)
mo_occ[0,ncore[0]:nocca] = -occa
mo_occ[1,ncore[1]:noccb] = -occb
else:
mo_occ[0,ncore[0]:nocca] = envs['casdm1'][0].diagonal()
mo_occ[1,ncore[1]:noccb] = envs['casdm1'][1].diagonal()
mo_energy = 'None'
chkfile.dump_mcscf(self, self.chkfile, 'mcscf', envs['e_tot'],
mo_coeff, ncore, ncas, mo_occ,
mo_energy, envs['e_cas'], civec, envs['casdm1'],
overwrite_mol=False)
return self
    def rotate_mo(self, mo, u, log=None):
        '''Rotate orbitals with the given unitary matrix'''
        mo_a = numpy.dot(mo[0], u[0])
        mo_b = numpy.dot(mo[1], u[1])
        if log is not None and log.verbose >= logger.DEBUG:
            # Diagnostic: singular values of the overlap between the new
            # alpha active space and the initial-guess / previous-step one.
            # NOTE(review): only the alpha channel is logged; the beta
            # active space gets no equivalent diagnostic.
            ncore = self.ncore[0]
            ncas = self.ncas
            nocc = ncore + ncas
            s = reduce(numpy.dot, (mo_a[:,ncore:nocc].T, self._scf.get_ovlp(),
                                   self.mo_coeff[0][:,ncore:nocc]))
            log.debug('Alpha active space overlap to initial guess, SVD = %s',
                      numpy.linalg.svd(s)[1])
            log.debug('Alpha active space overlap to last step, SVD = %s',
                      numpy.linalg.svd(u[0][ncore:nocc,ncore:nocc])[1])
        return mo_a, mo_b
def micro_cycle_scheduler(self, envs):
#log_norm_ddm = numpy.log(envs['norm_ddm'])
#return max(self.max_cycle_micro, int(self.max_cycle_micro-1-log_norm_ddm))
return self.max_cycle_micro
    def max_stepsize_scheduler(self, envs):
        # Adaptive trust radius for the orbital step: shrink it whenever the
        # last macro iteration raised the total energy.
        if self._max_stepsize is None:
            self._max_stepsize = self.max_stepsize
        if envs['de'] > self.conv_tol: # Avoid total energy increasing
            self._max_stepsize *= .5
            logger.debug(self, 'set max_stepsize to %g', self._max_stepsize)
        else:
            # NOTE(review): sqrt(x*x) is just |max_stepsize|, i.e. a full
            # reset.  A geometric recovery was presumably intended
            # (sqrt(self.max_stepsize * self._max_stepsize)) -- confirm
            # against upstream before changing.
            self._max_stepsize = numpy.sqrt(self.max_stepsize*self.max_stepsize)
        return self._max_stepsize
    @property
    def max_orb_stepsize(self):  # pragma: no cover
        # Deprecated alias kept for backward compatibility; use
        # max_stepsize instead.
        return self.max_stepsize
    @max_orb_stepsize.setter
    def max_orb_stepsize(self, x):  # pragma: no cover
        # Warn on write so old scripts keep working but users migrate.
        sys.stderr.write('WARN: Attribute "max_orb_stepsize" was replaced by "max_stepsize"\n')
        self.max_stepsize = x
# Backward-compatible alias: the unrestricted solver is also exported
# under the generic name CASSCF.
CASSCF = UCASSCF
# to avoid calculating AO integrals
def _fake_h_for_fast_casci(casscf, mo, eris):
    '''Return a shallow copy of ``casscf`` whose get_veff/get_h2eff are
    replaced by closures over the already-transformed integrals in
    ``eris``, so the following CASCI step needs no AO integrals.
    '''
    mc = copy.copy(casscf)
    mc.mo_coeff = mo
    # vhf for core density matrix
    s = mc._scf.get_ovlp()
    # Back-transformation (MO -> AO) of the core JK potential via S*C.
    mo_inv = (numpy.dot(mo[0].T, s), numpy.dot(mo[1].T, s))
    vjk =(numpy.einsum('ipq->pq', eris.jkcpp) + eris.jC_pp,
          numpy.einsum('ipq->pq', eris.jkcPP) + eris.jc_PP)
    vhf =(reduce(numpy.dot, (mo_inv[0].T, vjk[0], mo_inv[0])),
          reduce(numpy.dot, (mo_inv[1].T, vjk[1], mo_inv[1])))
    mc.get_veff = lambda *args: vhf
    ncas = casscf.ncas
    ncore = casscf.ncore
    nocc = (ncas + ncore[0], ncas + ncore[1])
    # Active-space 2e integrals: (aa|aa), (aa|AA), (AA|AA) blocks.
    eri_cas = (eris.aapp[:,:,ncore[0]:nocc[0],ncore[0]:nocc[0]].copy(), \
               eris.aaPP[:,:,ncore[1]:nocc[1],ncore[1]:nocc[1]].copy(),
               eris.AAPP[:,:,ncore[1]:nocc[1],ncore[1]:nocc[1]].copy())
    mc.get_h2eff = lambda *args: eri_cas
    return mc
if __name__ == '__main__':
    # Smoke tests: two small UCASSCF calculations whose energies are
    # compared against previously recorded reference values.
    from pyscf import gto
    from pyscf import scf
    from pyscf.mcscf import addons
    # Test 1: an 8-H cluster cation, STO-3G, CAS(4, (2,1)).
    mol = gto.Mole()
    mol.verbose = 0
    mol.output = None#"out_h2o"
    mol.atom = [
        ['H', ( 1.,-1. , 0. )],
        ['H', ( 0.,-1. ,-1. )],
        ['H', ( 1.,-0.5 ,-1. )],
        ['H', ( 0.,-0.5 ,-1. )],
        ['H', ( 0.,-0.5 ,-0. )],
        ['H', ( 0.,-0. ,-1. )],
        ['H', ( 1.,-0.5 , 0. )],
        ['H', ( 0., 1. , 1. )],
    ]
    mol.basis = {'H': 'sto-3g'}
    mol.charge = 1
    mol.spin = 1
    mol.build()
    m = scf.UHF(mol)
    ehf = m.scf()
    mc = UCASSCF(m, 4, (2,1))
    #mo = m.mo_coeff
    # Hand-picked active orbitals for each spin channel.
    mo = addons.sort_mo(mc, m.mo_coeff, [(3,4,5,6),(3,4,6,7)], 1)
    emc = kernel(mc, mo, verbose=4)[1]
    print(ehf, emc, emc-ehf)
    # Deviation from the recorded reference energy (should be ~0).
    print(emc - -2.9782774463926618)
    # Test 2: the water cation, cc-pVDZ, CAS(4, (2,1)), with symmetry.
    mol.atom = [
        ['O', ( 0., 0. , 0. )],
        ['H', ( 0., -0.757, 0.587)],
        ['H', ( 0., 0.757 , 0.587)],]
    mol.basis = {'H': 'cc-pvdz',
                 'O': 'cc-pvdz',}
    mol.symmetry = 1
    mol.charge = 1
    mol.spin = 1
    mol.build()
    m = scf.UHF(mol)
    ehf = m.scf()
    mc = UCASSCF(m, 4, (2,1))
    mc.verbose = 4
    emc = mc.mc1step()[0]
    print(ehf, emc, emc-ehf)
    # Differences against several historical reference energies.
    print(emc - -75.5644202701263, emc - -75.573930418500652,
          emc - -75.574137883405612, emc - -75.648547447838951)
    # Same molecule with a manually sorted active space.
    mc = UCASSCF(m, 4, (2,1))
    mc.verbose = 4
    mo = mc.sort_mo((3,4,6,7))
    emc = mc.mc1step(mo)[0]
    print(ehf, emc, emc-ehf)
    print(emc - -75.5644202701263, emc - -75.573930418500652,
          emc - -75.574137883405612, emc - -75.648547447838951)
|
|
import numpy as np
import matplotlib.pyplot as plt
import time
import sys
# Functions to sample the diffusion-weighted gradient directions
from dipy.core.sphere import disperse_charges, HemiSphere
# Function to reconstruct the tables with the acquisition information
from dipy.core.gradients import gradient_table
# Functions to perform simulations based on multi-compartment models
from dipy.sims.voxel import multi_tensor
# Import Dipy's procedures to process diffusion tensor
import dipy.reconst.dti as dti
# Importing procedures to fit the free water elimination DTI model
from functions import nls_fit_tensor
# ---------------------------------------------------------------
print('Defining the acquistion parameters...')
# ---------------------------------------------------------------
# Sample the spherical coordinates of 32 random diffusion-weighted
# directions.
n_pts = 32
theta = np.pi * np.random.rand(n_pts)
phi = 2 * np.pi * np.random.rand(n_pts)
# Convert the directions to Cartesian coordinates using Dipy's
# HemiSphere class.  Since diffusion is centrally symmetric, this
# class also projects every direction onto a single hemisphere.
hsph_initial = HemiSphere(theta=theta, phi=phi)
# Using an electrostatic potential-energy algorithm, the directions of
# the HemiSphere object are moved until they are evenly distributed
# over the hemisphere (5000 relaxation iterations).
hsph_updated, potential = disperse_charges(hsph_initial, 5000)
directions = hsph_updated.vertices
# Based on the evenly sampled directions, the acquisition parameters
# are simulated: vector bvals holds the b-values (6 b=0 images plus the
# 32 directions repeated at b=500 and b=1500), while matrix bvecs holds
# the matching gradient directions.
bvals = np.hstack((np.zeros(6), 500 * np.ones(n_pts), 1500 * np.ones(n_pts)))
bvecs = np.vstack((np.zeros((6, 3)), directions, directions))
# bvals and bvecs are converted to Dipy's accepted format with
# gradient_table.
gtab = gradient_table(bvals, bvecs)
# The SNR is defined according to Hoy et al, 2014
SNR = 40
# ---------------------------------------------------------------
print('Defining the ground truth values for all simulations repetitions...')
# ---------------------------------------------------------------
# Simulations are repeated for 11 free-water volume fractions
# (0%, 10%, ..., 100%).
VF = np.linspace(0, 100, num=11)
# The value of free water diffusion is set to its known value
# (3e-3 mm^2/s at body temperature).
Dwater = 3e-3
# Simulations are repeated for 5 levels of fractional anisotropy; L1-L3
# are the matching tissue-tensor eigenvalues for each FA level.
FA = np.array([0.71, 0.30, 0.22, 0.11, 0.])
L1 = np.array([1.6e-3, 1.080e-3, 1.000e-3, 0.900e-3, 0.8e-03])
L2 = np.array([0.5e-3, 0.695e-3, 0.725e-3, 0.763e-3, 0.8e-03])
L3 = np.array([0.3e-3, 0.625e-3, 0.675e-3, 0.738e-3, 0.8e-03])
# According to Hoy et al., simulations are repeated for 120 different
# diffusion tensor directions (and each direction repeated 100 times).
nDTdirs = 120
nrep = 100
# These directions are sampled with the same electrostatic-repulsion
# procedure used above for the diffusion gradient directions.
theta = np.pi * np.random.rand(nDTdirs)
phi = 2 * np.pi * np.random.rand(nDTdirs)
hsph_initial = HemiSphere(theta=theta, phi=phi)
hsph_updated, potential = disperse_charges(hsph_initial, 5000)
DTdirs = hsph_updated.vertices
# ----------------------------------------------------------------
print('Generating simulations...')
# ----------------------------------------------------------------
# Initializing a matrix to save all synthetic diffusion-weighted
# signals. Each dimension of this matrix corresponds to the number
# of simulated FA levels, volume fractions, diffusion tensor
# directions (x repetitions), and diffusion-weighted signals of the
# given gradient table.
DWI_simulates = np.empty((FA.size, VF.size, nrep * nDTdirs,
                          bvals.size))
for fa_i in range(FA.size):
    # selecting the diffusion eigenvalues for a given FA level;
    # the second compartment is isotropic free water.
    mevals = np.array([[L1[fa_i], L2[fa_i], L3[fa_i]],
                       [Dwater, Dwater, Dwater]])
    for vf_i in range(VF.size):
        # estimating volume fractions for both simulations
        # compartments (tissue, free water) in percent
        fractions = [100 - VF[vf_i], VF[vf_i]]
        for di in range(nDTdirs):
            # Select a diffusion tensor direction
            d = DTdirs[di]
            # Repeat simulations for the given direction; s_i indexes
            # the flattened (direction, repetition) axis.
            for s_i in np.arange(di * nrep, (di+1) * nrep):
                # Multi-compartmental simulations are done using
                # Dipy's function multi_tensor
                signal, sticks = multi_tensor(gtab, mevals,
                                              S0=100,
                                              angles=[d, (1, 0, 0)],
                                              fractions=fractions,
                                              snr=SNR)
                DWI_simulates[fa_i, vf_i, s_i, :] = signal
    # Crude textual progress indicator, updated once per FA level.
    # NOTE(review): the time.sleep(1) only slows the run down (~5 s
    # total); it can likely be removed.
    prog = (fa_i+1.0) / FA.size * 100
    time.sleep(1)
    sys.stdout.write("\r%f%%" % prog)
    sys.stdout.flush()
# ----------------------------------------------------------------
print('Fitting the free water DTI model...')
# ----------------------------------------------------------------
# All simulations are fitted simultaneously using function nls_fit_tensor
# (non-linear least squares, free-water diffusivity fixed to Dwater).
t0 = time.time()
fw_params = nls_fit_tensor(gtab, DWI_simulates, Diso=Dwater)
dt = time.time() - t0
print("This step took %f seconds to run" % dt)
# ----------------------------------------------------------------
print('Compute FA and f-value statistics and save results...')
# ----------------------------------------------------------------
fig, axs = plt.subplots(nrows=2, ncols=2, figsize=(10, 10))
# Compute the tissue's diffusion tensor fractional anisotropy from the
# first three fitted parameters (the tensor eigenvalues) using
# dti.fractional_anisotropy.
fa = dti.fractional_anisotropy(fw_params[..., :3])
# Parameter 12 of the fit is the free-water volume fraction f.
f = fw_params[..., 12]
# Initializing vectors for FA statistics (median and the distances to
# the 25th/75th percentiles).
median_fa = np.empty(VF.size)
lower_p = np.empty(VF.size)
upper_p = np.empty(VF.size)
# Defining the colors of the figure, one per ground-truth FA level.
colors = {0: 'r', 1: 'magenta', 2: 'black', 3: 'b', 4: 'g'}
# For each ground-truth FA level, summarize the estimated FA over all
# direction/noise repetitions (median and 25-75 percentile range) and
# plot it against the simulated water volume fraction.
for fa_i in range(FA.size):
    for vf_i in range(VF.size):
        # Compute FA statistics for a given ground truth FA
        # level and a water volume fraction
        median_fa[vf_i] = np.median(fa[fa_i, vf_i, :])
        p25, p75 = np.percentile(fa[fa_i, vf_i, :], [25, 75])
        # Asymmetric error-bar lengths relative to the median.
        # Bug fix: the second statement previously overwrote lower_p,
        # leaving upper_p unset and the plot using lower_p twice.
        lower_p[vf_i] = median_fa[vf_i] - p25
        upper_p[vf_i] = p75 - median_fa[vf_i]
    # Plot FA statistics as a function of the ground truth
    # water volume fraction
    axs[0, 0].errorbar(VF/100, median_fa, fmt='.',
                       yerr=[lower_p, upper_p],
                       color=colors[fa_i],
                       ecolor=colors[fa_i],
                       linewidth=1.0,
                       label='$FA: %.2f$' % FA[fa_i])
# Adjust properties of the first panel of the figure
axs[0, 0].set_ylim([-0.1, 1.2])
axs[0, 0].set_xlim([-0.1, 1.2])
axs[0, 0].set_xlabel('Simulated f-value')
axs[0, 0].set_ylabel('Estimated FA')
axs[0, 0].legend(loc='center left', bbox_to_anchor=(1, 0.5))
# Turn off the upper right panel since it is not used.
axs[0, 1].axis('off')
# Initializing vectors for volume fraction statistics
median_f = np.empty(VF.size)
lower_p = np.empty(VF.size)
upper_p = np.empty(VF.size)
# Plot the estimated water volume fraction against its ground truth for
# the two extreme FA levels only (indices 0 and 4).
for idx, fa_i in enumerate([0, 4]):
    for vf_i in range(VF.size):
        # Compute f statistics for a given ground truth FA level and a
        # water volume fraction.
        median_f[vf_i] = np.median(f[fa_i, vf_i, :])
        p25, p75 = np.percentile(f[fa_i, vf_i, :], [25, 75])
        # Bug fix: the second statement previously overwrote lower_p,
        # leaving upper_p unset and the plot using lower_p twice.
        lower_p[vf_i] = median_f[vf_i] - p25
        upper_p[vf_i] = p75 - median_f[vf_i]
    # Plot the water volume fraction statistics as a function
    # of its ground truth value in a lower panel of the
    # figure.
    axs[1, idx].errorbar(VF/100, median_f, fmt='.',
                         yerr=[lower_p, upper_p],
                         color=colors[fa_i],
                         ecolor=colors[fa_i],
                         linewidth=3.0,
                         label='$FA: %.2f$' % FA[fa_i])
    # plot identity lines
    axs[1, idx].plot([0, 1], [0, 1], 'b', label='Simulated f-value')
    # Adjust properties of a given lower panel of the figure
    axs[1, idx].legend(loc='upper left')
    axs[1, idx].set_ylim([-0.1, 1.2])
    axs[1, idx].set_xlim([-0.1, 1.2])
    axs[1, idx].set_xlabel('Simulated f-value')
    axs[1, idx].set_ylabel('Estimated f-value')
# Save figure
fig.savefig('fwdti_simulations_1.png')
print('done!')
|
|
import unittest
from test import test_support
import posixpath, os
from posixpath import realpath, abspath, join, dirname, basename, relpath
# An absolute path to a temporary filename for testing. We can't rely on
# TESTFN being an absolute path, so we need this.  (Resolved once at
# import time; the symlink tests below derive all their paths from it.)
ABSTFN = abspath(test_support.TESTFN)
def safe_rmdir(dirname):
    """Best-effort directory removal.

    Any OSError (directory missing, not empty, permission denied, ...)
    is deliberately swallowed so tearDown can call this unconditionally.
    """
    try:
        os.rmdir(dirname)
    except OSError:
        pass
class PosixPathTest(unittest.TestCase):
    def setUp(self):
        # Start from a clean slate: reuse tearDown() to delete any test
        # artifacts left behind by a previous (crashed) run.
        self.tearDown()
    def tearDown(self):
        # Remove every file/directory the tests may create:
        # TESTFN, TESTFN + "1" and TESTFN + "2".
        for suffix in ["", "1", "2"]:
            test_support.unlink(test_support.TESTFN + suffix)
            safe_rmdir(test_support.TESTFN + suffix)
    def assertIs(self, a, b):
        # Identity assertion helper; pre-dates (and on modern Pythons
        # shadows) unittest.TestCase.assertIs.
        self.assert_(a is b)
    def test_normcase(self):
        # Check that normcase() is idempotent (it is the identity on POSIX).
        p = "FoO/./BaR"
        p = posixpath.normcase(p)
        self.assertEqual(p, posixpath.normcase(p))
        # Calling without an argument must raise TypeError.
        self.assertRaises(TypeError, posixpath.normcase)
    def test_join(self):
        # A later absolute component discards everything before it.
        self.assertEqual(posixpath.join("/foo", "bar", "/bar", "baz"), "/bar/baz")
        self.assertEqual(posixpath.join("/foo", "bar", "baz"), "/foo/bar/baz")
        # Trailing slashes are preserved, and no doubled separator appears.
        self.assertEqual(posixpath.join("/foo/", "bar/", "baz/"), "/foo/bar/baz/")
        self.assertRaises(TypeError, posixpath.join)
    # NOTE(review): an identical test_splitdrive is re-defined further down
    # in this class; that later definition shadows this one.
    def test_splitdrive(self):
        # POSIX paths have no drive letter, so the drive part is always "".
        self.assertEqual(posixpath.splitdrive("/foo/bar"), ("", "/foo/bar"))
        self.assertRaises(TypeError, posixpath.splitdrive)
    def test_split(self):
        self.assertEqual(posixpath.split("/foo/bar"), ("/foo", "bar"))
        self.assertEqual(posixpath.split("/"), ("/", ""))
        self.assertEqual(posixpath.split("foo"), ("", "foo"))
        # Leading slashes are preserved in the head.
        self.assertEqual(posixpath.split("////foo"), ("////", "foo"))
        self.assertEqual(posixpath.split("//foo//bar"), ("//foo", "bar"))
        self.assertRaises(TypeError, posixpath.split)
    def splitextTest(self, path, filename, ext):
        # Helper: check splitext on the bare path and on the same path
        # embedded under several prefixes (including dotted directories,
        # which must not influence the extension split) and with a
        # trailing slash (which yields an empty extension).
        self.assertEqual(posixpath.splitext(path), (filename, ext))
        self.assertEqual(posixpath.splitext("/" + path), ("/" + filename, ext))
        self.assertEqual(posixpath.splitext("abc/" + path), ("abc/" + filename, ext))
        self.assertEqual(posixpath.splitext("abc.def/" + path), ("abc.def/" + filename, ext))
        self.assertEqual(posixpath.splitext("/abc.def/" + path), ("/abc.def/" + filename, ext))
        self.assertEqual(posixpath.splitext(path + "/"), (filename + ext + "/", ""))
    def test_splitext(self):
        self.splitextTest("foo.bar", "foo", ".bar")
        self.splitextTest("foo.boo.bar", "foo.boo", ".bar")
        self.splitextTest("foo.boo.biff.bar", "foo.boo.biff", ".bar")
        self.splitextTest(".csh.rc", ".csh", ".rc")
        self.splitextTest("nodots", "nodots", "")
        # Leading dots mark hidden files, not extensions.
        self.splitextTest(".cshrc", ".cshrc", "")
        self.splitextTest("...manydots", "...manydots", "")
        self.splitextTest("...manydots.ext", "...manydots", ".ext")
        self.splitextTest(".", ".", "")
        self.splitextTest("..", "..", "")
        self.splitextTest("........", "........", "")
        self.splitextTest("", "", "")
        self.assertRaises(TypeError, posixpath.splitext)
    def test_isabs(self):
        self.assertIs(posixpath.isabs(""), False)
        self.assertIs(posixpath.isabs("/"), True)
        self.assertIs(posixpath.isabs("/foo"), True)
        self.assertIs(posixpath.isabs("/foo/bar"), True)
        self.assertIs(posixpath.isabs("foo/bar"), False)
        self.assertRaises(TypeError, posixpath.isabs)
    # NOTE(review): duplicate definition -- identical to the test_splitdrive
    # defined earlier in this class, which it shadows.
    def test_splitdrive(self):
        self.assertEqual(posixpath.splitdrive("/foo/bar"), ("", "/foo/bar"))
        self.assertRaises(TypeError, posixpath.splitdrive)
    def test_basename(self):
        self.assertEqual(posixpath.basename("/foo/bar"), "bar")
        self.assertEqual(posixpath.basename("/"), "")
        self.assertEqual(posixpath.basename("foo"), "foo")
        self.assertEqual(posixpath.basename("////foo"), "foo")
        self.assertEqual(posixpath.basename("//foo//bar"), "bar")
        self.assertRaises(TypeError, posixpath.basename)
    def test_dirname(self):
        self.assertEqual(posixpath.dirname("/foo/bar"), "/foo")
        self.assertEqual(posixpath.dirname("/"), "/")
        self.assertEqual(posixpath.dirname("foo"), "")
        # Leading slashes stay in the dirname part.
        self.assertEqual(posixpath.dirname("////foo"), "////")
        self.assertEqual(posixpath.dirname("//foo//bar"), "//foo")
        self.assertRaises(TypeError, posixpath.dirname)
    def test_commonprefix(self):
        # commonprefix works character-by-character, NOT on path
        # components -- hence "/home/swen" below, not "/home".
        self.assertEqual(
            posixpath.commonprefix([]),
            ""
        )
        self.assertEqual(
            posixpath.commonprefix(["/home/swenson/spam", "/home/swen/spam"]),
            "/home/swen"
        )
        self.assertEqual(
            posixpath.commonprefix(["/home/swen/spam", "/home/swen/eggs"]),
            "/home/swen/"
        )
        self.assertEqual(
            posixpath.commonprefix(["/home/swen/spam", "/home/swen/spam"]),
            "/home/swen/spam"
        )
        # Exhaustive pairwise check: the prefix must start both strings,
        # and for distinct strings the next character must differ.
        testlist = ['', 'abc', 'Xbcd', 'Xb', 'XY', 'abcd', 'aXc', 'abd', 'ab', 'aX', 'abcX']
        for s1 in testlist:
            for s2 in testlist:
                p = posixpath.commonprefix([s1, s2])
                self.assert_(s1.startswith(p))
                self.assert_(s2.startswith(p))
                if s1 != s2:
                    n = len(p)
                    self.assertNotEqual(s1[n:n+1], s2[n:n+1])
    def test_getsize(self):
        # NOTE: writing a str to a "wb" file is Python 2 semantics.
        f = open(test_support.TESTFN, "wb")
        try:
            f.write("foo")
            f.close()
            self.assertEqual(posixpath.getsize(test_support.TESTFN), 3)
        finally:
            if not f.closed:
                f.close()
    def test_time(self):
        f = open(test_support.TESTFN, "wb")
        try:
            f.write("foo")
            f.close()
            # Append to the file so mtime is at least as new as ctime.
            f = open(test_support.TESTFN, "ab")
            f.write("bar")
            f.close()
            f = open(test_support.TESTFN, "rb")
            d = f.read()
            f.close()
            self.assertEqual(d, "foobar")
            self.assert_(
                posixpath.getctime(test_support.TESTFN) <=
                posixpath.getmtime(test_support.TESTFN)
            )
        finally:
            if not f.closed:
                f.close()
def test_islink(self):
self.assertIs(posixpath.islink(test_support.TESTFN + "1"), False)
f = open(test_support.TESTFN + "1", "wb")
try:
f.write("foo")
f.close()
self.assertIs(posixpath.islink(test_support.TESTFN + "1"), False)
if hasattr(os, "symlink"):
os.symlink(test_support.TESTFN + "1", test_support.TESTFN + "2")
self.assertIs(posixpath.islink(test_support.TESTFN + "2"), True)
os.remove(test_support.TESTFN + "1")
self.assertIs(posixpath.islink(test_support.TESTFN + "2"), True)
self.assertIs(posixpath.exists(test_support.TESTFN + "2"), False)
self.assertIs(posixpath.lexists(test_support.TESTFN + "2"), True)
finally:
if not f.close():
f.close()
self.assertRaises(TypeError, posixpath.islink)
def test_exists(self):
self.assertIs(posixpath.exists(test_support.TESTFN), False)
f = open(test_support.TESTFN, "wb")
try:
f.write("foo")
f.close()
self.assertIs(posixpath.exists(test_support.TESTFN), True)
self.assertIs(posixpath.lexists(test_support.TESTFN), True)
finally:
if not f.close():
f.close()
self.assertRaises(TypeError, posixpath.exists)
def test_isdir(self):
self.assertIs(posixpath.isdir(test_support.TESTFN), False)
f = open(test_support.TESTFN, "wb")
try:
f.write("foo")
f.close()
self.assertIs(posixpath.isdir(test_support.TESTFN), False)
os.remove(test_support.TESTFN)
os.mkdir(test_support.TESTFN)
self.assertIs(posixpath.isdir(test_support.TESTFN), True)
os.rmdir(test_support.TESTFN)
finally:
if not f.close():
f.close()
self.assertRaises(TypeError, posixpath.isdir)
def test_isfile(self):
self.assertIs(posixpath.isfile(test_support.TESTFN), False)
f = open(test_support.TESTFN, "wb")
try:
f.write("foo")
f.close()
self.assertIs(posixpath.isfile(test_support.TESTFN), True)
os.remove(test_support.TESTFN)
os.mkdir(test_support.TESTFN)
self.assertIs(posixpath.isfile(test_support.TESTFN), False)
os.rmdir(test_support.TESTFN)
finally:
if not f.close():
f.close()
self.assertRaises(TypeError, posixpath.isdir)
def test_samefile(self):
f = open(test_support.TESTFN + "1", "wb")
try:
f.write("foo")
f.close()
self.assertIs(
posixpath.samefile(
test_support.TESTFN + "1",
test_support.TESTFN + "1"
),
True
)
# If we don't have links, assume that os.stat doesn't return resonable
# inode information and thus, that samefile() doesn't work
if hasattr(os, "symlink"):
os.symlink(
test_support.TESTFN + "1",
test_support.TESTFN + "2"
)
self.assertIs(
posixpath.samefile(
test_support.TESTFN + "1",
test_support.TESTFN + "2"
),
True
)
os.remove(test_support.TESTFN + "2")
f = open(test_support.TESTFN + "2", "wb")
f.write("bar")
f.close()
self.assertIs(
posixpath.samefile(
test_support.TESTFN + "1",
test_support.TESTFN + "2"
),
False
)
finally:
if not f.close():
f.close()
self.assertRaises(TypeError, posixpath.samefile)
def test_samestat(self):
f = open(test_support.TESTFN + "1", "wb")
try:
f.write("foo")
f.close()
self.assertIs(
posixpath.samestat(
os.stat(test_support.TESTFN + "1"),
os.stat(test_support.TESTFN + "1")
),
True
)
# If we don't have links, assume that os.stat() doesn't return resonable
# inode information and thus, that samefile() doesn't work
if hasattr(os, "symlink"):
if hasattr(os, "symlink"):
os.symlink(test_support.TESTFN + "1", test_support.TESTFN + "2")
self.assertIs(
posixpath.samestat(
os.stat(test_support.TESTFN + "1"),
os.stat(test_support.TESTFN + "2")
),
True
)
os.remove(test_support.TESTFN + "2")
f = open(test_support.TESTFN + "2", "wb")
f.write("bar")
f.close()
self.assertIs(
posixpath.samestat(
os.stat(test_support.TESTFN + "1"),
os.stat(test_support.TESTFN + "2")
),
False
)
finally:
if not f.close():
f.close()
self.assertRaises(TypeError, posixpath.samestat)
    def test_ismount(self):
        # "/" is a mount point on every POSIX system.
        self.assertIs(posixpath.ismount("/"), True)
        self.assertRaises(TypeError, posixpath.ismount)
    def test_expanduser(self):
        # Paths without "~" are returned unchanged.
        self.assertEqual(posixpath.expanduser("foo"), "foo")
        try:
            import pwd
        except ImportError:
            pass
        else:
            # NOTE: basestring is Python 2 only.
            self.assert_(isinstance(posixpath.expanduser("~/"), basestring))
            # if home directory == root directory, this test makes no sense
            if posixpath.expanduser("~") != '/':
                self.assertEqual(
                    posixpath.expanduser("~") + "/",
                    posixpath.expanduser("~/")
                )
            self.assert_(isinstance(posixpath.expanduser("~root/"), basestring))
            self.assert_(isinstance(posixpath.expanduser("~foo/"), basestring))
        self.assertRaises(TypeError, posixpath.expanduser)
    def test_expandvars(self):
        # Run against a controlled environment; the original one is
        # restored in the finally block.
        oldenv = os.environ.copy()
        try:
            os.environ.clear()
            os.environ["foo"] = "bar"
            os.environ["{foo"] = "baz1"
            os.environ["{foo}"] = "baz2"
            self.assertEqual(posixpath.expandvars("foo"), "foo")
            self.assertEqual(posixpath.expandvars("$foo bar"), "bar bar")
            self.assertEqual(posixpath.expandvars("${foo}bar"), "barbar")
            # Only $name and ${name} forms are expanded.
            self.assertEqual(posixpath.expandvars("$[foo]bar"), "$[foo]bar")
            # Unknown variables are left untouched.
            self.assertEqual(posixpath.expandvars("$bar bar"), "$bar bar")
            self.assertEqual(posixpath.expandvars("$?bar"), "$?bar")
            self.assertEqual(posixpath.expandvars("${foo}bar"), "barbar")
            self.assertEqual(posixpath.expandvars("$foo}bar"), "bar}bar")
            # An unterminated ${ is left as-is.
            self.assertEqual(posixpath.expandvars("${foo"), "${foo")
            self.assertEqual(posixpath.expandvars("${{foo}}"), "baz1}")
            self.assertEqual(posixpath.expandvars("$foo$foo"), "barbar")
            self.assertEqual(posixpath.expandvars("$bar$bar"), "$bar$bar")
        finally:
            os.environ.clear()
            os.environ.update(oldenv)
        self.assertRaises(TypeError, posixpath.expandvars)
    def test_normpath(self):
        self.assertEqual(posixpath.normpath(""), ".")
        self.assertEqual(posixpath.normpath("/"), "/")
        # Exactly two leading slashes are preserved (POSIX reserves a
        # special meaning for "//"); three or more collapse to one.
        self.assertEqual(posixpath.normpath("//"), "//")
        self.assertEqual(posixpath.normpath("///"), "/")
        self.assertEqual(posixpath.normpath("///foo/.//bar//"), "/foo/bar")
        self.assertEqual(posixpath.normpath("///foo/.//bar//.//..//.//baz"), "/foo/baz")
        # ".." above the root is dropped.
        self.assertEqual(posixpath.normpath("///..//./foo/.//bar"), "/foo/bar")
        self.assertRaises(TypeError, posixpath.normpath)
    def test_abspath(self):
        # Weak check only: the result must contain the input basename.
        self.assert_("foo" in posixpath.abspath("foo"))
        self.assertRaises(TypeError, posixpath.abspath)
    def test_realpath(self):
        # Weak check only: the result must contain the input basename.
        self.assert_("foo" in realpath("foo"))
        self.assertRaises(TypeError, posixpath.realpath)
if hasattr(os, "symlink"):
def test_realpath_basic(self):
# Basic operation.
try:
os.symlink(ABSTFN+"1", ABSTFN)
self.assertEqual(realpath(ABSTFN), ABSTFN+"1")
finally:
test_support.unlink(ABSTFN)
def test_realpath_symlink_loops(self):
# Bug #930024, return the path unchanged if we get into an infinite
# symlink loop.
try:
old_path = abspath('.')
os.symlink(ABSTFN, ABSTFN)
self.assertEqual(realpath(ABSTFN), ABSTFN)
os.symlink(ABSTFN+"1", ABSTFN+"2")
os.symlink(ABSTFN+"2", ABSTFN+"1")
self.assertEqual(realpath(ABSTFN+"1"), ABSTFN+"1")
self.assertEqual(realpath(ABSTFN+"2"), ABSTFN+"2")
# Test using relative path as well.
os.chdir(dirname(ABSTFN))
self.assertEqual(realpath(basename(ABSTFN)), ABSTFN)
finally:
os.chdir(old_path)
test_support.unlink(ABSTFN)
test_support.unlink(ABSTFN+"1")
test_support.unlink(ABSTFN+"2")
        def test_realpath_resolve_parents(self):
            """Symlinks in the cwd's ancestry are resolved for relative paths."""
            # We also need to resolve any symlinks in the parents of a relative
            # path passed to realpath. E.g.: current working directory is
            # /usr/doc with 'doc' being a symlink to /usr/share/doc. We call
            # realpath("a"). This should return /usr/share/doc/a/.
            try:
                old_path = abspath('.')
                os.mkdir(ABSTFN)
                os.mkdir(ABSTFN + "/y")
                # k is a symlink to y; cwd inside k must resolve to .../y
                os.symlink(ABSTFN + "/y", ABSTFN + "/k")
                os.chdir(ABSTFN + "/k")
                self.assertEqual(realpath("a"), ABSTFN + "/y/a")
            finally:
                # restore cwd first, then tear down in reverse creation order
                os.chdir(old_path)
                test_support.unlink(ABSTFN + "/k")
                safe_rmdir(ABSTFN + "/y")
                safe_rmdir(ABSTFN)
        def test_realpath_resolve_before_normalizing(self):
            """'..' must be applied to the resolved target, not the link path."""
            # Bug #990669: Symbolic links should be resolved before we
            # normalize the path. E.g.: if we have directories 'a', 'k' and 'y'
            # in the following hierarchy:
            # a/k/y
            #
            # and a symbolic link 'link-y' pointing to 'y' in directory 'a',
            # then realpath("link-y/..") should return 'k', not 'a'.
            try:
                old_path = abspath('.')
                os.mkdir(ABSTFN)
                os.mkdir(ABSTFN + "/k")
                os.mkdir(ABSTFN + "/k/y")
                os.symlink(ABSTFN + "/k/y", ABSTFN + "/link-y")
                # Absolute path.
                self.assertEqual(realpath(ABSTFN + "/link-y/.."), ABSTFN + "/k")
                # Relative path.
                os.chdir(dirname(ABSTFN))
                self.assertEqual(realpath(basename(ABSTFN) + "/link-y/.."), ABSTFN + "/k")
            finally:
                # restore cwd first, then tear down in reverse creation order
                os.chdir(old_path)
                test_support.unlink(ABSTFN + "/link-y")
                safe_rmdir(ABSTFN + "/k/y")
                safe_rmdir(ABSTFN + "/k")
                safe_rmdir(ABSTFN)
        def test_realpath_resolve_first(self):
            """The first (relative) component of a path must be resolved too."""
            # Bug #1213894: The first component of the path, if not absolute,
            # must be resolved too.
            try:
                old_path = abspath('.')
                os.mkdir(ABSTFN)
                os.mkdir(ABSTFN + "/k")
                # ABSTFNlink -> ABSTFN; the link is the FIRST path component below
                os.symlink(ABSTFN, ABSTFN + "link")
                os.chdir(dirname(ABSTFN))
                base = basename(ABSTFN)
                self.assertEqual(realpath(base + "link"), ABSTFN)
                self.assertEqual(realpath(base + "link/k"), ABSTFN + "/k")
            finally:
                # restore cwd first, then tear down in reverse creation order
                os.chdir(old_path)
                test_support.unlink(ABSTFN + "link")
                safe_rmdir(ABSTFN + "/k")
                safe_rmdir(ABSTFN)
def test_relpath(self):
(real_getcwd, os.getcwd) = (os.getcwd, lambda: r"/home/user/bar")
try:
curdir = os.path.split(os.getcwd())[-1]
self.assertRaises(ValueError, posixpath.relpath, "")
self.assertEqual(posixpath.relpath("a"), "a")
self.assertEqual(posixpath.relpath(posixpath.abspath("a")), "a")
self.assertEqual(posixpath.relpath("a/b"), "a/b")
self.assertEqual(posixpath.relpath("../a/b"), "../a/b")
self.assertEqual(posixpath.relpath("a", "../b"), "../"+curdir+"/a")
self.assertEqual(posixpath.relpath("a/b", "../c"), "../"+curdir+"/a/b")
self.assertEqual(posixpath.relpath("a", "b/c"), "../../a")
self.assertEqual(posixpath.relpath("a", "a"), ".")
finally:
os.getcwd = real_getcwd
def test_main():
    """Entry point used by regrtest: run the PosixPathTest suite."""
    test_support.run_unittest(PosixPathTest)
if __name__=="__main__":
test_main()
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2014, Jens Depuydt <http://www.jensd.be>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: postgresql_lang
short_description: Adds, removes or changes procedural languages with a PostgreSQL database
description:
- Adds, removes or changes procedural languages with a PostgreSQL database.
- This module allows you to add a language, remove a language or change the trust
relationship with a PostgreSQL database.
- The module can be used on the machine where executed or on a remote host.
- When removing a language from a database, it is possible that dependencies prevent
the database from being removed. In that case, you can specify I(cascade=yes) to
automatically drop objects that depend on the language (such as functions in the
language).
- In case the language can't be deleted because it is required by the
database system, you can specify I(fail_on_drop=no) to ignore the error.
- Be careful when marking a language as trusted since this could be a potential
security breach. Untrusted languages allow only users with the PostgreSQL superuser
privilege to use this language to create new functions.
version_added: '1.7'
options:
lang:
description:
- Name of the procedural language to add, remove or change.
required: true
type: str
aliases:
- name
trust:
description:
- Make this language trusted for the selected db.
type: bool
default: 'no'
db:
description:
- Name of database to connect to and where the language will be added, removed or changed.
type: str
aliases:
- login_db
force_trust:
description:
- Marks the language as trusted, even if it's marked as untrusted in pg_pltemplate.
- Use with care!
type: bool
default: 'no'
fail_on_drop:
description:
- If C(yes), fail when removing a language. Otherwise just log and continue.
- In some cases, it is not possible to remove a language (used by the db-system).
- When dependencies block the removal, consider using I(cascade).
type: bool
default: 'yes'
cascade:
description:
  - When dropping a language, also delete objects that depend on this language.
- Only used when I(state=absent).
type: bool
default: 'no'
session_role:
version_added: '2.8'
description:
- Switch to session_role after connecting.
- The specified I(session_role) must be a role that the current I(login_user) is a member of.
- Permissions checking for SQL commands is carried out as though the I(session_role) were the one that had logged in originally.
type: str
state:
description:
- The state of the language for the selected database.
type: str
default: present
choices: [ absent, present ]
login_unix_socket:
description:
- Path to a Unix domain socket for local connections.
type: str
version_added: '2.8'
ssl_mode:
description:
- Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
- See U(https://www.postgresql.org/docs/current/static/libpq-ssl.html) for more information on the modes.
- Default of C(prefer) matches libpq default.
type: str
default: prefer
choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
version_added: '2.8'
ca_cert:
description:
- Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
- If the file exists, the server's certificate will be verified to be signed by one of these authorities.
type: str
aliases: [ ssl_rootcert ]
version_added: '2.8'
owner:
description:
- Set an owner for the language.
- Ignored when I(state=absent).
type: str
version_added: '2.10'
seealso:
- name: PostgreSQL languages
description: General information about PostgreSQL languages.
link: https://www.postgresql.org/docs/current/xplang.html
- name: CREATE LANGUAGE reference
description: Complete reference of the CREATE LANGUAGE command documentation.
link: https://www.postgresql.org/docs/current/sql-createlanguage.html
- name: ALTER LANGUAGE reference
description: Complete reference of the ALTER LANGUAGE command documentation.
link: https://www.postgresql.org/docs/current/sql-alterlanguage.html
- name: DROP LANGUAGE reference
description: Complete reference of the DROP LANGUAGE command documentation.
link: https://www.postgresql.org/docs/current/sql-droplanguage.html
author:
- Jens Depuydt (@jensdepuydt)
- Thomas O'Donnell (@andytom)
extends_documentation_fragment: postgres
'''
EXAMPLES = r'''
- name: Add language pltclu to database testdb if it doesn't exist
postgresql_lang: db=testdb lang=pltclu state=present
# Add language pltclu to database testdb if it doesn't exist and mark it as trusted.
# Marks the language as trusted if it exists but isn't trusted yet.
# force_trust makes sure that the language will be marked as trusted
- name: Add language pltclu to database testdb if it doesn't exist and mark it as trusted
postgresql_lang:
db: testdb
lang: pltclu
state: present
trust: yes
force_trust: yes
- name: Remove language pltclu from database testdb
postgresql_lang:
db: testdb
lang: pltclu
state: absent
- name: Remove language pltclu from database testdb and remove all dependencies
postgresql_lang:
db: testdb
lang: pltclu
state: absent
cascade: yes
- name: Remove language c from database testdb but ignore errors if something prevents the removal
postgresql_lang:
db: testdb
lang: pltclu
state: absent
fail_on_drop: no
- name: In testdb change owner of mylang to alice
postgresql_lang:
db: testdb
lang: mylang
owner: alice
'''
RETURN = r'''
queries:
description: List of executed queries.
returned: always
type: list
sample: ['CREATE LANGUAGE "acme"']
version_added: '2.8'
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.postgres import (
connect_to_db,
get_conn_params,
postgres_common_argument_spec,
)
executed_queries = []
def lang_exists(cursor, lang):
    """Return True if procedural language *lang* is installed in the db.

    Fix: the language name came straight from module parameters and was
    interpolated into the SQL string, allowing SQL injection. Pass it as
    a bound query parameter instead.
    """
    query = "SELECT lanname FROM pg_language WHERE lanname = %(lang)s"
    cursor.execute(query, {'lang': lang})
    return cursor.rowcount > 0
def lang_istrusted(cursor, lang):
    """Return the lanpltrusted flag for *lang*.

    Callers check lang_exists() first, so fetchone() is expected to
    return a row. Fix: use a bound parameter instead of interpolating
    the user-supplied name into the SQL string (SQL injection).
    """
    query = "SELECT lanpltrusted FROM pg_language WHERE lanname = %(lang)s"
    cursor.execute(query, {'lang': lang})
    return cursor.fetchone()[0]
def lang_altertrust(cursor, lang, trust):
    """Set pg_language.lanpltrusted for *lang* to the boolean *trust*.

    Fix: execute with bound parameters instead of interpolating the
    user-supplied name into the SQL string (SQL injection). The
    human-readable interpolated form is still what gets recorded in
    executed_queries for the module's 'queries' return value.
    """
    executed_queries.append(
        "UPDATE pg_language SET lanpltrusted = '%s' WHERE lanname = '%s'" % (trust, lang))
    cursor.execute("UPDATE pg_language SET lanpltrusted = %(trust)s WHERE lanname = %(lang)s",
                   {'trust': trust, 'lang': lang})
    return True
def lang_add(cursor, lang, trust):
    """Install procedural language *lang*; mark it TRUSTED when *trust* is set.

    The language name is a SQL identifier (double-quoted), so it cannot be
    passed as a bound parameter here.
    """
    keyword = 'CREATE TRUSTED LANGUAGE' if trust else 'CREATE LANGUAGE'
    query = '%s "%s"' % (keyword, lang)
    executed_queries.append(query)
    cursor.execute(query)
    return True
def lang_drop(cursor, lang, cascade):
    """Drop language *lang*; return True on success, False otherwise.

    The DROP runs inside a savepoint so a failure (e.g. dependent
    objects without cascade, or a system-required language) rolls back
    cleanly and leaves the enclosing transaction usable.
    """
    cursor.execute("SAVEPOINT ansible_pgsql_lang_drop")
    if cascade:
        query = 'DROP LANGUAGE "%s" CASCADE' % lang
    else:
        query = 'DROP LANGUAGE "%s"' % lang
    executed_queries.append(query)
    try:
        cursor.execute(query)
    except Exception:
        # undo the failed DROP; the savepoint is released in both paths
        cursor.execute("ROLLBACK TO SAVEPOINT ansible_pgsql_lang_drop")
        dropped = False
    else:
        dropped = True
    cursor.execute("RELEASE SAVEPOINT ansible_pgsql_lang_drop")
    return dropped
def get_lang_owner(cursor, lang):
    """Get language owner.

    Args:
        cursor (cursor): psycopg2 cursor object.
        lang (str): language name.

    Returns the role name owning *lang*. Fix: use a bound parameter
    instead of interpolating the user-supplied name into the SQL
    string (SQL injection).
    """
    query = ("SELECT r.rolname FROM pg_language l "
             "JOIN pg_roles r ON l.lanowner = r.oid "
             "WHERE l.lanname = %(lang)s")
    cursor.execute(query, {'lang': lang})
    return cursor.fetchone()[0]
def set_lang_owner(cursor, lang, owner):
    """Set language owner.

    Args:
        cursor (cursor): psycopg2 cursor object.
        lang (str): language name.
        owner (str): name of new owner.

    Fix: double-quote both identifiers, consistent with lang_add/lang_drop,
    so mixed-case or otherwise special names are handled. Identifiers
    cannot be bound parameters, hence the string interpolation.
    """
    query = 'ALTER LANGUAGE "%s" OWNER TO "%s"' % (lang, owner)
    executed_queries.append(query)
    cursor.execute(query)
    return True
def main():
    """Ansible entry point: add/drop a procedural language, or adjust its
    trust flag and owner, in the target PostgreSQL database."""
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        db=dict(type="str", required=True, aliases=["login_db"]),
        lang=dict(type="str", required=True, aliases=["name"]),
        state=dict(type="str", default="present", choices=["absent", "present"]),
        trust=dict(type="bool", default="no"),
        force_trust=dict(type="bool", default="no"),
        cascade=dict(type="bool", default="no"),
        fail_on_drop=dict(type="bool", default="yes"),
        session_role=dict(type="str"),
        owner=dict(type="str"),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    db = module.params["db"]
    lang = module.params["lang"]
    state = module.params["state"]
    trust = module.params["trust"]
    force_trust = module.params["force_trust"]
    cascade = module.params["cascade"]
    fail_on_drop = module.params["fail_on_drop"]
    owner = module.params["owner"]
    conn_params = get_conn_params(module, module.params)
    # autocommit=False: every change is held back until the explicit
    # commit/rollback below, which is what makes check_mode side-effect free.
    db_connection = connect_to_db(module, conn_params, autocommit=False)
    cursor = db_connection.cursor()
    changed = False
    kw = {'db': db, 'lang': lang, 'trust': trust}
    if state == "present":
        if lang_exists(cursor, lang):
            lang_trusted = lang_istrusted(cursor, lang)
            # Only touch the trust flag when it differs from the requested value.
            if (lang_trusted and not trust) or (not lang_trusted and trust):
                if module.check_mode:
                    changed = True
                else:
                    changed = lang_altertrust(cursor, lang, trust)
        else:
            if module.check_mode:
                changed = True
            else:
                changed = lang_add(cursor, lang, trust)
                # force_trust: override an untrusted marking from pg_pltemplate.
                if force_trust:
                    changed = lang_altertrust(cursor, lang, trust)
    else:
        if lang_exists(cursor, lang):
            if module.check_mode:
                changed = True
                kw['lang_dropped'] = True
            else:
                changed = lang_drop(cursor, lang, cascade)
                # fail_on_drop=no downgrades a failed drop to a no-op result.
                if fail_on_drop and not changed:
                    msg = ("unable to drop language, use cascade "
                           "to delete dependencies or fail_on_drop=no to ignore")
                    module.fail_json(msg=msg)
                kw['lang_dropped'] = changed
    # Owner changes only apply to a present language.
    if owner and state == 'present':
        if lang_exists(cursor, lang):
            if owner != get_lang_owner(cursor, lang):
                changed = set_lang_owner(cursor, lang, owner)
    if changed:
        if module.check_mode:
            db_connection.rollback()
        else:
            db_connection.commit()
    kw['changed'] = changed
    kw['queries'] = executed_queries
    db_connection.close()
    module.exit_json(**kw)
|
|
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponseBadRequest, HttpResponse
from bootcamp.tasks.models import Task
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from bootcamp.tasks.forms import TaskForm
from django.contrib.auth.decorators import login_required
from bootcamp.decorators import ajax_required
import markdown
from django.template.loader import render_to_string
import requests,json
from bootcamp.utils.loadconfig import get_vars
def getvrflist(network):
    """Return the list of VRF names configured for *network* ('emc' or 'mtn').

    Reads one VRF name per line from the network's list file under
    /etc/netbot. Fixes:
    - an unknown network previously left `filename` unassigned and raised
      a confusing NameError; now raises an explicit ValueError;
    - trailing newlines were kept on every name; they are stripped so the
      values are clean for templates and API payloads.
    """
    network_key = network.lower()
    if network_key == 'emc':
        filename = '/etc/netbot/emcvrflist.txt'
    elif network_key == 'mtn':
        filename = '/etc/netbot/mtnvrflist.txt'
    else:
        raise ValueError("unknown network: %r" % network)
    vrfnames = []
    with open(filename) as f:
        for line in f:
            vrfnames.append(line.rstrip('\n'))
    return vrfnames
@login_required
def traceroute(request):
    """Render the traceroute form, pre-populated with the EMC VRF list."""
    context = {'task': "task", 'emcvrf': getvrflist('emc'), 'message': ""}
    return render(request, 'traceroute/traceroute.html', context)
@login_required
def inttraceroute(request):
    """Render the interface-traceroute form with the EMC VRF list."""
    context = {'task': "task", 'emcvrf': getvrflist('emc'), 'message': ""}
    return render(request, 'traceroute/inttraceroute.html', context)
@login_required()
def runtrace(request):
    """Validate the traceroute form POST and hand off to the run template.

    Fix: request.POST.get() returns None for a missing key, which the old
    `== ''` checks let through and later views would crash on; treat
    missing and empty fields the same and re-render with an error. Also
    drops a redundant unconditional get_vars() call.
    """
    sourceip = request.POST.get('sourceip')
    destip = request.POST.get('destip')
    vrf = request.POST.get('vrf')
    network = request.POST.get('network')
    vrfname = request.POST.get('vrfname')
    emcvrfname = getvrflist('emc')
    if not all([sourceip, destip, vrf, vrfname, network]):
        return render(request, 'traceroute/traceroute.html',
                      {'task': "task", 'emcvrf': emcvrfname,
                       'message': "Please fill in all the details!!"})
    if str(network).lower() == 'emc':
        baseurl = get_vars('ansibengineemc')
    else:
        baseurl = get_vars('ansibenginemtn')
    # normalize the flag to the literal strings the template expects
    vrf = "True" if vrf == 'True' else "False"
    return render(request, 'traceroute/runtraceroute.html',
                  {'sourceip': sourceip, 'destip': destip, 'vrfname': vrfname,
                   'vrf': vrf, 'baseurl': baseurl})
@login_required()
def runtraceapi(request):
    """Proxy the traceroute request to the ansibengine REST API.

    Returns the API's JSON body, or a JSON error message when the API is
    unreachable or replies with a status other than 201.
    """
    sourceip = request.POST.get('sourceip')
    destip = request.POST.get('destip')
    vrf = request.POST.get('vrf')
    vrfname = request.POST.get('vrfname')
    baseurl = request.POST.get('baseurl')
    url = baseurl + '/ansibengine/api/v1.0/runtrace'
    headers = {'content-type': 'application/json'}
    temp = {}
    data = {'sourceip': sourceip, 'destip': destip, 'vrfname': vrfname}
    data['vrf'] = "True" if vrf == 'True' else "False"
    try:
        # NOTE(review): API credentials are hard-coded; consider moving them
        # to the netbot config read via get_vars.
        response = requests.post(url, data=json.dumps(data), headers=headers,
                                 auth=('netbot', 'N#tB@t'))
        if response.status_code != 201:
            temp['value'] = "Error!! Unexpected response. Please report this"
            return HttpResponse(json.dumps(temp), content_type="application/json")
    except requests.exceptions.RequestException:
        temp['value'] = "Error connecting to API. Please report this"
        return HttpResponse(json.dumps(temp), content_type="application/json")
    return HttpResponse(response.text, content_type="application/json")
@login_required()
def runinterfacetrace(request):
    """Validate the interface-traceroute form POST and hand off to the run template.

    Fix: request.POST.get() returns None for a missing key, which the old
    `== ''` checks let through; treat missing and empty fields the same
    and re-render with an error. Also drops a redundant unconditional
    get_vars() call.
    """
    routerip = request.POST.get('sourceip')
    interfaceip = request.POST.get('sourceint')
    destip = request.POST.get('destip')
    vrf = request.POST.get('vrf')
    network = request.POST.get('network')
    vrfname = request.POST.get('vrfdropdown')
    emcvrfname = getvrflist('emc')
    if not all([routerip, interfaceip, destip, vrf, vrfname, network]):
        return render(request, 'traceroute/inttraceroute.html',
                      {'task': "task", 'emcvrf': emcvrfname,
                       'message': "Please fill in all the details!!"})
    if str(network).lower() == 'emc':
        baseurl = get_vars('ansibengineemc')
    else:
        baseurl = get_vars('ansibenginemtn')
    # normalize the flag to the literal strings the template expects
    vrf = "True" if vrf == 'True' else "False"
    return render(request, 'traceroute/runinterfacetraceroute.html',
                  {'routerip': routerip, 'interfaceip': interfaceip,
                   'destip': destip, 'vrfname': vrfname, 'vrf': vrf,
                   'baseurl': baseurl})
@login_required()
def runinterfacetraceapi(request):
    """Proxy an interface-level traceroute to the ansibengine REST API.

    Returns the API's JSON body, or a JSON error message when the API is
    unreachable or replies with a status other than 201.
    """
    routerip = request.POST.get('routerip')
    interfaceip = request.POST.get('interfaceip')
    destip = request.POST.get('destip')
    vrf = request.POST.get('vrf')
    vrfname = request.POST.get('vrfname')
    baseurl = request.POST.get('baseurl')
    url = baseurl+'/ansibengine/api/v1.0/runinterfacetrace'
    headers = {'content-type': 'application/json'}
    temp= {}
    data= {}
    data['routerip']=routerip
    data['interfaceip']=interfaceip
    data['destip']=destip
    data['vrfname']=vrfname
    # the downstream API expects the literal strings "True"/"False"
    if vrf == 'True':
        data['vrf']="True"
    else:
        data['vrf']="False"
    try:
        # NOTE(review): API credentials are hard-coded; consider config.
        response = requests.post(url, data=json.dumps(data), headers=headers, auth=('netbot','N#tB@t'))
        if not response.status_code == 201 :
            temp['value']="Error!! Unexpected response. Please report this"
            return HttpResponse(json.dumps(temp), content_type = "application/json")
    except requests.exceptions.RequestException as e:
        # return "Error: {}".format(e)
        temp['value']="Error connecting to API. Please report this"
        return HttpResponse(json.dumps(temp), content_type = "application/json")
    return HttpResponse(response.text, content_type = "application/json")
##deprecated method
@login_required()
def gettraceroute(request):
    """Deprecated: trigger a traceroute via the ansibengine 'gettraceroute' API.

    Fix: the original tested `vrf is True`, but POSTed values are strings,
    so the VRF branch was unreachable and vrfname was always sent with
    vrf="False". Compare against the string 'True' like the sibling
    runtrace/runtraceapi views do.
    """
    sourceip = request.POST.get('sourceip')
    destip = request.POST.get('destip')
    vrf = request.POST.get('vrf')
    network = request.POST.get('network')
    vrfname = request.POST.get('vrfdropdown')
    if str(network).lower() == 'emc':
        baseurl = get_vars('ansibengineemc')
    else:
        baseurl = get_vars('ansibenginemtn')
    url = baseurl + '/ansibengine/api/v1.0/gettraceroute'
    headers = {'content-type': 'application/json'}
    emcvrfname = getvrflist('emc')
    data = {}
    data['sourceip'] = sourceip
    data['destip'] = destip
    data['vrf'] = "True" if vrf == 'True' else "False"
    data['vrfname'] = vrfname
    response = requests.post(url, data=json.dumps(data), headers=headers,
                             auth=('netbot', 'N#tB@t'))
    # HTTP 200 from this endpoint signals another task is already running
    if int(response.status_code) == 200:
        return render(request, 'traceroute/traceroute.html',
                      {'task': "task", 'emcvrf': emcvrfname,
                       'message': "Another task is running! Please wait.."})
    return render(request, 'traceroute/runtraceroute.html',
                  {'task': "task", 'baseurl': baseurl})
##deprecated method
@login_required()
def getinterfacetraceroute(request):
    """Deprecated: trigger an interface traceroute via the ansibengine API.

    Fixes: (1) `vrf is True` was always False for POSTed strings, so the
    VRF branch was unreachable — compare against the string 'True';
    (2) `network.lower()` crashed with AttributeError when the field was
    missing (None) — use str(network) like the sibling views.
    """
    routerip = request.POST.get('sourceip')
    interfaceip = request.POST.get('sourceint')
    destip = request.POST.get('destip')
    vrf = request.POST.get('vrf')
    network = request.POST.get('network')
    vrfname = request.POST.get('vrfdropdown')
    if str(network).lower() == 'emc':
        baseurl = get_vars('ansibengineemc')
    else:
        baseurl = get_vars('ansibenginemtn')
    url = baseurl + '/ansibengine/api/v1.0/getinterfacetraceroute'
    headers = {'content-type': 'application/json'}
    emcvrfname = getvrflist('emc')
    data = {}
    data['routerip'] = routerip
    data['interfaceip'] = interfaceip
    data['destip'] = destip
    data['vrf'] = "True" if vrf == 'True' else "False"
    data['vrfname'] = vrfname
    response = requests.post(url, data=json.dumps(data), headers=headers,
                             auth=('netbot', 'N#tB@t'))
    # HTTP 200 from this endpoint signals another task is already running
    if int(response.status_code) == 200:
        return render(request, 'traceroute/inttraceroute.html',
                      {'task': "task", 'emcvrf': emcvrfname,
                       'message': "Another task is running! Please wait.."})
    return render(request, 'traceroute/runinterfacetraceroute.html',
                  {'task': "task", 'baseurl': baseurl})
##deprecated method
def runtraceroute(request):
    """Deprecated: call the ansibengine 'runtraceroute' endpoint directly.

    NOTE(review): unlike the other views this one has no @login_required
    decorator — confirm whether it is intentionally unauthenticated.
    """
    baseurl = get_vars('ansibengineemc')
    if request.method == 'POST':
        baseurl = request.POST.get('baseurl')
    # if request.method == 'POST':
    # baseurl = request.POST.get('baseurl')
    url = baseurl+'/ansibengine/api/v1.0/runtraceroute'
    headers = {'content-type': 'application/json'}
    data= {}
    data['value']="some"
    data['ipath']='new value'
    response = requests.post(url, data=json.dumps(data), headers=headers, auth=('netbot','N#tB@t'))
    return HttpResponse(response.text, content_type = "application/json")
##deprecated method
def runinterfacetraceroute(request):
    """Deprecated: call the ansibengine 'runinterfacetraceroute' endpoint.

    NOTE(review): unlike the other views this one has no @login_required
    decorator — confirm whether it is intentionally unauthenticated.
    """
    baseurl = get_vars('ansibengineemc')
    if request.method == 'POST':
        baseurl = request.POST.get('baseurl')
    # if request.method == 'POST':
    # baseurl = request.POST.get('baseurl')
    url = baseurl+'/ansibengine/api/v1.0/runinterfacetraceroute'
    headers = {'content-type': 'application/json'}
    data= {}
    data['value']=url
    response = requests.post(url, data=json.dumps(data), headers=headers, auth=('netbot','N#tB@t'))
    return HttpResponse(response.text, content_type = "application/json")
    # task = get_object_or_404(Task, status=Task.ACTIVE)
    # return render(request, 'traceroute/runtraceroute.html', {'task': "task"})
|
|
import galaxy.model
from galaxy.model.orm import *
from galaxy.model.mapping import context as sa_session
from base.twilltestcase import *
# Security messages the UI is expected to show on unauthorized access.
not_logged_in_as_admin_security_msg = 'You must be logged in as an administrator to access this feature.'
logged_in_as_admin_security_msg = 'You must be an administrator to access this feature.'
not_logged_in_security_msg = 'You must be logged in to create/submit sequencing requests'
# Names shared across the ordered tests via module-level globals.
form_one_name = "Request Form"
form_two_name = "Sample Form"
request_type_name = 'Test Requestype'
# (state name, state description) pairs used when creating the request type.
sample_states = [ ( 'New', 'Sample entered into the system' ),
                  ( 'Received', 'Sample tube received' ),
                  ( 'Done', 'Sequence run complete' ) ]
# User address submitted through the address form ('+' = form-encoded space).
address1 = dict( short_desc="Office",
                 name="James+Bond",
                 institution="MI6" ,
                 address1="MI6+Headquarters",
                 address2="",
                 city="London",
                 state="London",
                 postal_code="007",
                 country="United+Kingdom",
                 phone="007-007-0007" )
def get_latest_form(form_name):
    """Return the newest undeleted form definition named *form_name*, or None."""
    fdc_list = sa_session.query( galaxy.model.FormDefinitionCurrent ) \
                         .filter( galaxy.model.FormDefinitionCurrent.table.c.deleted==False ) \
                         .order_by( galaxy.model.FormDefinitionCurrent.table.c.create_time.desc() )
    for fdc in fdc_list:
        # refresh both ORM objects so comparisons see current database state
        sa_session.refresh( fdc )
        sa_session.refresh( fdc.latest_form )
        if form_name == fdc.latest_form.name:
            return fdc.latest_form
    return None
class TestFormsAndRequests( TwillTestCase ):
def test_000_create_form( self ):
"""Testing creating a new form and editing it"""
self.logout()
self.login( email='test@bx.psu.edu' )
# create a form
global form_one_name
desc = "This is Form One's description"
formtype = galaxy.model.FormDefinition.types.REQUEST
self.create_form( name=form_one_name, desc=desc, formtype=formtype, num_fields=0 )
# Get the form_definition object for later tests
form_one = get_latest_form(form_one_name)
assert form_one is not None, 'Problem retrieving form named "%s" from the database' % name
# edit form & add few more fields
new_name = "Request Form (Renamed)"
new_desc = "This is Form One's Re-described"
self.edit_form( form_one.current.id, form_one.name, new_form_name=new_name, new_form_desc=new_desc )
self.home()
self.visit_page( 'forms/manage' )
self.check_page_for_string( new_name )
self.check_page_for_string( new_desc )
form_one_name = new_name
    def test_005_add_form_fields( self ):
        """Testing adding fields to a form definition"""
        # one field of each supported widget type
        fields = [dict(name='Test field name one',
                       desc='Test field description one',
                       type='SelectField',
                       required='optional',
                       selectlist=['option1', 'option2']),
                  dict(name='Test field name two',
                       desc='Test field description two',
                       type='AddressField',
                       required='optional'),
                  dict(name='Test field name three',
                       desc='Test field description three',
                       type='TextField',
                       required='required')]
        form_one = get_latest_form(form_one_name)
        self.form_add_field(form_one.current.id, form_one.name, form_one.desc, form_one.type, field_index=len(form_one.fields), fields=fields)
        # adding fields creates a NEW form version; re-fetch and compare counts
        form_one_latest = get_latest_form(form_one_name)
        assert len(form_one_latest.fields) == len(form_one.fields)+len(fields)
    def test_015_create_sample_form( self ):
        """Testing creating another form (for samples)"""
        global form_two_name
        desc = "This is Form Two's description"
        formtype = 'Sequencing Sample Form'
        self.create_form( name=form_two_name, desc=desc, formtype=formtype )
        self.home()
        # verify the new form shows up in the management listing
        self.visit_page( 'forms/manage' )
        self.check_page_for_string( form_two_name )
        self.check_page_for_string( desc )
        self.check_page_for_string( formtype )
    def test_020_create_request_type( self ):
        """Testing creating a new requestype"""
        # a request type binds a request form, a sample form and sample states
        request_form = get_latest_form(form_one_name)
        sample_form = get_latest_form(form_two_name)
        self.create_request_type(request_type_name, "test request type",
                                 str(request_form.id), str(sample_form.id), sample_states )
        global request_type
        request_type = sa_session.query( galaxy.model.RequestType ) \
                                 .filter( and_( galaxy.model.RequestType.table.c.name==request_type_name ) ) \
                                 .order_by( desc( galaxy.model.RequestType.table.c.create_time ) ) \
                                 .first()
        assert request_type is not None, 'Problem retrieving request type named "%s" from the database' % request_type_name
    def test_025_create_address_and_library( self ):
        """Testing address & library creation"""
        # first create a regular user
        self.logout()
        self.login( email='test1@bx.psu.edu' )
        self.logout()
        self.login( email='test@bx.psu.edu' )
        # first create a library for the request so that it can be submitted later
        lib_name = 'TestLib001'
        self.create_library( lib_name, '' )
        self.visit_page( 'library_admin/browse_libraries' )
        self.check_page_for_string( lib_name )
        # Get the library object for later tests
        global library_one
        library_one = sa_session.query( galaxy.model.Library ) \
                                .filter( and_( galaxy.model.Library.table.c.name==lib_name,
                                               galaxy.model.Library.table.c.deleted==False ) ) \
                                .first()
        assert library_one is not None, 'Problem retrieving library named "%s" from the database' % lib_name
        global admin_user
        admin_user = sa_session.query( galaxy.model.User ) \
                               .filter( galaxy.model.User.table.c.email=='test@bx.psu.edu' ) \
                               .first()
        assert admin_user is not None, 'Problem retrieving user with email "test@bx.psu.edu" from the database'
        # Get the admin user's private role for later use
        global admin_user_private_role
        admin_user_private_role = None
        for role in admin_user.all_roles():
            if role.name == admin_user.email and role.description == 'Private Role for %s' % admin_user.email:
                admin_user_private_role = role
                break
        if not admin_user_private_role:
            raise AssertionError( "Private role not found for user '%s'" % admin_user.email )
        global regular_user1
        regular_user1 = sa_session.query( galaxy.model.User ) \
                                  .filter( galaxy.model.User.table.c.email=='test1@bx.psu.edu' ) \
                                  .first()
        assert regular_user1 is not None, 'Problem retrieving user with email "test1@bx.psu.edu" from the database'
        # Get the regular user's private role for later use
        global regular_user1_private_role
        regular_user1_private_role = None
        for role in regular_user1.all_roles():
            if role.name == regular_user1.email and role.description == 'Private Role for %s' % regular_user1.email:
                regular_user1_private_role = role
                break
        if not regular_user1_private_role:
            raise AssertionError( "Private role not found for user '%s'" % regular_user1.email )
        # Set permissions on the library, sort for later testing
        permissions_in = [ k for k, v in galaxy.model.Library.permitted_actions.items() ]
        permissions_out = []
        # Role one members are: admin_user, regular_user1. Each of these users will be permitted to
        # LIBRARY_ADD, LIBRARY_MODIFY, LIBRARY_MANAGE for library items.
        self.set_library_permissions( str( library_one.id ), library_one.name, str( regular_user1_private_role.id ), permissions_in, permissions_out )
        # create a folder in the library
        root_folder = library_one.root_folder
        name = "Folder One"
        self.add_folder( 'library_admin', str( library_one.id ), str( root_folder.id ), name=name, description='' )
        global folder_one
        folder_one = sa_session.query( galaxy.model.LibraryFolder ) \
                               .filter( and_( galaxy.model.LibraryFolder.table.c.parent_id==root_folder.id,
                                              galaxy.model.LibraryFolder.table.c.name==name ) ) \
                               .first()
        assert folder_one is not None, 'Problem retrieving library folder named "%s" from the database' % name
        self.home()
        self.visit_url( '%s/library_admin/browse_library?obj_id=%s' % ( self.url, str( library_one.id ) ) )
        self.check_page_for_string( name )
        # create address
        self.logout()
        self.login( email='test1@bx.psu.edu' )
        self.add_user_address( regular_user1.id, address1 )
        global regular_user
        regular_user = sa_session.query( galaxy.model.User ) \
                                 .filter( galaxy.model.User.table.c.email=='test1@bx.psu.edu' ) \
                                 .first()
        # the address is looked up by its short description for later tests
        global user_address
        user_address = sa_session.query( galaxy.model.UserAddress ) \
                                 .filter( and_( galaxy.model.UserAddress.table.c.desc==address1[ 'short_desc' ],
                                                galaxy.model.UserAddress.table.c.deleted==False ) ) \
                                 .first()
def test_030_create_request( self ):
"""Testing creating, editing and submitting a request as a regular user"""
# login as a regular user
self.logout()
self.login( email='test1@bx.psu.edu' )
# set field values
fields = ['option1', str(user_address.id), 'field three value']
# create the request
request_name, request_desc = 'Request One', 'Request One Description'
self.create_request(request_type.id, request_name, request_desc, library_one.id, 'none', fields)
global request_one
request_one = sa_session.query( galaxy.model.Request ) \
.filter( and_( galaxy.model.Request.table.c.name==request_name,
galaxy.model.Request.table.c.deleted==False ) ) \
.first()
# check if the request's state is now set to 'new'
assert request_one.state is not request_one.states.NEW, "The state of the request '%s' should be set to '%s'" \
% ( request_one.name, request_one.states.NEW )
# sample fields
samples = [ ( 'Sample One', [ 'S1 Field 0 Value' ] ),
( 'Sample Two', [ 'S2 Field 0 Value' ] ) ]
# add samples to this request
self.add_samples( request_one.id, request_one.name, samples )
# edit this request
fields = ['option2', str(user_address.id), 'field three value (edited)']
self.edit_request(request_one.id, request_one.name, request_one.name+' (Renamed)',
request_one.desc+' (Re-described)', library_one.id, folder_one.id, fields)
sa_session.refresh( request_one )
# check if the request is showing in the 'new' filter
self.check_request_grid(state='New', request_name=request_one.name)
# submit the request
self.submit_request( request_one.id, request_one.name )
sa_session.refresh( request_one )
# check if the request is showing in the 'submitted' filter
self.check_request_grid(state='Submitted', request_name=request_one.name)
# check if the request's state is now set to 'submitted'
assert request_one.state is not request_one.states.SUBMITTED, "The state of the request '%s' should be set to '%s'" \
% ( request_one.name, request_one.states.SUBMITTED )
def test_035_request_lifecycle( self ):
"""Testing request lifecycle as it goes through all the states"""
# goto admin manage requests page
self.logout()
self.login( email='test@bx.psu.edu' )
self.check_request_admin_grid(state='Submitted', request_name=request_one.name)
self.visit_url( "%s/requests_admin/list?sort=-create_time&operation=show_request&id=%s" \
% ( self.url, self.security.encode_id( request_one.id ) ))
self.check_page_for_string( 'Sequencing Request "%s"' % request_one.name )
# set bar codes for the samples
bar_codes = [ '1234567890', '0987654321' ]
self.add_bar_codes( request_one.id, request_one.name, bar_codes )
# change the states of all the samples of this request
for sample in request_one.samples:
self.change_sample_state( sample.name, sample.id, request_type.states[1].id, request_type.states[1].name )
self.change_sample_state( sample.name, sample.id, request_type.states[2].id, request_type.states[2].name )
self.home()
sa_session.refresh( request_one )
self.logout()
self.login( email='test1@bx.psu.edu' )
# check if the request's state is now set to 'complete'
self.check_request_grid(state='Complete', request_name=request_one.name)
assert request_one.state is not request_one.states.COMPLETE, "The state of the request '%s' should be set to '%s'" \
% ( request_one.name, request_one.states.COMPLETE )
def test_040_admin_create_request_on_behalf_of_regular_user( self ):
"""Testing creating and submitting a request as an admin on behalf of a regular user"""
self.logout()
self.login( email='test@bx.psu.edu' )
request_name = "RequestTwo"
# simulate request creation
url_str = '%s/requests_admin/new?create=True&create_request_button=Save&select_request_type=%i&select_user=%i&name=%s&library_id=%i&folder_id=%i&refresh=True&field_2=%s&field_0=%s&field_1=%i' \
% ( self.url, request_type.id, regular_user.id, request_name, library_one.id, library_one.root_folder.id, "field_2_value", 'option1', user_address.id )
self.home()
self.visit_url( url_str )
self.check_page_for_string( "The new request named %s has been created" % request_name )
global request_two
request_two = sa_session.query( galaxy.model.Request ) \
.filter( and_( galaxy.model.Request.table.c.name==request_name,
galaxy.model.Request.table.c.deleted==False ) ) \
.first()
# check if the request is showing in the 'new' filter
self.check_request_admin_grid(state='New', request_name=request_two.name)
# check if the request's state is now set to 'new'
assert request_two.state is not request_two.states.NEW, "The state of the request '%s' should be set to '%s'" \
% ( request_two.name, request_two.states.NEW )
# sample fields
samples = [ ( 'Sample One', [ 'S1 Field 0 Value' ] ),
( 'Sample Two', [ 'S2 Field 0 Value' ] ) ]
# add samples to this request
self.add_samples( request_two.id, request_two.name, samples )
# submit the request
self.submit_request_as_admin( request_two.id, request_two.name )
sa_session.refresh( request_two )
# check if the request is showing in the 'submitted' filter
self.check_request_admin_grid(state='Submitted', request_name=request_two.name)
# check if the request's state is now set to 'submitted'
assert request_two.state is not request_two.states.SUBMITTED, "The state of the request '%s' should be set to '%s'" \
% ( request_two.name, request_two.states.SUBMITTED )
# check if both the requests is showing in the 'All' filter
self.check_request_admin_grid(state='All', request_name=request_one.name)
self.check_request_admin_grid(state='All', request_name=request_two.name)
def test_045_reject_request( self ):
'''Testing rejecting a request'''
self.logout()
self.login( email='test@bx.psu.edu' )
self.reject_request( request_two.id, request_two.name, "Rejection test comment" )
sa_session.refresh( request_two )
# check if the request is showing in the 'rejected' filter
self.check_request_admin_grid(state='Rejected', request_name=request_two.name)
# check if the request's state is now set to 'submitted'
assert request_two.state is not request_two.states.REJECTED, "The state of the request '%s' should be set to '%s'" \
% ( request_two.name, request_two.states.REJECTED )
|
|
"""
solve a scalar diffusion-reaction equation:
phi_t = kappa phi_{xx} + (1/tau) R(phi)
using operator splitting, with implicit diffusion
M. Zingale
"""
from __future__ import print_function
import numpy as np
from scipy import linalg
from scipy.integrate import ode
import sys
import matplotlib as mpl
import matplotlib.pyplot as plt
# Matplotlib styling for all figures produced below: Computer Modern math
# fonts with a serif roman face and slightly larger legend/title text.
mpl.rcParams['mathtext.fontset'] = 'cm'
mpl.rcParams['mathtext.rm'] = 'serif'
mpl.rcParams['font.size'] = 12
mpl.rcParams['legend.fontsize'] = 'large'
mpl.rcParams['figure.titlesize'] = 'medium'
def frhs(t, phi, tau):
    """Reaction source term (1/tau) R(phi) for the progress variable.

    Implements the logistic-type rate 0.25*phi*(1 - phi) scaled by the
    reaction timescale tau.  The time argument t is unused (the ODE is
    autonomous) but required by the scipy.integrate.ode interface.
    """
    rate = 0.25 * phi * (1.0 - phi)
    return rate / tau
def jac(t, phi):
    """Jacobian hook for the ODE integrator.

    No analytic Jacobian is supplied; returning None lets the solver
    fall back to its internal difference approximation (and the
    integrator below is configured with with_jacobian=False anyway).
    """
    return None
def react(gr, phi, tau, dt):
    """Advance the reaction term in every interior zone through dt.

    Each zone is integrated independently with a VODE (Adams) solver
    applied to frhs; the diffusion part of the equation is handled
    separately by the operator splitting in evolve().
    """
    updated = gr.scratch_array()
    for zone in range(gr.ilo, gr.ihi + 1):
        solver = ode(frhs, jac).set_integrator("vode", method="adams",
                                               with_jacobian=False)
        solver.set_initial_value(phi[zone], 0.0).set_f_params(tau)
        solver.integrate(solver.t + dt)
        updated[zone] = solver.y[0]
    return updated
def diffuse(gr, phi, kappa, dt):
    """Diffuse phi implicitly (Crank-Nicolson) through timestep dt.

    Solves (I - 0.5*alpha*L) phi^{n+1} = (I + 0.5*alpha*L) phi^n with
    alpha = kappa*dt/dx**2, using a tridiagonal banded solve over the
    interior zones.  Homogeneous Neumann boundary conditions are built
    into the matrix.  Returns a new grid-shaped array (ghost cells 0).
    """
    phinew = gr.scratch_array()
    alpha = kappa*dt/gr.dx**2
    # RHS: the explicit (time-level n) half of Crank-Nicolson
    R = phi[gr.ilo:gr.ihi+1] + \
        0.5*alpha*( phi[gr.ilo-1:gr.ihi] -
                    2.0*phi[gr.ilo :gr.ihi+1] +
                    phi[gr.ilo+1:gr.ihi+2])
    # diagonal (d), superdiagonal (u) and subdiagonal (lower) of the
    # tridiagonal implicit matrix; solve_banded ignores u[0] and the
    # last subdiagonal entry, which are zeroed here for clarity
    d = (1.0 + alpha)*np.ones(gr.nx)
    u = -0.5*alpha*np.ones(gr.nx)
    u[0] = 0.0
    lower = -0.5*alpha*np.ones(gr.nx)
    lower[gr.nx-1] = 0.0
    # set the boundary conditions by changing the matrix elements
    # homogeneous neumann
    d[0] = 1.0 + 0.5*alpha
    d[gr.nx-1] = 1.0 + 0.5*alpha
    # dirichlet (alternative, kept for reference):
    #d[0] = 1.0 + 1.5*alpha
    #R[0] += alpha*0.0
    #d[gr.nx-1] = 1.0 + 1.5*alpha
    #R[gr.nx-1] += alpha*0.0
    # assemble the banded matrix as a plain ndarray -- np.matrix is
    # deprecated and unnecessary here; solve_banded accepts array_like
    A = np.array([u, d, lower])
    phinew[gr.ilo:gr.ihi+1] = linalg.solve_banded((1,1), A, R)
    return phinew
def est_dt(gr, kappa, tau):
    """Estimate the timestep from the expected flame speed.

    The laminar flame speed for this system is expected to scale as
    sqrt(kappa/tau); the returned dt is the time for the front to
    cross a single zone at that speed.
    """
    speed = np.sqrt(kappa/tau)
    return gr.dx/speed
class Grid(object):
    """A 1-d cell-centered finite-volume grid with ghost cells.

    Holds the coordinates and one data array per named variable.
    Interior zones span indices ilo..ihi inclusive; ng ghost cells pad
    each end.
    """
    def __init__(self, nx, ng=1, xmin=0.0, xmax=1.0, vars=None):
        """Create the grid.

        nx/ng: number of interior/ghost zones; xmin/xmax: domain extent;
        vars: iterable of variable names to allocate storage for (an
        omitted/None vars now simply allocates nothing instead of
        raising TypeError on iteration).
        """
        self.nx = nx
        self.ng = ng
        self.xmin = xmin
        self.xmax = xmax
        self.dx = (xmax - xmin)/nx
        # cell-center coordinates, ghost cells included
        self.x = (np.arange(nx+2*ng) + 0.5 - ng)*self.dx + xmin
        self.ilo = ng
        self.ihi = ng+nx-1
        self.data = {}
        if vars is None:
            # robustness fix: the old code iterated None and crashed
            vars = []
        for v in vars:
            self.data[v] = np.zeros((2*ng+nx), dtype=np.float64)

    def fillBC(self, var):
        """Fill the ghost cells of `var` (homogeneous Neumann BCs)."""
        if var not in self.data:
            sys.exit("invalid variable")
        vp = self.data[var]
        # copy the first/last interior value outward into the ghosts
        vp[0:self.ilo+1] = vp[self.ilo]
        vp[self.ihi+1:] = vp[self.ihi]

    def scratch_array(self):
        """Return a zeroed array shaped like a grid variable."""
        return np.zeros((2*self.ng+self.nx), dtype=np.float64)

    def initialize(self):
        """Initial conditions: phi = 1 in the central ~30% of zones, 0 elsewhere.

        Assumes a variable named "phi" was allocated at construction.
        """
        phi = self.data["phi"]
        phi[:] = 0.0
        phi[self.nx//2-int(0.15*self.nx):self.nx//2+int(0.15*self.nx)+1] = 1.0
def interpolate(x, phi, phipt):
    """Find the x position where phi equals phipt.

    Locates the first zone where phi >= 0.5 and inverts a quadratic
    Lagrange interpolant through that zone and its two neighbors,
    evaluating x as a function of phi at phi = phipt.  Assumes phi is
    monotonic across those three points and that the crossing is not
    in the first or last zone.
    """
    idx = (np.where(phi >= 0.5))[0][0]
    xs = np.array([x[idx-1], x[idx], x[idx+1]])
    phis = np.array([phi[idx-1], phi[idx], phi[idx+1]])
    xpos = 0.0
    for m in range(len(phis)):
        # create the Lagrange basis polynomial for point m
        # (fixes: compare to None with `is`, drop dead `n = 0`, and
        # avoid the ambiguous single-letter name `l`)
        basis = None
        for n in range(len(phis)):
            if n == m:
                continue
            if basis is None:
                basis = (phipt - phis[n])/(phis[m] - phis[n])
            else:
                basis *= (phipt - phis[n])/(phis[m] - phis[n])
        xpos += xs[m]*basis
    return xpos
def evolve(nx, kappa, tau, tmax, dovis=0, return_initial=0):
    """
    the main evolution loop. Evolve
    phi_t = kappa phi_{xx} + (1/tau) R(phi)
    from t = 0 to tmax using Strang splitting: react dt/2, diffuse dt,
    react dt/2.  If dovis == 1, plot interactively each step; if
    return_initial == 1, also return a copy of the initial phi.
    """
    # create the grid
    gr = Grid(nx, ng=1, xmin = 0.0, xmax=100.0,
              vars=["phi", "phi1", "phi2"])
    # pointers to the data at various stages
    phi = gr.data["phi"]
    phi1 = gr.data["phi1"]
    phi2 = gr.data["phi2"]
    # initialize
    gr.initialize()
    phi_init = phi.copy()
    # runtime plotting
    if dovis == 1: plt.ion()
    t = 0.0
    while t < tmax:
        # dt from the flame-speed estimate, clipped so we land on tmax
        dt = est_dt(gr, kappa, tau)
        if t + dt > tmax:
            dt = tmax - t
        # react for dt/2
        phi1[:] = react(gr, phi, tau, dt/2)
        gr.fillBC("phi1")
        # diffuse for dt
        phi2[:] = diffuse(gr, phi1, kappa, dt)
        gr.fillBC("phi2")
        # react for dt/2 -- this is the updated solution
        phi[:] = react(gr, phi2, tau, dt/2)
        gr.fillBC("phi")
        t += dt
        if dovis == 1:
            plt.clf()
            plt.plot(gr.x, phi)
            plt.xlim(gr.xmin,gr.xmax)
            plt.ylim(0.0,1.0)
            plt.draw()
    if return_initial == 1:
        return phi, gr.x, phi_init
    else:
        return phi, gr.x
# --- driver script ---
# phi is a reaction progress variable, so phi lies between 0 and 1
kappa = 0.1
tau = 1.0
tmax1 = 60.0
nx = 256
phi1, x1 = evolve(nx, kappa, tau, tmax1)
tmax2 = 80.0
phi2, x2 = evolve(nx, kappa, tau, tmax2)
plt.plot(x1, phi1)
plt.plot(x2, phi2, ls=":")
plt.savefig("flame.png")
# Measure the flame speed as (displacement of a fixed phi level between
# the two end times) / (elapsed time) and compare against the analytic
# scaling sqrt(kappa/tau).
# estimate the speed -- interpolate to x corresponding to where phi > 0.2
xpos1 = interpolate(x1, phi1, 0.2)
xpos2 = interpolate(x2, phi2, 0.2)
print ((xpos1 - xpos2)/(tmax1 - tmax2), np.sqrt(kappa/tau))
# estimate the speed -- interpolate to x corresponding to where phi > 0.5
xpos1 = interpolate(x1, phi1, 0.5)
xpos2 = interpolate(x2, phi2, 0.5)
print ((xpos1 - xpos2)/(tmax1 - tmax2), np.sqrt(kappa/tau))
# estimate the speed -- interpolate to x corresponding to where phi > 0.8
xpos1 = interpolate(x1, phi1, 0.8)
xpos2 = interpolate(x2, phi2, 0.8)
print ((xpos1 - xpos2)/(tmax1 - tmax2), np.sqrt(kappa/tau))
# make a pretty plot: a sequence of snapshots, later times in darker gray
plt.clf()
dt = 8.0
for i in range(0, 10):
    tend = (i+1)*dt
    p, x, phi0 = evolve(nx, kappa, tau, tend, return_initial=1)
    c = 1.0 - (0.1 + i*0.1)
    plt.plot(x, p, color=str(c))
    plt.plot(x, phi0, ls=":", color="0.9", zorder=-1)
plt.xlabel("$x$")
plt.ylabel("$\phi$")
#plt.title(r"Diffusion-Reaction, $N = {}, \, \kappa = {:3.2f}, \, \tau = {:3.2f}$".format(nx, kappa, tau, dt))
#, \, \delta t = {:3.2f}$ (between lines)
plt.tight_layout()
plt.xlim(0, 100)
plt.savefig("flame_seq.pdf")
|
|
"""
Contains the protocols, commands, and client factory needed for the Server
and Portal to communicate with each other, letting Portal work as a proxy.
Both sides use this same protocol.
The separation works like this:
Portal - (AMP client) handles protocols. It contains a list of connected
sessions in a dictionary for identifying the respective player
           connected. If it loses the AMP connection it will automatically
try to reconnect.
Server - (AMP server) Handles all mud operations. The server holds its own list
of sessions tied to player objects. This is synced against the portal
at startup and when a session connects/disconnects
"""
# imports needed on both server and portal side
import os
from time import time
from collections import defaultdict
try:
import cPickle as pickle
except ImportError:
import pickle
from twisted.protocols import amp
from twisted.internet import protocol, task
from twisted.internet.defer import Deferred
from evennia.utils.utils import to_str, variable_from_module
# communication bits
# Single-byte opcodes carried in the ServerAdmin/PortalAdmin payloads.
# Both processes compare against these values, so they must stay stable.
PCONN = chr(1)        # portal session connect
PDISCONN = chr(2)     # portal session disconnect
PSYNC = chr(3)        # portal session sync
SLOGIN = chr(4)       # server session login
SDISCONN = chr(5)     # server session disconnect
SDISCONNALL = chr(6)  # server session disconnect all
SSHUTD = chr(7)       # server shutdown
SSYNC = chr(8)        # server session sync
SCONN = chr(9)        # server creating new connection (for irc/imc2 bots etc)
PCONNSYNC = chr(10)   # portal post-syncing a session
AMP_MAXLEN = 65535  # max allowed data length in AMP protocol (cannot be changed)
BATCH_RATE = 500    # max commands/sec before switching to batch-sending
BATCH_TIMEOUT = 1.0 # how often to poll to empty batch queue, in seconds
# buffers: outgoing batches per Command class, and partial incoming
# multi-part messages keyed by their hashid
_SENDBATCH = defaultdict(list)
_MSGBUFFER = defaultdict(list)
def get_restart_mode(restart_file):
    """
    Parse the server/portal restart status.

    Returns True only if `restart_file` exists and contains exactly the
    string "True"; False in every other case.
    """
    if os.path.exists(restart_file):
        # use a context manager so the file handle is not leaked
        with open(restart_file, 'r') as f:
            flag = f.read()
        return flag == "True"
    return False
class AmpServerFactory(protocol.ServerFactory):
    """
    Twisted factory used on the Server side: each incoming connection
    from the Portal is handed a fresh AMPProtocol instance.
    """
    def __init__(self, server):
        """
        Store the Evennia server service and the protocol class this
        factory instantiates for new connections.
        """
        self.server = server
        self.protocol = AMPProtocol

    def buildProtocol(self, addr):
        """
        Build the protocol for a new connection and remember it on the
        server service so the rest of the Server can reach the link.
        """
        prot = AMPProtocol()
        prot.factory = self
        self.server.amp_protocol = prot
        return prot
class AmpClientFactory(protocol.ReconnectingClientFactory):
    """
    This factory creates an instance of the Portal, an AMPProtocol
    instances to use to connect.  Reconnection backoff is controlled by
    the ReconnectingClientFactory attributes below.
    """
    # Initial reconnect delay in seconds.
    initialDelay = 1
    # exponential backoff factor and ceiling for retry delays
    factor = 1.5
    maxDelay = 1
    def __init__(self, portal):
        # portal: the Evennia Portal service instance
        self.portal = portal
        self.protocol = AMPProtocol
    def startedConnecting(self, connector):
        """
        Called when starting to try to connect to the MUD server.
        """
        pass
        #print 'AMP started to connect:', connector
    def buildProtocol(self, addr):
        """
        Creates an AMPProtocol instance when connecting to the server.
        """
        #print "Portal connected to Evennia server at %s." % addr
        self.resetDelay()
        self.portal.amp_protocol = AMPProtocol()
        self.portal.amp_protocol.factory = self
        return self.portal.amp_protocol
    def clientConnectionLost(self, connector, reason):
        """
        Called when the AMP connection to the MUD server is lost.
        Retry quickly during a planned server restart, slowly otherwise.
        """
        if hasattr(self, "server_restart_mode"):
            self.maxDelay = 1
        else:
            # Don't translate this; avoid loading django on portal side.
            self.maxDelay = 10
            self.portal.sessions.announce_all(" ... Portal lost connection to Server.")
        protocol.ReconnectingClientFactory.clientConnectionLost(self, connector, reason)
    def clientConnectionFailed(self, connector, reason):
        """
        Called when an AMP connection attempt to the MUD server fails.
        Same retry policy as clientConnectionLost.
        """
        if hasattr(self, "server_restart_mode"):
            self.maxDelay = 1
        else:
            self.maxDelay = 10
        self.portal.sessions.announce_all(" ...")
        protocol.ReconnectingClientFactory.clientConnectionFailed(self, connector, reason)
# AMP Communication Command types
class MsgPortal2Server(amp.Command):
    """
    Message portal -> server

    Fields shared by all batch-capable commands (see batch_send/batch_recv):
      hashid - tag identifying a multi-part batch
      data   - one pickled chunk of the batch
      ipart  - index of this chunk
      nparts - total number of chunks
    """
    key = "MsgPortal2Server"
    arguments = [('hashid', amp.String()),
                 ('data', amp.String()),
                 ('ipart', amp.Integer()),
                 ('nparts', amp.Integer())]
    errors = [(Exception, 'EXCEPTION')]
    response = []
class MsgServer2Portal(amp.Command):
    """
    Message server -> portal

    Same batched wire format as MsgPortal2Server (hashid/data/ipart/nparts).
    """
    key = "MsgServer2Portal"
    arguments = [('hashid', amp.String()),
                 ('data', amp.String()),
                 ('ipart', amp.Integer()),
                 ('nparts', amp.Integer())]
    errors = [(Exception, 'EXCEPTION')]
    response = []
class ServerAdmin(amp.Command):
    """
    Portal -> Server
    Sent when the portal needs to perform admin
    operations on the server, such as when a new
    session connects or resyncs.  The batched payload carries
    (operation, data) pairs; operations are the P* opcodes above.
    """
    key = "ServerAdmin"
    arguments = [('hashid', amp.String()),
                 ('data', amp.String()),
                 ('ipart', amp.Integer()),
                 ('nparts', amp.Integer())]
    errors = [(Exception, 'EXCEPTION')]
    response = []
class PortalAdmin(amp.Command):
    """
    Server -> Portal
    Sent when the server needs to perform admin
    operations on the portal.  The batched payload carries
    (operation, data) pairs; operations are the S* opcodes above.
    """
    key = "PortalAdmin"
    arguments = [('hashid', amp.String()),
                 ('data', amp.String()),
                 ('ipart', amp.Integer()),
                 ('nparts', amp.Integer())]
    errors = [(Exception, 'EXCEPTION')]
    response = []
class FunctionCall(amp.Command):
    """
    Bidirectional
    Sent when either process needs to call an
    arbitrary function in the other. This does
    not use the batch-send functionality; args/kwargs are pickled
    with dumps() and the result is returned pickled as well.
    """
    key = "FunctionCall"
    arguments = [('module', amp.String()),
                 ('function', amp.String()),
                 ('args', amp.String()),
                 ('kwargs', amp.String())]
    errors = [(Exception, 'EXCEPTION')]
    response = [('result', amp.String())]
# Helper functions
def dumps(data):
    """Pickle `data` into a plain string suitable for an AMP String field.

    (Converted from a lambda assignment -- PEP 8 E731 -- behavior unchanged.)
    """
    return to_str(pickle.dumps(to_str(data), pickle.HIGHEST_PROTOCOL))


def loads(data):
    """Inverse of dumps(): unpickle a wire string back into Python data."""
    return pickle.loads(to_str(data))
#------------------------------------------------------------
# Core AMP protocol for communication Server <-> Portal
#------------------------------------------------------------
class AMPProtocol(amp.AMP):
    """
    This is the protocol that the MUD server and the proxy server
    communicate to each other with. AMP is a bi-directional protocol, so
    both the proxy and the MUD use the same commands and protocol.
    AMP specifies responder methods here and connect them to amp.Command
    subclasses that specify the datatypes of the input/output of these methods.
    """
    # helper methods
    def __init__(self, *args, **kwargs):
        """
        Initialize protocol with some things that need to be
        in place already before connecting both on portal and server.
        """
        # NOTE(review): amp.AMP.__init__ is never called here -- presumably
        # intentional with this Twisted version, but verify when upgrading.
        self.min_batch_step = 1.0 / BATCH_RATE  # min seconds between direct sends
        self.lastsend = time()
        # periodically flush anything accumulated in the batch buffers
        self.task = task.LoopingCall(self.batch_send, None, None)
        self.task.start(BATCH_TIMEOUT)
    def connectionMade(self):
        """
        This is called when a connection is established
        between server and portal. AMP calls it on both sides,
        so we need to make sure to only trigger resync from the
        portal side.
        """
        self.transport.setTcpNoDelay(True) # this makes for a factor x10 faster sends!
        if hasattr(self.factory, "portal"):
            # only the portal has the 'portal' property, so we know we are
            # on the portal side and can initialize the connection.
            sessdata = self.factory.portal.sessions.get_all_sync_data()
            self.call_remote_ServerAdmin(0,
                                         PSYNC,
                                         data=sessdata)
            self.factory.portal.sessions.at_server_connection()
            if hasattr(self.factory, "server_restart_mode"):
                del self.factory.server_restart_mode
    # Error handling
    def errback(self, e, info):
        "error handler, to avoid dropping connections on server tracebacks."
        e.trap(Exception)
        print "AMP Error for %(info)s: %(e)s" % {'info': info,
                                                 'e': e.getErrorMessage()}
    def batch_send(self, command, sessid, **kwargs):
        """
        This will batch data together to send fewer, large batches.
        Called either with a command (queue/send right away) or with
        command=None by the LoopingCall to flush pending batches.
        Kwargs:
            force_direct: send direct, bypassing the rate limiter
        """
        #print "batch_send 1:", command, sessid
        global _SENDBATCH
        if command is None:
            # called by the automatic cleanup mechanism
            commands = [cmd for cmd in (MsgPortal2Server, MsgServer2Portal, ServerAdmin, PortalAdmin)
                             if _SENDBATCH.get(cmd, False)]
            if not commands:
                return
        else:
            # called to send right away
            commands = [command]
            _SENDBATCH[command].append((sessid, kwargs))
        force_direct = kwargs.pop("force_direct", False)
        now = time()
        #print "batch_send 2:", now, self.lastsend, self.min_batch_step, now-self.lastsend > self.min_batch_step
        if force_direct or now - self.lastsend > self.min_batch_step:
            for command in commands:
                # pickle the whole queued batch, then split it into
                # AMP_MAXLEN-sized chunks tagged with a shared hashid
                batch = dumps(_SENDBATCH[command])
                _SENDBATCH[command] = []
                # split in parts small enough to fit in AMP MAXLEN
                to_send = [batch[i:i+AMP_MAXLEN] for i in range(0, len(batch), AMP_MAXLEN)]
                nparts = len(to_send)
                # tag this batch
                hashid = "%s-%s" % (id(batch), now)
                if nparts == 1:
                    deferreds = [self.callRemote(command,
                                                 hashid=hashid,
                                                 data=batch,
                                                 ipart=0,
                                                 nparts=1).addErrback(self.errback, command.key)]
                else:
                    deferreds = []
                    for ipart, part in enumerate(to_send):
                        deferred = self.callRemote(command,
                                                   hashid=hashid,
                                                   data=part,
                                                   ipart=ipart,
                                                   nparts=nparts)
                        deferred.addErrback(self.errback, "%s part %i/%i" % (command.key, ipart, nparts))
                        deferreds.append(deferred)
            self.lastsend = time() # don't use now here, keep it as up-to-date as possible
            return deferreds
    def batch_recv(self, hashid, data, ipart, nparts):
        """
        This will receive and unpack data sent as a batch. This both
        handles too-long data as well as batch-sending very fast-
        arriving commands.  Partial multi-part messages are buffered in
        _MSGBUFFER under their hashid until the last part arrives.
        """
        global _MSGBUFFER
        if nparts == 1:
            # most common case
            return loads(data)
        else:
            if ipart < nparts-1:
                # not yet complete
                _MSGBUFFER[hashid].append(data)
                return []
            else:
                # all parts in place - deserialize it
                return loads("".join(_MSGBUFFER.pop(hashid)) + data)
    # Message definition + helper methods to call/create each message type
    # Portal -> Server Msg
    def amp_msg_portal2server(self, hashid, data, ipart, nparts):
        """
        Relays message to server. This method is executed on the Server.
        Since AMP has a limit of 65535 bytes per message, it's possible the
        data comes in multiple chunks; if so (nparts>1) we buffer the data
        and wait for the remaining parts to arrive before continuing.
        """
        batch = self.batch_recv(hashid, data, ipart, nparts)
        for (sessid, kwargs) in batch:
            #print "msg portal -> server (server side):", sessid, msg, loads(ret["data"])
            self.factory.server.sessions.data_in(sessid,
                                                 text=kwargs["msg"],
                                                 data=kwargs["data"])
        return {}
    MsgPortal2Server.responder(amp_msg_portal2server)
    def call_remote_MsgPortal2Server(self, sessid, msg, data=""):
        """
        Access method called by the Portal and executed on the Portal.
        """
        #print "msg portal->server (portal side):", sessid, msg, data
        return self.batch_send(MsgPortal2Server, sessid,
                               msg=msg if msg is not None else "",
                               data=data)
    # Server -> Portal message
    def amp_msg_server2portal(self, hashid, data, ipart, nparts):
        """
        Relays message to Portal. This method is executed on the Portal.
        """
        batch = self.batch_recv(hashid, data, ipart, nparts)
        for (sessid, kwargs) in batch:
            #print "msg server->portal (portal side):", sessid, ret["text"], loads(ret["data"])
            self.factory.portal.sessions.data_out(sessid,
                                                  text=kwargs["msg"],
                                                  data=kwargs["data"])
        return {}
    MsgServer2Portal.responder(amp_msg_server2portal)
    def amp_batch_server2portal(self, hashid, data, ipart, nparts):
        """
        Relays batch data to Portal. This method is executed on the Portal.
        """
        batch = self.batch_recv(hashid, data, ipart, nparts)
        if batch is not None:
            for (sessid, kwargs) in batch:
                self.factory.portal.sessions.data_out(sessid,
                                                      text=kwargs["msg"],
                                                      **kwargs["data"])
        return {}
    # NOTE(review): MsgServer2Portal.responder() is called a second time
    # here, which re-registers the same Command and appears to override
    # amp_msg_server2portal above -- presumably one of the two should be
    # bound to a distinct Command class. Verify which responder is live.
    MsgServer2Portal.responder(amp_batch_server2portal)
    def call_remote_MsgServer2Portal(self, sessid, msg, data=""):
        """
        Access method called by the Server and executed on the Server.
        """
        #print "msg server->portal (server side):", sessid, msg, data
        return self.batch_send(MsgServer2Portal, sessid, msg=msg, data=data)
    # Server administration from the Portal side
    def amp_server_admin(self, hashid, data, ipart, nparts):
        """
        This allows the portal to perform admin
        operations on the server. This is executed on the Server.
        Each batch entry carries an opcode (P* constants) plus payload.
        """
        #print "serveradmin (server side):", hashid, ipart, nparts
        batch = self.batch_recv(hashid, data, ipart, nparts)
        for (sessid, kwargs) in batch:
            operation = kwargs["operation"]
            data = kwargs["data"]
            server_sessionhandler = self.factory.server.sessions
            #print "serveradmin (server side):", sessid, ord(operation), data
            if operation == PCONN:  # portal_session_connect
                # create a new session and sync it
                server_sessionhandler.portal_connect(data)
            elif operation == PCONNSYNC: #portal_session_sync
                server_sessionhandler.portal_session_sync(data)
            elif operation == PDISCONN:  # portal_session_disconnect
                # session closed from portal side
                self.factory.server.sessions.portal_disconnect(sessid)
            elif operation == PSYNC:  # portal_session_sync
                # force a resync of sessions when portal reconnects to
                # server (e.g. after a server reboot) the data kwarg
                # contains a dict {sessid: {arg1:val1,...}}
                # representing the attributes to sync for each
                # session.
                server_sessionhandler.portal_sessions_sync(data)
            else:
                raise Exception("operation %(op)s not recognized." % {'op': operation})
        return {}
    ServerAdmin.responder(amp_server_admin)
    def call_remote_ServerAdmin(self, sessid, operation="", data=""):
        """
        Access method called by the Portal and Executed on the Portal.
        Sends directly (bypassing batching) during server restart.
        """
        #print "serveradmin (portal side):", sessid, ord(operation), data
        if hasattr(self.factory, "server_restart_mode"):
            return self.batch_send(ServerAdmin, sessid, force_direct=True, operation=operation, data=data)
        return self.batch_send(ServerAdmin, sessid, operation=operation, data=data)
    # Portal administraton from the Server side
    def amp_portal_admin(self, hashid, data, ipart, nparts):
        """
        This allows the server to perform admin
        operations on the portal. This is executed on the Portal.
        Each batch entry carries an opcode (S* constants) plus payload.
        """
        #print "portaladmin (portal side):", sessid, ord(operation), data
        batch = self.batch_recv(hashid, data, ipart, nparts)
        for (sessid, kwargs) in batch:
            operation = kwargs["operation"]
            data = kwargs["data"]
            portal_sessionhandler = self.factory.portal.sessions
            if operation == SLOGIN:  # server_session_login
                # a session has authenticated; sync it.
                portal_sessionhandler.server_logged_in(sessid, data)
            elif operation == SDISCONN:  # server_session_disconnect
                # the server is ordering to disconnect the session
                portal_sessionhandler.server_disconnect(sessid, reason=data)
            elif operation == SDISCONNALL:  # server_session_disconnect_all
                # server orders all sessions to disconnect
                portal_sessionhandler.server_disconnect_all(reason=data)
            elif operation == SSHUTD:  # server_shutdown
                # the server orders the portal to shut down
                self.factory.portal.shutdown(restart=False)
            elif operation == SSYNC:  # server_session_sync
                # server wants to save session data to the portal,
                # maybe because it's about to shut down.
                portal_sessionhandler.server_session_sync(data)
                # set a flag in case we are about to shut down soon
                self.factory.server_restart_mode = True
            elif operation == SCONN: # server_force_connection (for irc/imc2 etc)
                portal_sessionhandler.server_connect(**data)
            else:
                raise Exception("operation %(op)s not recognized." % {'op': operation})
        return {}
    PortalAdmin.responder(amp_portal_admin)
    def call_remote_PortalAdmin(self, sessid, operation="", data=""):
        """
        Access method called by the server side.  SSYNC is sent directly
        since it may precede an imminent shutdown.
        """
        if operation == SSYNC:
            return self.batch_send(PortalAdmin, sessid, force_direct=True, operation=operation, data=data)
        return self.batch_send(PortalAdmin, sessid, operation=operation, data=data)
    # Extra functions
    def amp_function_call(self, module, function, args, **kwargs):
        """
        This allows Portal- and Server-process to call an arbitrary function
        in the other process. It is intended for use by plugin modules.
        """
        # NOTE(review): because of the ** signature, `kwargs` here is the
        # dict {'kwargs': <pickled string>} rather than the pickled string
        # itself, so loads(kwargs) looks suspect -- presumably the parameter
        # should be a plain `kwargs`. Verify against FunctionCall usage.
        args = loads(args)
        kwargs = loads(kwargs)
        # call the function (don't catch tracebacks here)
        result = variable_from_module(module, function)(*args, **kwargs)
        if isinstance(result, Deferred):
            # if result is a deferred, attach handler to properly
            # wrap the return value
            result.addCallback(lambda r: {"result": dumps(r)})
            return result
        else:
            return {'result': dumps(result)}
    FunctionCall.responder(amp_function_call)
    def call_remote_FunctionCall(self, modulepath, functionname, *args, **kwargs):
        """
        Access method called by either process. This will call an arbitrary
        function on the other process (On Portal if calling from Server and
        vice versa).
        Inputs:
            modulepath (str) - python path to module holding function to call
            functionname (str) - name of function in given module
            *args, **kwargs will be used as arguments/keyword args for the
                            remote function call
        Returns:
            A deferred that fires with the return value of the remote
            function call
        """
        return self.callRemote(FunctionCall,
                               module=modulepath,
                               function=functionname,
                               args=dumps(args),
                               kwargs=dumps(kwargs)).addCallback(lambda r: loads(r["result"])).addErrback(self.errback, "FunctionCall")
|
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from bambou import NURESTObject
class NUVNFMetadata(NURESTObject):
""" Represents a VNFMetadata in the VSD
Notes:
The VNF deployment properties that includes the location of the image, bootstrap config and rest of the libvirt domain XML template defined as text file.
"""
__rest_name__ = "vnfmetadata"
__resource_name__ = "vnfmetadatas"
## Constants
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
def __init__(self, **kwargs):
""" Initializes a VNFMetadata instance
Notes:
You can specify all parameters while calling this methods.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> vnfmetadata = NUVNFMetadata(id=u'xxxx-xxx-xxx-xxx', name=u'VNFMetadata')
>>> vnfmetadata = NUVNFMetadata(data=my_dict)
"""
super(NUVNFMetadata, self).__init__()
# Read/Write Attributes
self._name = None
self._last_updated_by = None
self._description = None
self._blob = None
self._entity_scope = None
self._assoc_entity_type = None
self._external_id = None
self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=False)
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="blob", remote_name="blob", attribute_type=str, is_required=True, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="assoc_entity_type", remote_name="assocEntityType", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
# Fetchers
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def name(self):
""" Get name value.
Notes:
Name of the VNF Metadata
"""
return self._name
@name.setter
def name(self, value):
""" Set name value.
Notes:
Name of the VNF Metadata
"""
self._name = value
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def description(self):
""" Get description value.
Notes:
Description of the VNF Metadata
"""
return self._description
@description.setter
def description(self, value):
""" Set description value.
Notes:
Description of the VNF Metadata
"""
self._description = value
@property
def blob(self):
""" Get blob value.
Notes:
The Metadata blob
"""
return self._blob
@blob.setter
def blob(self, value):
""" Set blob value.
Notes:
The Metadata blob
"""
self._blob = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
    @property
    def assoc_entity_type(self):
        """ Get assoc_entity_type value.
            Notes:
                Type of the entity to which the Metadata is associated to.
                This attribute is named `assocEntityType` in VSD API.
        """
        # Exposed as a str attribute (see expose_attribute in __init__).
        return self._assoc_entity_type
    @assoc_entity_type.setter
    def assoc_entity_type(self, value):
        """ Set assoc_entity_type value.
            Notes:
                Type of the entity to which the Metadata is associated to.
                This attribute is named `assocEntityType` in VSD API.
        """
        # Plain write accessor; no validation is performed here.
        self._assoc_entity_type = value
    @property
    def external_id(self):
        """ Get external_id value.
            Notes:
                External object ID. Used for integration with third party systems
                This attribute is named `externalID` in VSD API.
        """
        # Exposed as a unique str attribute (see expose_attribute in __init__).
        return self._external_id
    @external_id.setter
    def external_id(self, value):
        """ Set external_id value.
            Notes:
                External object ID. Used for integration with third party systems
                This attribute is named `externalID` in VSD API.
        """
        # Plain write accessor; no validation is performed here.
        self._external_id = value
|
|
__source__ = 'https://leetcode.com/problems/binary-tree-postorder-traversal/#/solutions'
# https://github.com/kamyu104/LeetCode/blob/master/Python/binary-tree-postorder-traversal.py
# Time: O(n)
# Space: O(1)
# Tree
#
# Description: Leetcode # 145. Binary Tree Postorder Traversal
#
# Given a binary tree, return the postorder traversal of its nodes' values.
#
# For example:
# Given binary tree {1,#,2,3},
# 1
# \
# 2
# /
# 3
# return [3,2,1].
#
# Note: Recursive solution is trivial, could you do it iteratively?
#
# Related Topics
# Tree Stack
# Similar Questions
# Binary Tree Inorder Traversal
#
import unittest
from collections import deque
# Definition for a binary tree node
class TreeNode:
    """A binary-tree node: a payload value plus optional left/right children."""
    def __init__(self, x):
        # Start as a leaf; children are attached by the caller.
        self.val, self.left, self.right = x, None, None
# Morris Traversal Solution
class Solution:
    """Post-order traversal in O(1) extra space using Morris threading.

    Temporarily rewires each left child's right-most descendant to point back
    at the current node; when that thread is found again, the left-subtree
    path is harvested in reverse via traceBack and the thread is removed.
    """
    # @param root, a tree node
    # @return a list of integers
    def postorderTraversal(self, root):
        # Dummy node with `root` as its left child lets the root's own
        # right-spine be harvested by the same mechanism as any subtree.
        dummy = TreeNode(0)
        dummy.left = root
        result, cur = [] , dummy
        while cur:
            #1) no left child
            if cur.left is None:
                cur = cur.right
            else:
                #2) go the left child's right-most child
                node = cur.left
                while node.right and node.right != cur:
                    node = node.right
                #3) no right child : node.right = cur and cur = cur.left
                if node.right == None:
                    # First visit: install the back-thread and descend left.
                    node.right = cur
                    cur = cur.left
                else:
                    #4) has right child : node.right = None and cur = cur.right
                    # Second visit: harvest the left path, remove the thread.
                    result += self.traceBack(cur.left, node)
                    node.right = None
                    cur =cur.right
        return result
    def traceBack(self, frm, to):
        """Collect values along the right-link chain frm..to, reversed."""
        result, cur = [], frm
        while cur != to:
            result.append(cur.val)
            cur = cur.right
        result.append(to.val)
        result.reverse()
        return result
# Time: O(n)
# Space: O(n)
# Stack Solution
class Solution2:
    """Iterative post-order traversal with an explicit stack.

    Walk left pushing nodes; a node is emitted only once its right subtree
    is absent or was the most recently emitted node.
    """
    # @param root, a tree node
    # @return a list of integers
    def postorderTraversal(self, root):
        output, pending = [], []
        node, prev = root, None
        while node or pending:
            if node:
                # Keep descending left, deferring this node.
                pending.append(node)
                node = node.left
            else:
                top = pending[-1]
                if top.right is None or top.right is prev:
                    # Right subtree done (or missing): emit the node.
                    output.append(top.val)
                    prev = pending.pop()
                else:
                    # Visit the right subtree before emitting `top`.
                    node = top.right
        return output
class SolutionOther(object):
    def postorderTraversal(self, root):
        """
        :type root: TreeNode
        :rtype: List[int]

        Reverse-preorder trick: visit node, then push left before right so
        the right subtree is drained first; reversing the collected values
        yields post-order.
        """
        if not root:
            return []
        out, todo = [], [root]
        while todo:
            node = todo.pop()
            out.append(node.val)
            if node.left:
                todo.append(node.left)
            if node.right:
                todo.append(node.right)
        return out[::-1]
class Solution3(object):
    def postorderTraversal(self, root):
        """
        :type root: TreeNode
        :rtype: List[int]

        Plain recursive post-order traversal.
        """
        collected = []
        self.dfs(root, collected)
        return collected
    def dfs(self, root, res):
        """Append the post-order values of `root`'s subtree onto `res`."""
        if not root:
            return
        # Left subtree, right subtree, then the node itself.
        self.dfs(root.left, res)
        self.dfs(root.right, res)
        res.append(root.val)
# Test
class TestMethods(unittest.TestCase):
    def test_Local(self):
        # Smoke test of the Morris-traversal Solution on the problem's
        # example tree {1,#,2,3}; expected post-order is [3, 2, 1].
        self.assertEqual(1, 1)
        root = TreeNode(1)
        root.right = TreeNode(2)
        root.right.left = TreeNode(3)
        result = Solution().postorderTraversal(root)
        print result
if __name__ == '__main__':
    unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/binary-tree-postorder-traversal/solution/
#
# http://www.programcreek.com/2012/12/leetcode-solution-of-iterative-binary-tree-postorder-traversal-in-java/
# The order of "Postorder" is: left child -> right child -> parent node.
# Find the relation between the previously visited node and the current node
# Use a stack to track nodes
# As we go down the tree, check the previously visited node.
# If it is the parent of the current node, we should add current node to stack.
# When there is no children for current node, pop it from stack.
# Then the previous node become to be under the current node for next loop.
1. DFS:
# 0ms 100%
class Solution {
public List<Integer> postorderTraversal(TreeNode root) {
List<Integer> result = new ArrayList<>();
postorder(root, result);
return result;
}
private void postorder(TreeNode root, List<Integer> result) {
if (root == null) {
return;
}
postorder(root.left, result);
postorder(root.right, result);
result.add(root.val);
}
}
2. BFS
# 1ms 65.62%
class Solution {
public List<Integer> postorderTraversal(TreeNode root) {
List<Integer> res = new ArrayList<>();
if (root == null) {
return res;
}
Stack<TreeNode> stack = new Stack<>();
stack.push(root);
while (!stack.isEmpty()) {
TreeNode cur = stack.pop();
res.add(cur.val);
if (cur.left != null) {
stack.push(cur.left);
}
if (cur.right != null) {
stack.push(cur.right);
}
}
Collections.reverse(res);
return res;
}
}
# PostOrder
# 44.87% 1ms
class Solution {
public List<Integer> postorderTraversal(TreeNode root) {
List<Integer> res = new ArrayList();
if (root == null) return res;
Stack<TreeNode> stack = new Stack<>();
TreeNode p = root;
while (!stack.isEmpty() || p != null) {
if (p != null) {
stack.push(p);
res.add(0, p.val);
p = p.right;
} else {
TreeNode node = stack.pop();
p = node.left;
}
}
return res;
}
}
##################################################################
# In Order Traverse
public List<Integer> inorderTraversal(TreeNode root) {
List<Integer> result = new ArrayList<>();
Deque<TreeNode> stack = new ArrayDeque<>();
TreeNode p = root;
while(!stack.isEmpty() || p != null) {
if(p != null) {
stack.push(p);
p = p.left;
} else {
TreeNode node = stack.pop();
result.add(node.val); // Add after all left children
p = node.right;
}
}
return result;
}
# Pre Order Traverse
public List<Integer> preorderTraversal(TreeNode root) {
List<Integer> result = new ArrayList<>();
Deque<TreeNode> stack = new ArrayDeque<>();
TreeNode p = root;
while(!stack.isEmpty() || p != null) {
if(p != null) {
stack.push(p);
result.add(p.val); // Add before going to children
p = p.left;
} else {
TreeNode node = stack.pop();
p = node.right;
}
}
return result;
}
}
'''
|
|
from timeit import default_timer as timer
import json
import Queue
import argparse
import os
import csv
from tabulate import tabulate
from foofah_libs.foofah_node import FoofahNode
import foofah_libs.operators as Operations
import numpy as np
from foofah_libs.generate_prog import create_python_prog
# Upper bound on search steps (currently unlimited).
MAX_STEPS = float("inf")
# Selector values for the `algo` parameter of a_star_search.
ALGO_BFS = 0
ALGO_A_STAR = 1
ALGO_A_STAR_NAIVE = 2
ALGO_AWA = 3
def reconstruct_path(current):
    """Follow parent links from `current` back to the search root.

    Returns the nodes ordered from `current` to the root, or an empty list
    when `current` is None.
    """
    if current is None:
        return []
    path = [current]
    node = current
    while node.parent is not None:
        node = node.parent
        path.append(node)
    return path
def a_star_search(raw_data, target, ops, debug=0, timeout=300, algo=ALGO_A_STAR, batch=True,
                  epsilon=1, bound=float("inf"), p1=True, p2=True, p3=True):
    """Heuristic search from `raw_data` to `target` over the operators `ops`.

    Returns a (final_node, open_queue, closed_nodes) triple; final_node is
    None when no solution was found within `timeout` seconds.  `algo`
    selects the heuristic (BFS = none, A*, or the naive rule-based one);
    `epsilon` weights the heuristic (weighted A*); p1/p2/p3 toggle pruning
    rules passed through to FoofahNode.make_children.
    """
    FoofahNode.target = target
    # Synthetic start/end operators so root and goal are ordinary nodes.
    root_op = ({'fxn': None, 'name': 'start', 'char': 'start', 'cost': 1.0}, 0)
    root = FoofahNode(raw_data, root_op, None, {})
    goal_op = ({'fxn': None, 'name': 'end', 'char': 'end', 'cost': 0.0}, 0)
    goal_node = FoofahNode(target, goal_op, None, {})
    FoofahNode.goal_node = goal_node
    root.g_score = 0.0
    if algo == ALGO_BFS:
        root.h_score = 0
    elif algo == ALGO_A_STAR:
        root.h_score = root.get_h_score(batch=batch)
    elif algo == ALGO_A_STAR_NAIVE:
        root.h_score = root.get_h_score_rule()
    root.f_score = root.g_score + epsilon * root.h_score
    # Switch to using priority queue because it is thread safe
    open_q = Queue.PriorityQueue()
    # NOTE(review): open_q_cache is never reassigned after this, so the
    # `if open_q_cache:` branch at the bottom appears to be dead code.
    open_q_cache = None
    cost_q = {}             # best f_score seen per node (duplicate filter)
    closed_nodes = set()    # fully expanded nodes
    final_node = None
    start_time = timer()
    open_q.put(root)
    while not open_q.empty():
        node = open_q.get(block=False)
        cur_time = timer()
        if cur_time - start_time > timeout:
            print "*** Exceeded time limit of %d seconds" % timeout
            break
        if debug >= 1:
            if node.parent:
                print "f_score:", node.f_score, "h_score:", node.h_score, "g_score:", node.g_score, "id:", node.node_id, "p_id:", node.parent.node_id, "depth:", node.depth, node, node.contents
                print
            else:
                print "f_score:", node.f_score, "h_score:", node.h_score, "g_score:", node.g_score, "id:", node.node_id, "p_id:", "None", "depth:", node.depth, node, node.contents
                print
        closed_nodes.add(node)
        if node == goal_node:
            final_node = node
            break
        my_children = node.make_children(ops, bound=bound, p1=p1, p2=p2, p3=p3)
        for c in my_children:
            if c in closed_nodes:
                continue
            if algo == ALGO_BFS:
                c.h_score = 0
                c.g_score = node.g_score + node.operation[0]['cost']
            elif algo == ALGO_A_STAR:
                c.h_score = c.get_h_score(batch=batch)
                c.g_score = node.g_score + node.operation[0]['cost']
            elif algo == ALGO_A_STAR_NAIVE:
                c.h_score = c.get_h_score_rule()
                c.g_score = node.g_score + node.operation[0]['cost']
            # Check if destination has been found, if it is, return.
            if c.h_score == 0:
                if c == goal_node:
                    final_node = c
                open_q.put(c)
                # NOTE(review): c.f_score has not been recomputed on this
                # path (it is assigned only below) — verify FoofahNode
                # provides a meaningful default here.
                cost_q[c] = c.f_score
                if debug >= 2:
                    if c.parent:
                        print "***", "f_score:", c.f_score, "h_score:", c.h_score, "g_score:", c.g_score, "id:", c.node_id, "p_id:", c.parent.node_id, "depth:", c.depth, c, c.contents
                    else:
                        print "***", "f_score:", c.f_score, "h_score:", c.h_score, "g_score:", c.g_score, "id:", c.node_id, "p_id:", "None", "depth:", c.depth, c, c.contents
                # Early exit: returns immediately from inside the child loop.
                return final_node, open_q, closed_nodes
            c.f_score = c.g_score + epsilon * c.h_score
            # Enqueue only if this is the cheapest f_score seen for c and
            # the node is not pruned to infinity.
            if (c not in cost_q or (c in cost_q and c.f_score < cost_q[c])) and c.f_score < float("inf"):
                open_q.put(c)
                cost_q[c] = c.f_score
                if debug >= 2:
                    if c.parent:
                        print "***", "f_score:", c.f_score, "h_score:", c.h_score, "g_score:", c.g_score, "id:", c.node_id, "p_id:", c.parent.node_id, "depth:", c.depth, c, c.contents
                    else:
                        print "***", "f_score:", c.f_score, "h_score:", c.h_score, "g_score:", c.g_score, "id:", c.node_id, "p_id:", "None", "depth:", c.depth, c, c.contents
    if open_q_cache:
        while open_q.qsize() > 0:
            open_q_cache.put(open_q.get())
        return final_node, open_q_cache, closed_nodes
    else:
        return final_node, open_q, closed_nodes
def extract_table(raw_data):
    """If `raw_data` is a single cell of delimited text, parse it into a table.

    Candidate delimiters (tab, comma, space) and quote characters are tried
    in order; the first parse in which every row has the same number of
    fields is returned.  Anything else comes back unchanged.
    """
    if len(raw_data) != 1 or len(raw_data[0]) != 1:
        return raw_data
    lines = raw_data[0][0].splitlines()
    for sep in ("\t", ",", " "):
        for quote in ("'", '"'):
            parsed = list(csv.reader(lines, delimiter=sep, quotechar=quote))
            # A single distinct row width means the parse is consistent.
            widths = set(len(row) for row in parsed)
            if len(widths) == 1:
                return parsed
    return raw_data
def main():
    """Command-line entry point: parse arguments, run the heuristic search
    on each input example file, print the synthesized program (or detailed
    per-step tables), and optionally validate it against a held-out table.
    """
    final_node = None
    open_nodes = None
    closed_nodes = None
    FoofahNode.if_awa = False
    #
    # Command Line Arguments
    #
    parser = argparse.ArgumentParser()
    parser.add_argument('--details', action='store_true', default=False,
                        help="Print the detailed synthesized programs and intermediate tables")
    parser.add_argument('--input', type=str, nargs='+',
                        help="List of input test data files separated by spaces")
    parser.add_argument('--debug_level', type=int, default=0,
                        help="Debug level. 0 = none, 1 = simple, etc.")
    parser.add_argument('--timeout', type=int, default=300,
                        help="Search will stop after this many seconds.")
    parser.add_argument('--auto_read', action='store_true', help="Automatically read csv file using csv reader")
    parser.add_argument('--validate', action='store_true', default=False,
                        help="Validating the correctness of synthesized program")
    parser.add_argument('--search_algo', type=int, default=1,
                        help="Searh algorithm: 0 = BFS, 1 (default) = A*, 2 = naive heuristic")
    parser.add_argument('--no_batch', action='store_true', default=False, help="Disable batch")
    parser.add_argument('--weight', type=float, default=1, help="Weighted A*")
    parser.add_argument('--bound', type=float, default=float("inf"), help="Depth bound")
    parser.add_argument('--p1off', action='store_true', default=False, help="turn off prune rule 1")
    parser.add_argument('--p2off', action='store_true', default=False, help="turn off prune rule 2")
    parser.add_argument('--p3off', action='store_true', default=False, help="turn off prune rule 3")
    parser.add_argument('--globalPruneOff', action='store_true', default=False, help="turn off global pruning rules")
    parser.add_argument('--opPruneOff', action='store_true', default=False, help="turn off operator pruning rules")
    parser.add_argument('--wrap1off', action='store_true', default=False, help="turn off 1st wrap operator")
    parser.add_argument('--wrap2off', action='store_true', default=False, help="turn off 2nd wrap operator")
    parser.add_argument('--wrap3off', action='store_true', default=False, help="turn off 3rd wrap operator")
    #
    # Read Command Line Arguments
    #
    args = parser.parse_args()
    if_detail = args.details
    input_files = args.input
    debug_level = args.debug_level
    timeout = args.timeout
    if_auto_read = False
    if args.auto_read:
        if_auto_read = True
    if_validate = args.validate
    search_algo = args.search_algo
    if_batch = not args.no_batch
    epsilon = args.weight
    bound = args.bound
    p1off = args.p1off
    p2off = args.p2off
    p3off = args.p3off
    op_prune_off = args.opPruneOff
    wrap1off = args.wrap1off
    wrap2off = args.wrap2off
    wrap3off = args.wrap3off
    # Toggle operator-level switches on the shared Operations module.
    if op_prune_off:
        Operations.PRUNE_1 = False
    if wrap1off:
        Operations.WRAP_1 = False
    if wrap2off:
        Operations.WRAP_2 = False
    if wrap3off:
        Operations.WRAP_3 = False
    global_prune_off = args.globalPruneOff
    if global_prune_off:
        p1off = True
        p2off = True
        p3off = True
    if input_files is None or len(input_files) == 0:
        print "*** No test input file specified. ***"
        exit()
    test_files = input_files
    for test_file in test_files:
        with open(test_file, 'rb') as f:
            test_data = json.load(f)
        # Normalize every cell to str before searching.
        raw_data = [map(str, x) for x in test_data['InputTable']]
        target = [map(str, x) for x in test_data['OutputTable']]
        if if_auto_read:
            raw_data = extract_table(raw_data)
        start = timer()
        if search_algo == ALGO_BFS:
            final_node, open_nodes, closed_nodes = a_star_search(raw_data, target, Operations.add_ops(), debug_level,
                                                                 timeout, batch=if_batch, algo=search_algo,
                                                                 p1=not p1off, p2=not p2off, p3=not p3off)
        elif search_algo == ALGO_A_STAR:
            final_node, open_nodes, closed_nodes = a_star_search(raw_data, target, Operations.add_ops(), debug_level,
                                                                 timeout, batch=if_batch, epsilon=epsilon,
                                                                 bound=bound, algo=search_algo, p1=not p1off,
                                                                 p2=not p2off,
                                                                 p3=not p3off)
        elif search_algo == ALGO_A_STAR_NAIVE:
            final_node, open_nodes, closed_nodes = a_star_search(raw_data, target, Operations.add_ops(), debug_level,
                                                                 timeout, batch=if_batch, epsilon=epsilon,
                                                                 bound=bound, algo=search_algo, p1=not p1off,
                                                                 p2=not p2off,
                                                                 p3=not p3off)
        end = timer()
        if final_node:
            path = reconstruct_path(final_node)
            # Some statistics
            num_visited = len(closed_nodes)
            nodes_created = open_nodes.qsize() + len(closed_nodes)
            # Effective branching factor b solves b^d + ... = nodes_created.
            poly = np.ones(len(path) + 1)
            poly[len(path)] = -nodes_created
            branch_factor = max(np.real(np.roots(poly)))
            if not if_detail:
                program = create_python_prog(path, raw_data)
                print "#", "-" * 50
                print "# A Program Has Been Successfully Synthesized"
                print "#"
                print "# Input file:", test_file
                print "# Total operations:", len(path) - 1
                print "# Time elapsed: %.3f s Nodes visited: %d Nodes created: %d" % (
                    (end - start), num_visited, nodes_created)
                print "# Naive branching factor: %d Effective branching factor: %.2f" % (
                    len(Operations.add_ops()), branch_factor)
                print "# Make child time: %.2f s Heuristic time: %.2f s" % (
                    sum(final_node.times['children']), sum(final_node.times['scores']))
                print "#", "-" * 50
                print
                print program
            else:
                print "-" * 50
                train_data = []
                for i, n in enumerate(reversed(path)):
                    # Operations including transpose, unfold and unfold_header do not have parameters
                    if len(n.operation) > 1:
                        if n.operation[1]:
                            print "%2d. %-13s at %d: H-score: %.1f Actual: %d" % (
                                i + 1, n.operation[0]['name'], n.operation[1], n.h_score, len(path) - i - 1)
                        else:
                            print "%2d. %-13s : H-score: %.1f Actual: %d" % (
                                i + 1, n.operation[0]['name'], n.h_score, len(path) - i - 1)
                        print tabulate(n.contents, tablefmt="grid")
                    else:
                        print "%2d. %-13s: H-score: %.1f Actual: %d" % (
                            i + 1, n.operation[0]['name'], n.h_score, len(path) - i - 1)
                        print tabulate(n.contents, tablefmt="grid")
                    remaining_steps = len(path) - i - 1
                    if remaining_steps > 0:
                        temp = dict()
                        temp["raw_table"] = n.contents
                        temp["target_table"] = target
                        temp["steps"] = remaining_steps
                        train_data.append(temp)
                if final_node.contents != target:
                    print
                    print "%2d. Only \"Moves\" are needed to create a extact same view as target (TO BE COMPLETED)." % (
                        len(path) + 1)
                    print
                print "-" * 50
                print "Input file:", test_file
                print "Total operations:", len(path) - 1
                print "Time elapsed: %.3f s Nodes visited: %d Nodes created: %d" % (
                    (end - start), num_visited, nodes_created)
                print "Naive branching factor: %d Effective branching factor: %.2f" % (len(Operations.add_ops()), branch_factor)
                print "Make child time: %.2f s Heuristic time: %.2f s" % (
                    sum(final_node.times['children']), sum(final_node.times['scores']))
            if if_validate:
                # Replay the synthesized operations on the held-out table.
                test_table = test_data['TestingTable']
                try:
                    for i, node in enumerate(reversed(path)):
                        if i > 0:
                            op = node.operation[0]
                            if op['num_params'] == 1:
                                test_table = op['fxn'](test_table)
                            else:
                                test_table = op['fxn'](test_table, node.operation[1])
                except:
                    # Any replay failure is treated as validation failure.
                    test_table = None
                if test_table:
                    test_data["TransformedTestTable"] = test_table
                    test_data["Success"] = True
                    print "-" * 50
                    print "Experiment 1: Apply the synthetic program on other data"
                    print "-" * 30
                    print "Testing Table"
                    print tabulate(test_data['TestingTable'], tablefmt="grid")
                    print "Transformed Table"
                    print tabulate(test_data["TransformedTestTable"], tablefmt="grid")
                    print "-" * 30
                    print "Result: Success"
                    print "-" * 50
                else:
                    test_data["TransformedTestTable"] = test_table
                    test_data["Success"] = False
                    print "-" * 50
                    print "Experiment 1: Apply the synthetic program on other data"
                    print "-" * 30
                    print "Testing Table"
                    print tabulate(test_data['TestingTable'], tablefmt="grid")
                    print "-" * 30
                    print "Result: Failure"
                    print "-" * 50
                dirname = os.getcwd() + "/test_results/validate"
                filename = dirname + "/exp0_results_" + str(test_data['TestName']) + "_" + str(
                    test_data['NumSamples']) + ".txt"
                if not os.path.exists(dirname):
                    try:
                        os.makedirs(dirname)
                    except OSError:
                        raise
                with open(filename, 'w') as outfile:
                    json.dump(test_data, outfile)
        else:
            print "*** Solution Not Found ***"
if __name__ == "__main__":
    main()
|
|
# -*- coding: utf-8 -*-
import os
import re
from collections import OrderedDict
import PyXMCDA as px
from lxml import etree
# XMCDA 2.2.0 envelope wrapped around every produced output document.
HEADER = ("<?xml version='1.0' encoding='UTF-8'?>\n"
          "<xmcda:XMCDA xmlns:xmcda='http://www.decision-deck.org/2012/XMCDA-2.2.0'\n"
          " xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'\n"
          " xsi:schemaLocation='http://www.decision-deck.org/2012/XMCDA-2.2.0 http://www.decision-deck.org/xmcda/_downloads/XMCDA-2.2.0.xsd'>\n")
FOOTER = "</xmcda:XMCDA>"
###############################################################################
# Data structures etc. #
###############################################################################
class Vividict(dict):
    """Autovivifying dict: reading a missing key creates, stores, and returns
    a new empty Vividict, enabling chained assignments like d[a][b] = v."""
    def __missing__(self, key):
        child = type(self)()
        self[key] = child
        return child
class InputData(object):
    """Empty attribute container (equivalent to
    type('InputData', (object,), {})); fields are attached dynamically."""
    pass
def _create_data_object(params):
    """Return an InputData instance with every name in `params` preset to None."""
    data = InputData()
    for attr in params:
        setattr(data, attr, None)
    return data
###############################################################################
# Shared 'business logic'. #
###############################################################################
def get_relation_type(x, y, outranking):
    """Determines an exact type of relation for (x, y) based on the outranking
    relation produced by the 'cutRelationCrisp' module.

    Returns 'indifference', 'preference', 'incomparability', or None (when
    only y outranks x).
    """
    xy = outranking[x][y]
    yx = outranking[y][x]
    if xy and yx:
        return 'indifference'
    if xy and not yx:
        return 'preference'
    if not xy and not yx:
        return 'incomparability'
    return None
def get_linear(pref_directions, criterion, x, y, threshold):
    """Check if the given threshold is defined as linear and if yes, then
    calculate its value - otherwise (i.e. when the threshold is a constant)
    just return it w/o any processing.
    In most cases it may be a good idea to wrap this function using
    functools.partial and pass here only the 'threshold' argument.
    """
    # NB: deliberately an exact-type check (not isinstance) so that dict
    # subclasses are still treated as constants, as in the original.
    if type(threshold) is not dict:  # true when threshold is constant
        return threshold
    # Evaluate the linear threshold at the performance of whichever of the
    # two values is weaker on this criterion.
    if pref_directions[criterion] == 'max':
        perf = y if x > y else x
    if pref_directions[criterion] == 'min':
        perf = x if x > y else y
    return threshold.get('slope', 0) * perf + threshold.get('intercept', 0)
def omega(pref_directions, criterion, x, y):
    """Signed advantage of x over y on `criterion`, oriented by its
    preference direction; falls through to None for an unknown direction."""
    direction = pref_directions[criterion]
    if direction == 'max':
        return x - y
    if direction == 'min':
        return y - x
###############################################################################
# Getting the input data and related stuff. #
# Functions prefixed with the underscore are meant for the internal use only. #
###############################################################################
def get_dirs(args):
    """Extract the '-i' (input) and '-o' (output) directories from `args`,
    verifying that both exist; raises RuntimeError otherwise."""
    input_dir = args.get('-i')
    output_dir = args.get('-o')
    for path in (input_dir, output_dir):
        if not os.path.isdir(path):
            raise RuntimeError("Directory '{}' doesn't exist. Aborting."
                               .format(path))
    return input_dir, output_dir
def _get_trees(input_dir, filenames):
    """Parse and validate the XMCDA input files.

    `filenames` is an iterable of (name, is_optional) pairs.  Missing
    optional files are skipped; a missing required file or a validation
    failure raises RuntimeError.  Returns {tree_name: parsed_tree}.
    """
    trees = {}
    for name, is_optional in filenames:
        path = os.path.join(input_dir, name)
        if not os.path.isfile(path):
            if is_optional:
                continue
            raise RuntimeError("Problem with the input file: '{}'."
                               .format(name))
        tree = px.parseValidate(path)
        if tree is None:
            raise RuntimeError("Validation error with the file: '{}'."
                               .format(name))
        tree_name = os.path.splitext(name)[0]
        # although we use 'classes' and 'classes_profiles' in the names of
        # the input files and in the documentation, we want to use them as
        # 'categories' (and 'categories_profiles') internally
        if 'classes' in tree_name:
            tree_name = tree_name.replace('classes', 'categories')
        trees[tree_name] = tree
    return trees
def _get_thresholds(xmltree):
    """This is basically the same as px.getConstantThresholds, but with the
    added ability to get linear thresholds as well.

    Returns {criterion_id: {mcdaConcept: value}}, where value is either a
    float (constant threshold) or a {'slope', 'intercept'} dict (linear).
    Criteria without a <thresholds> element map to an empty dict.
    """
    thresholds = {}
    for criterion in xmltree.findall('.//criterion') :
        criterion_id = criterion.get('id')
        xml_thresholds = criterion.find('thresholds')
        if xml_thresholds is not None :
            crit_thresholds = {}
            for xml_threshold in xml_thresholds.findall('threshold') :
                # Constant thresholds: <constant><real>/<integer>.
                xml_constant = xml_threshold.find('constant')
                if xml_constant is not None:
                    xml_val = xml_constant.find('real')
                    if xml_val is None :
                        xml_val = xml_constant.find('integer')
                    if xml_val is not None :
                        mcda_concept = xml_threshold.get('mcdaConcept')
                        if mcda_concept is not None:
                            crit_thresholds[mcda_concept] = float(xml_val.text)
                # Linear thresholds: <linear> with slope and/or intercept;
                # a missing component defaults to 0.0.
                xml_linear = xml_threshold.find('linear')
                if xml_linear is not None:
                    xml_slope = xml_linear.find('slope/real')
                    if xml_slope is None:
                        xml_slope = xml_linear.find('slope/integer')
                    xml_intercept = xml_linear.find('intercept/real')
                    if xml_intercept is None:
                        xml_intercept = xml_linear.find('intercept/integer')
                    if xml_slope is not None or xml_intercept is not None:
                        mcda_concept = xml_threshold.get('mcdaConcept')
                        if mcda_concept is not None:
                            if xml_slope is not None:
                                slope = float(xml_slope.text)
                            else:
                                slope = 0.0
                            if xml_intercept is not None:
                                intercept = float(xml_intercept.text)
                            else:
                                intercept = 0.0
                            threshold = {'slope': slope, 'intercept': intercept}
                            crit_thresholds[mcda_concept] = threshold
            thresholds[criterion_id] = crit_thresholds
        else:
            thresholds[criterion_id] = {}
    return thresholds
def _get_intersection_distillation(xmltree, altId):
"""Allows for using 'intersection_distillation.xml' file instead of
'outranking.xml'.
"""
mcdaConcept = 'Intersection of upwards and downwards distillation'
strSearch = (".//alternativesComparisons"
"[@mcdaConcept=\'" + mcdaConcept + "\']")
comparisons = xmltree.xpath(strSearch)
if comparisons is None or len(comparisons) == 0:
return
else :
comparisons = comparisons[0]
datas = {}
for pair in comparisons.findall ("pairs/pair") :
init = pair.find("initial/alternativeID").text
term = pair.find("terminal/alternativeID").text
if altId.count(init) > 0 :
if altId.count(term) > 0 :
if not(datas.has_key(init)) :
datas[init] = {}
datas[init][term] = 1.0
return datas
def _get_outranking(xmltree, mcda_concept=None):
if xmltree is None:
return None
if mcda_concept == None :
str_search = ".//alternativesComparisons"
else :
str_search = (".//alternativesComparisons"
"[@mcdaConcept=\'" + mcda_concept + "\']")
comparisons = xmltree.xpath(str_search)
if comparisons is None or len(comparisons)==0:
return {}
comparisons = comparisons[0]
if comparisons is None:
return {}
else:
ret = Vividict()
for pair in comparisons.findall("pairs/pair"):
initial = pair.find("initial/alternativeID").text
terminal = pair.find("terminal/alternativeID").text
ret[initial][terminal] = True
return ret
def _get_alternatives_comparisons(xmltree, alternatives, profiles = None,
        categories_profiles=None, use_partials=False, use_value=True,
        mcda_concept=None) :
    """Parameter 'use_partials' designates whether the input contains 'partial'
    (i.e. per-criterion) comparisons.

    Returns a nested Vividict mapping initial -> terminal -> value, where
    value is a scalar (use_partials=False) or a Vividict of per-criterion
    values (use_partials=True); {} when no comparisons match, None when
    `xmltree` is None.
    """
    def _get_value(value_node):
        # Decode a <value>-like node: integer, real, label or boolean;
        # anything else yields None.
        if value_node.find('integer') is not None:
            value = int(value_node.find('integer').text)
        elif value_node.find('real') is not None:
            value = float(value_node.find('real').text)
        elif value_node.find('label') is not None:
            value = value_node.find('label').text
        elif value_node.find('boolean') is not None:
            value = value_node.find('boolean').text
            if value == 'true':
                value = True
            elif value == 'false':
                value = False
            else:
                value = None
        else:
            value = None
        return value
    if xmltree is None:
        return None
    if mcda_concept == None :
        str_search = ".//alternativesComparisons"
    else :
        str_search = (".//alternativesComparisons"
                      "[@mcdaConcept=\'" + mcda_concept + "\']")
    comparisons = xmltree.xpath(str_search)
    if comparisons is None or len(comparisons) == 0:
        return {}
    comparisons = comparisons[0]
    if comparisons == None:
        return {}
    else:
        ret = Vividict()
        for pair in comparisons.findall("pairs/pair"):
            initial = pair.find("initial/alternativeID").text
            terminal = pair.find("terminal/alternativeID").text
            if use_value:
                if not use_partials:
                    value_node = pair.find("value")
                    if value_node is None:
                        f = os.path.split(xmltree.base)[-1]
                        raise RuntimeError("Corrupted '{}' file or wrong value of "
                                           "the 'use_partials' parameter."
                                           .format(f))
                    value = _get_value(value_node)
                else:
                    value_nodes = pair.find("values")
                    if value_nodes is None:
                        f = os.path.split(xmltree.base)[-1]
                        raise RuntimeError("Corrupted '{}' file or wrong value of "
                                           "the 'use_partials' parameter."
                                           .format(f))
                    values = Vividict()
                    for value_node in value_nodes:
                        value_node_id = value_node.get("id")
                        values[value_node_id] = _get_value(value_node)
            else:
                # NOTE(review): with use_value=False and use_partials=True,
                # `values` is never assigned yet is used below — likely a
                # latent NameError for that combination; confirm callers.
                value = 1
            # NOTE(review): `profiles`/`categories_profiles` default to None;
            # `initial in None` raises TypeError unless `initial` is already
            # found in `alternatives` — verify callers always pass lists or
            # rely on short-circuiting.
            if initial in alternatives or initial in profiles or initial in categories_profiles :
                if terminal in alternatives or terminal in profiles or terminal in categories_profiles :
                    if initial not in ret:
                        ret[initial] = Vividict()
                    ret[initial][terminal] = values if use_partials else value
        return ret
# XXX not sure if it's a good idea to return two different data structures
# here, i.e.: for boundary profiles: ['b1', 'b2', 'b3', 'b4'], for central
# profiles: {'b1': 'C2', 'b2': 'C2', 'b3': 'C3'}.
def _get_categories_profiles(tree, comparison_with):
    """Extract profile definitions according to `comparison_with`.

    Returns None for 'alternatives', an ordered list of profile ids for
    'boundary_profiles', or a {profile_id: category_id} dict for
    'central_profiles'; raises RuntimeError for other values or when the
    tree is required but missing.
    """
    def _get_profiles_ordering(last_found, profiles):
        """Gets the ordering of categories (classes) profiles."""
        # NOTE(review): reads categories_profiles_full from the enclosing
        # scope (bound later, before the first call) and recurses after the
        # loop; if no category has `last_found` as its lower profile the
        # recursion never terminates — presumably the input guarantees a
        # well-formed chain; verify.
        for i in categories_profiles_full.values():
            if i.get('lower') == last_found:
                if i.get('upper') is None:
                    return
                profiles.append(i.get('upper'))
                last_found = profiles[-1]
                break
        _get_profiles_ordering(last_found, profiles)
    if tree is None and comparison_with in ('boundary_profiles',
                                            'central_profiles'):
        raise RuntimeError("Missing definitions of profiles (did you "
                           "forget about 'classes_profiles.xml'?).")
    if comparison_with == 'alternatives':
        categories_profiles = None
    elif comparison_with == 'boundary_profiles':
        categories_profiles = []
        # ####### different options which are available here:
        # ### categories_profiles e.g. ['pMG', 'pBM']
        # path = '//categoriesProfiles//alternativeID/text()'
        # categories_profiles = [profile for profile in tree.xpath(path)]
        # ### categories_names e.g. ['Bad', 'Medium', 'Good']
        # categories_names = list(set(tree.xpath('//categoriesProfiles//limits//categoryID/text()')))
        # ### categories_profiles_full e.g.:
        # {'Bad': {'upper': 'pBM'}, 'Medium': {'upper': 'pMG', 'lower': 'pBM'},
        # 'Good': {'lower': 'pMG'}}
        # categories_profiles_full = px.getCategoriesProfiles(tree, categories_names)
        categories_names = list(set(tree.xpath('//categoriesProfiles//limits//categoryID/text()')))
        categories_profiles_full = px.getCategoriesProfiles(tree, categories_names)
        _get_profiles_ordering(None, categories_profiles)
    elif comparison_with == 'central_profiles':
        categories_profiles = {}
        for xmlprofile in tree.findall(".//categoryProfile"):
            try:
                profile_id = xmlprofile.find("alternativeID").text
                category = xmlprofile.find("central/categoryID").text
                categories_profiles[profile_id] = category
            except:
                # Any malformed profile invalidates the whole mapping.
                categories_profiles = {}
                break
    else:
        raise RuntimeError("Wrong comparison type ('{}') specified."
                           .format(comparison_with))
    return categories_profiles
def _get_criteria_interactions(xmltree, criteria_allowed):
    """In the returned dict 'interactions', the most outer key designates
    direction of the interaction effect (i.e. which criterion is affected),
    which is significant in case of 'antagonistic' interaction.

    Returns {interaction_type: [(criterion1, criterion2, value), ...]};
    raises RuntimeError on any structural or semantic violation.
    """
    interaction_types_allowed = ['strengthening', 'weakening', 'antagonistic']
    path = 'criteriaValues[@mcdaConcept="criteriaInteractions"]/criterionValue'
    interactions = {}
    cvs = xmltree.xpath(path)
    if not cvs:
        raise RuntimeError("Wrong or missing definitions for criteria "
                           "interactions.")
    for cv in cvs:
        interaction_type = cv.attrib.get('mcdaConcept')
        if interaction_type not in interaction_types_allowed:
            raise RuntimeError("Wrong interaction type '{}'."
                               .format(interaction_type))
        criteria_involved = cv.xpath('.//criterionID/text()')
        if len(criteria_involved) != 2:
            raise RuntimeError("Wrong number of criteria for '{}' interaction."
                               .format(interaction_type))
        for criterion in criteria_involved:
            if criterion not in criteria_allowed:
                raise RuntimeError("Unknown criterion '{}' for '{}' interaction."
                                   .format(criterion, interaction_type))
        interaction_value = float(cv.find('./value//').text)
        # Sign constraints: weakening must be negative, strengthening and
        # antagonistic positive; zero is never allowed.
        if ((interaction_value > 0 and interaction_type == 'weakening') or
                (interaction_value < 0 and interaction_type in ('strengthening','antagonistic')) or
                (interaction_value == 0)):
            raise RuntimeError("Wrong value for '{}' interaction."
                               .format(interaction_type))
        # The same (unordered) pair may not appear in both 'strengthening'
        # and 'weakening'.
        if interaction_type == 'strengthening' and 'weakening' in interactions.keys():
            for i in interactions['weakening']:
                if set(i[:2]) == set(criteria_involved):
                    raise RuntimeError("'strengthening' and 'weakening' "
                                       "interactions are mutually exclusive.")
        elif interaction_type == 'weakening' and 'strengthening' in interactions.keys():
            for i in interactions['strengthening']:
                if set(i[:2]) == set(criteria_involved):
                    raise RuntimeError("'strengthening' and 'weakening' "
                                       "interactions are mutually exclusive.")
        c1, c2 = criteria_involved
        try:
            interactions[interaction_type].append((c1, c2, interaction_value))
        except KeyError:
            interactions.update({interaction_type: [(c1, c2, interaction_value)]})
    return interactions
# XXX this function is big and ugly *as hell*, but at least it's a bit easier
# to maintain than it used to be, since everything now is in one place instead
# of being scattered amongst many different modules.
def _get_comparison_with(trees, kwargs):
    """Resolve the 'comparison_with' parameter.

    An explicit kwarg takes precedence (consistent with the 'credibility'
    and 'categories_profiles' branches below); otherwise it is read from
    the 'method_parameters' tree when that tree is present.
    """
    comparison_with = kwargs.get('comparison_with')
    if comparison_with is None and 'method_parameters' in trees:
        comparison_with = px.getParameterByName(trees['method_parameters'],
                                                'comparison_with')
    return comparison_with


def _get_use_partials(trees, kwargs):
    """Resolve the 'use_partials' flag the same way as above.

    Defaults to False when it is defined neither in kwargs nor in
    'method_parameters' (the original code could leave the variable
    unbound in that case).
    """
    use_partials = kwargs.get('use_partials')
    if use_partials is None:
        parameter = None
        if 'method_parameters' in trees:
            parameter = px.getParameterByName(trees['method_parameters'],
                                              'use_partials')
        use_partials = parameter == 'true'
    return use_partials


def _get_comparisons(trees, kwargs, tree_name, use_value=True):
    """Shared loader for the concordance/discordance parameters and their
    'crisp' variants, which previously duplicated this code four times."""
    alternatives = px.getAlternativesID(trees['alternatives'])
    comparison_with = _get_comparison_with(trees, kwargs)
    use_partials = _get_use_partials(trees, kwargs)
    categories_profiles = None
    profiles = None
    if comparison_with in ('boundary_profiles', 'central_profiles'):
        categories_profiles = _get_categories_profiles(trees['categories_profiles'],
                                                       comparison_with)
    if comparison_with == 'profiles':
        profiles = px.getProfilesID(trees['profiles'])
    # 'use_value' is only forwarded for the crisp variants so that the
    # default of _get_alternatives_comparisons keeps applying otherwise,
    # exactly as in the original call sites
    extra = {} if use_value else {'use_value': False}
    return _get_alternatives_comparisons(trees[tree_name], alternatives,
                                         profiles=profiles,
                                         categories_profiles=categories_profiles,
                                         use_partials=use_partials, **extra)


# XXX this function is big and ugly *as hell*, but at least it's a bit easier
# to maintain than it used to be, since everything now is in one place instead
# of being scattered amongst many different modules.
def get_input_data(input_dir, filenames, params, **kwargs):
    """Read the XMCDA input files listed in 'filenames' from 'input_dir'
    and return a data object with one attribute per requested parameter.

    'params' lists the parameters to load; values passed in **kwargs
    (e.g. 'comparison_with', 'use_partials') take precedence over those
    found in the 'method_parameters' tree.  Raises RuntimeError for
    unknown parameter names and invalid input values.
    """
    trees = _get_trees(input_dir, filenames)
    d = _create_data_object(params)
    for p in params:
        if p == 'alternatives':
            d.alternatives = px.getAlternativesID(trees['alternatives'])
        elif p == 'profiles':
            d.profiles = px.getProfilesID(trees['profiles'])
        elif p == 'categories_profiles':
            comparison_with = kwargs.get('comparison_with')
            if comparison_with is None:
                comparison_with = px.getParameterByName(trees['method_parameters'],
                                                        'comparison_with')
            d.categories_profiles = _get_categories_profiles(trees.get('categories_profiles'),
                                                             comparison_with)
        elif p == 'categories_rank':
            categories = px.getCategoriesID(trees['categories'])
            d.categories_rank = px.getCategoriesRank(trees['categories'], categories)
        elif p == 'comparison_with':
            d.comparison_with = px.getParameterByName(trees['method_parameters'],
                                                      'comparison_with')
        elif p == 'concordance':
            # BUG FIX: the original tested for a misspelled
            # 'methos_parameters' tree here (and in the discordance/crisp
            # variants), so 'method_parameters' was never actually consulted
            d.concordance = _get_comparisons(trees, kwargs, 'concordance')
        elif p == 'crisp_concordance':
            # NOTE: stored on the 'concordance' attribute, as before
            d.concordance = _get_comparisons(trees, kwargs, 'concordance',
                                             use_value=False)
        elif p == 'credibility':
            alternatives = px.getAlternativesID(trees['alternatives'])
            comparison_with = _get_comparison_with(trees, kwargs)
            if comparison_with in ('boundary_profiles', 'central_profiles'):
                categories_profiles = _get_categories_profiles(trees['categories_profiles'],
                                                               comparison_with)
            else:
                categories_profiles = None
            eliminate_cycles_method = px.getParameterByName(trees.get('method_parameters'),
                                                            'eliminate_cycles_method')
            tree = trees.get('credibility')
            if eliminate_cycles_method == 'cut_weakest' and tree is None:
                raise RuntimeError("'cut_weakest' option requires credibility as "
                                   "an additional input (apart from outranking).")
            d.credibility = _get_alternatives_comparisons(tree, alternatives,
                                                          categories_profiles=categories_profiles)
        elif p == 'criteria':
            if 'criteria' in trees:
                d.criteria = px.getCriteriaID(trees['criteria'])
        elif p == 'cut_threshold':
            cut_threshold = px.getParameterByName(trees['method_parameters'],
                                                  'cut_threshold')
            if cut_threshold is None or not (0 <= float(cut_threshold) <= 1):
                raise RuntimeError(
                    "'cut_threshold' should be in range [0, 1] "
                    "(most commonly used values are 0.6 or 0.7)."
                )
            d.cut_threshold = cut_threshold
        # 'cv_crossed' == 'counter-veto crossed'
        elif p == 'cv_crossed':
            alternatives = px.getAlternativesID(trees['alternatives'])
            comparison_with = px.getParameterByName(trees['method_parameters'],
                                                    'comparison_with')
            if comparison_with in ('boundary_profiles', 'central_profiles'):
                categories_profiles = _get_categories_profiles(trees['categories_profiles'],
                                                               comparison_with)
            else:
                categories_profiles = None
            d.cv_crossed = _get_alternatives_comparisons(trees['counter_veto_crossed'],
                                                         alternatives,
                                                         categories_profiles=categories_profiles,
                                                         use_partials=True,
                                                         mcda_concept='counterVetoCrossed')
        elif p == 'discordance':
            d.discordance = _get_comparisons(trees, kwargs, 'discordance')
        elif p == 'crisp_discordance':
            d.discordance = _get_comparisons(trees, kwargs, 'discordance',
                                             use_value=False)
        elif p == 'preorder':
            if 'preorder' in trees:
                alternatives = px.getAlternativesID(trees['alternatives'])
                d.preorder = px.getAlternativeValue(trees['preorder'], alternatives, None)
        elif p == 'downwards':
            alternatives = px.getAlternativesID(trees['alternatives'])
            d.downwards = px.getAlternativeValue(trees['downwards'], alternatives, None)
        elif p == 'upwards':
            alternatives = px.getAlternativesID(trees['alternatives'])
            d.upwards = px.getAlternativeValue(trees['upwards'], alternatives, None)
        elif p == 'eliminate_cycles_method':
            d.eliminate_cycles_method = px.getParameterByName(trees['method_parameters'],
                                                              'eliminate_cycles_method')
        elif p == 'interactions':
            criteria = px.getCriteriaID(trees['criteria'])
            d.interactions = _get_criteria_interactions(trees['interactions'], criteria)
        elif p == 'outranking':
            alternatives = px.getAlternativesID(trees['alternatives'])
            outranking = _get_intersection_distillation(trees['outranking'], alternatives)
            if outranking is None:
                outranking = px.getAlternativesComparisons(trees['outranking'],
                                                           alternatives)
            if outranking == {}:
                outranking = _get_outranking(trees['outranking'])
            d.outranking = outranking
        elif p == 'nonoutranking':
            if 'nonoutranking' in trees:
                alternatives = px.getAlternativesID(trees['alternatives'])
                nonoutranking = _get_intersection_distillation(trees['nonoutranking'],
                                                               alternatives)
                if nonoutranking is None:
                    nonoutranking = px.getAlternativesComparisons(trees['nonoutranking'],
                                                                  alternatives)
                if nonoutranking == {}:
                    nonoutranking = _get_outranking(trees['nonoutranking'])
                d.nonoutranking = nonoutranking
        elif p == 'performances':
            d.performances = px.getPerformanceTable(trees['performance_table'], None, None)
        elif p == 'pref_directions':
            criteria = px.getCriteriaID(trees['criteria'])
            d.pref_directions = px.getCriteriaPreferenceDirections(trees['criteria'],
                                                                   criteria)
        elif p == 'profiles_performance_table':
            # BUG FIX: 'comparison_with' used to leak in from a previous
            # loop iteration here (NameError when this param came first);
            # resolve it explicitly instead
            comparison_with = _get_comparison_with(trees, kwargs)
            if comparison_with in ('boundary_profiles', 'central_profiles'):
                tree = trees.get('profiles_performance_table')
                if tree is None:
                    msg = ("Missing profiles performance table (did you forget "
                           "to provide 'profiles_performance_table.xml' file?).")
                    raise RuntimeError(msg)
                d.profiles_performance_table = px.getPerformanceTable(tree, None, None)
            else:
                d.profiles_performance_table = None
        elif p == 'reinforcement_factors':
            criteria = px.getCriteriaID(trees['criteria'])
            factors = {}
            for c in criteria:
                rf = px.getCriterionValue(trees['reinforcement_factors'], c,
                                          'reinforcement_factors')
                if len(rf) == 0:
                    continue
                if rf.get(c) <= 1:
                    # BUG FIX: the '{}' placeholder was never filled in
                    msg = ("Reinforcement factor for criterion '{}' should be "
                           "higher than 1.0 (ideally between 1.2 and 1.5).")
                    raise RuntimeError(msg.format(c))
                factors.update(rf)
            d.reinforcement_factors = factors
        elif p == 'thresholds':
            d.thresholds = _get_thresholds(trees['criteria'])
        elif p == 'weights':
            criteria = px.getCriteriaID(trees['criteria'])
            d.weights = px.getCriterionValue(trees['weights'], criteria)
        elif p == 'z_function':
            d.z_function = px.getParameterByName(trees['method_parameters'],
                                                 'z_function')
        elif p in ('with_denominator', 'use_partials', 'use_pre_veto'):
            # boolean method parameters share the 'true'/'false' text format
            parameter = px.getParameterByName(trees['method_parameters'], p)
            setattr(d, p, parameter == 'true')
        elif p in ('alpha', 'beta', 's1', 's2', 'crisp_outranking', 'direction',
                   'conc_threshold', 'disc_threshold', 'comprehensive'):
            # plain pass-through method parameters
            setattr(d, p, px.getParameterByName(trees['method_parameters'], p))
        else:
            raise RuntimeError("Unknown parameter '{}' specified.".format(p))
    return d
###############################################################################
# Converting the output into the XMCDA format. #
###############################################################################
# 'comparables' should be a tuple e.g. (('a01', 'a02', 'a03'), ('b01', 'b02')).
# The order of nodes in xml file will be derived from its content.
# All the sorting should be done here (i.e. before serialization), I think.
# accept both 8-bit and unicode strings as 'label' values on Python 2;
# Python 3 has only str
try:
    _STRING_TYPES = (str, unicode)
except NameError:  # Python 3
    _STRING_TYPES = (str,)

def comparisons_to_xmcda(comparisons, comparables, use_partials=False,
                         mcda_concept=None, with_profile=False):
    """Serialize pairwise comparisons into an XMCDA
    'alternativesComparisons' element.

    'comparisons' maps initial -> terminal -> value (or, with
    use_partials, initial -> terminal -> {criterion_id: value}).
    'comparables' is a 2-tuple of orderings; when both entries are equal
    it is treated as alternatives vs alternatives, otherwise as
    alternatives vs profiles.  Pairs absent from 'comparisons' are
    silently skipped.  Raises RuntimeError for a wrong number of
    comparables or an unserializable value type.
    """
    trm = 'alternativeID'
    #if with_profile == True:
    #    trm = 'categoryID'
    # XXX maybe it's better to get/set those types globally?
    # (i.e. for the whole file)
    def _get_value_type(value):
        """Map a Python value to the matching XMCDA value tag."""
        # exact-type checks on purpose: bool is a subclass of int, so
        # isinstance() would misclassify True/False as integers
        if type(value) == float:
            return 'real'
        if type(value) == int:
            return 'integer'
        if type(value) in _STRING_TYPES:
            return 'label'
        if type(value) == bool:
            return 'boolean'
        raise RuntimeError("Unknown type '{}'.".format(type(value)))
    if len(comparables) != 2:
        # BUG FIX: the original message used len(ordering), which is not
        # defined yet at this point and raised NameError instead
        raise RuntimeError("You have to specify exactly 2 comparables for "
                           "this serialization function (instead of {})."
                           .format(len(comparables)))
    elif comparables[0] == comparables[1]:  # alternatives vs alternatives
        ordering = [(a, b) for a in comparables[0] for b in comparables[0]]
    else:  # alternatives vs profiles
        ordering = []
        for a in comparables[0]:
            for b in comparables[1]:
                ordering.append((a, b))
        if not with_profile:
            for b in comparables[1]:
                for a in comparables[0]:
                    ordering.append((b, a))
    if not mcda_concept:
        xmcda = etree.Element('alternativesComparisons')
    else:
        xmcda = etree.Element('alternativesComparisons',
                              mcdaConcept=mcda_concept)
    pairs = etree.SubElement(xmcda, 'pairs')
    for alt1, alt2 in ordering:
        if alt1 not in comparisons or alt2 not in comparisons[alt1]:
            continue  # no value recorded for this pair
        pair = etree.SubElement(pairs, 'pair')
        initial = etree.SubElement(pair, 'initial')
        alt_id = etree.SubElement(initial, 'alternativeID')
        alt_id.text = alt1
        terminal = etree.SubElement(pair, 'terminal')
        alt_id = etree.SubElement(terminal, trm)
        alt_id.text = alt2
        if not use_partials:
            value = comparisons[alt1][alt2]
            value_type = _get_value_type(value)
            value_node = etree.SubElement(pair, 'value')
            v = etree.SubElement(value_node, value_type)
            v.text = ('true' if value is True else 'false') \
                if value_type == 'boolean' else str(value)
        else:
            values = etree.SubElement(pair, 'values')
            # sort per-criterion values by their id (XXX until I find a
            # better solution)
            items = sorted(comparisons[alt1][alt2].items(),
                           key=lambda kv: kv[0])
            for value_id, value in items:
                value_type = _get_value_type(value)
                value_node = etree.SubElement(values, 'value', id=value_id)
                v = etree.SubElement(value_node, value_type)
                if value_type == 'boolean':
                    v.text = 'true' if value is True else 'false'
                else:
                    v.text = str(value)
    return xmcda
def outranking_to_xmcda(outranking, mcda_concept=None):
    """Serialize a crisp outranking relation to an XMCDA
    'alternativesComparisons' element.

    'outranking' is a nested dict ({initial: {terminal: True}}); one
    <pair> is emitted per (initial, terminal) edge, sorted
    lexicographically by the pair of ids.
    """
    def _extract(dict_in, list_of_tuples_out, outer_key=None):
        """Extracts a list of (initial, terminal) tuples from nested dicts."""
        # PORTABILITY FIX: dict.iteritems() is Python-2-only;
        # dict.items() traverses the same entries and works on Python 3
        for key, value in dict_in.items():
            if isinstance(value, dict):
                _extract(value, list_of_tuples_out, outer_key=key)
            elif isinstance(value, bool):
                list_of_tuples_out.append((outer_key, key))
        return list_of_tuples_out
    if not mcda_concept:
        xmcda = etree.Element('alternativesComparisons')
    else:
        xmcda = etree.Element('alternativesComparisons',
                              mcdaConcept=mcda_concept)
    pairs_node = etree.SubElement(xmcda, 'pairs')
    pairs = []
    _extract(outranking, pairs)
    # tuples sort lexicographically, so there's no need for a key function
    pairs.sort()
    for initial, terminal in pairs:
        pair_node = etree.SubElement(pairs_node, 'pair')
        initial_node = etree.SubElement(pair_node, 'initial')
        alt_node = etree.SubElement(initial_node, 'alternativeID')
        alt_node.text = initial
        terminal_node = etree.SubElement(pair_node, 'terminal')
        alt_node = etree.SubElement(terminal_node, 'alternativeID')
        alt_node.text = terminal
    return xmcda
def ranks_to_xmcda(outranking, type, mcda_concept=None):
    """Serialize a {alternative: rank} mapping to an XMCDA
    'alternativesValues' element.

    'type' names the XMCDA value tag to emit (e.g. 'integer'); each rank
    is rendered with str().  Entries are sorted by alternative id.
    """
    if mcda_concept:
        xmcda = etree.Element('alternativesValues',
                              mcdaConcept=mcda_concept)
    else:
        xmcda = etree.Element('alternativesValues')
    # tuples sort lexicographically, so no key function is needed here
    entries = sorted((alt, str(rank)) for alt, rank in outranking.items())
    for alt, rank_text in entries:
        holder = etree.SubElement(xmcda, 'alternativeValue')
        alt_node = etree.SubElement(holder, 'alternativeID')
        alt_node.text = alt
        value_node = etree.SubElement(holder, 'value')
        rank_node = etree.SubElement(value_node, type)
        rank_node.text = rank_text
    return xmcda
# XXX maybe passing alternatives as a second argument and using them for
# sorting would be a good idea here?
def assignments_to_xmcda(assignments):
    """Serialize {alternative: category} assignments to an XMCDA
    'alternativesAffectations' element, sorted by alternative id."""
    xmcda = etree.Element('alternativesAffectations')
    for alternative, category in sorted(assignments.items()):
        affectation = etree.SubElement(xmcda, 'alternativeAffectation')
        etree.SubElement(affectation, 'alternativeID').text = alternative
        etree.SubElement(affectation, 'categoryID').text = category
    return xmcda
# XXX maybe passing alternatives as a second argument and using them for
# sorting would be a good idea here?
def assignments_as_intervals_to_xmcda(assignments):
    """Serialize {alternative: (lower, upper)} category-interval
    assignments to an XMCDA 'alternativesAffectations' element, sorted
    by alternative id."""
    xmcda = etree.Element('alternativesAffectations')
    for alternative, bounds in sorted(assignments.items()):
        affectation = etree.SubElement(xmcda, 'alternativeAffectation')
        etree.SubElement(affectation, 'alternativeID').text = alternative
        interval = etree.SubElement(affectation, 'categoriesInterval')
        # 'descending', 'pessimistic', 'conjunctive'
        lower = etree.SubElement(interval, 'lowerBound')
        etree.SubElement(lower, 'categoryID').text = bounds[0]
        # 'ascending', 'optimistic', 'disjunctive'
        upper = etree.SubElement(interval, 'upperBound')
        etree.SubElement(upper, 'categoryID').text = bounds[1]
    return xmcda
###############################################################################
# Dealing with the output files etc. #
###############################################################################
def write_xmcda(xmcda, filename):
    """Write an etree.Element to 'filename', wrapped in the module's
    XMCDA HEADER/FOOTER boilerplate.

    Re-raises IOError with a "<reason>: '<file>'" message on failure.
    """
    document = etree.ElementTree(xmcda)
    try:
        with open(filename, 'w') as output:
            output.write(HEADER)
            document.write(output, pretty_print=True, encoding='UTF-8')
            output.write(FOOTER)
    except IOError as why:
        raise IOError("{}: '{}'".format(why.strerror, why.filename))
def print_xmcda(xmcda):
    """Takes etree.Element as input and pretty-prints it."""
    rendered = etree.tostring(xmcda, pretty_print=True)
    print(rendered)
def get_error_message(err):
    """Return an "ExceptionName: message" string for an exception.

    BUG FIX: the original regex-scanned str(type(err)) for a name after a
    dot, which raises IndexError whenever the class repr has no dot (all
    builtin exceptions on Python 3); the class __name__ is both simpler
    and portable and yields the same text where the old code worked.
    """
    return ': '.join((type(err).__name__, str(err)))
def create_messages_file(error_messages, log_messages, out_dir):
    """Write a 'messages.xml' XMCDA methodMessages file into 'out_dir'.

    Each entry of 'error_messages' / 'log_messages' becomes a stripped
    CDATA text node; does nothing when 'out_dir' is empty/None.
    """
    if not out_dir:
        return
    xmcda = etree.Element('methodMessages')
    for messages, tag in ((error_messages, 'errorMessage'),
                          (log_messages, 'logMessage')):
        if not messages:
            continue
        for message in messages:
            message_node = etree.SubElement(xmcda, tag)
            text_node = etree.SubElement(message_node, 'text')
            text_node.text = etree.CDATA(message.strip())
    write_xmcda(xmcda, os.path.join(out_dir, 'messages.xml'))
|
|
#!/usr/bin/env python
"""
"""
__docformat__ = 'restructuredtext'
__version__ = '$Id: //depot/task/DEV-99/client/tests.py#13 $'
import sys
sys.path.insert(0, '../../')
sys.path.insert(0, '../layout/')
from pyglet.window import *
from pyglet import clock
from pyglet.gl import *
from pyglet import media
import layout
from wydget import GUI
from wydget import event, dialogs, dragndrop, anim, layouts, widgets, loadxml
# Command-line handling: with an argument we run one named test file,
# without one the bottom of this script cycles through tests/*.xml.
if len(sys.argv) > 1:
    if '--help' in sys.argv:
        print('%s [test_file.xml] [--dump] [--once]' % sys.argv[0])
        print(' test_file.xml -- a single XML file to display (see tests/)')
        print(' --dump -- text dump of constructed GUI objects')
        print(' --once -- render the GUI exactly once and exit')
        sys.exit(0)
    print('-' * 75)
    print('To exit the test, hit <escape> or close the window')
    print('-' * 75)
else:
    print('-' * 75)
    print('To move on to the next test, hit <escape>.')
    print('Close the window to exit the tests.')
    print('-' * 75)
# Single shared window for every test; vsync off so the FPS counter is
# meaningful.
window = Window(width=800, height=600, vsync=False, resizable=True)
# clock.set_fps_limit(10)
fps = clock.ClockDisplay(color=(1, .5, .5, 1))
window.push_handlers(fps)
class MyEscape:
    """Window handler that records an <escape> key press so the render
    loop in run() can move on without closing the window."""
    has_exit = False

    def on_key_press(self, symbol, modifiers):
        # ignore everything except <escape>; returning None lets the
        # event propagate to other handlers
        if symbol != key.ESCAPE:
            return None
        self.has_exit = True
        return event.EVENT_HANDLED
# The escape handler stays installed for the whole session; run() resets
# its has_exit flag before each test.
my_escape = MyEscape()
window.push_handlers(my_escape)
def run(xml_file):
    """Build a GUI from xml_file, attach the demo event handlers and run
    the render loop until <escape> is pressed or the window is closed.

    Returns window.has_exit so the caller can stop cycling through the
    remaining tests when the window itself was closed.
    """
    gui = GUI(window)
    loadxml.fromFile(gui, xml_file)
    if '--dump' in sys.argv:
        print('-' * 75)
        gui.dump()
        print('-' * 75)
    window.push_handlers(gui)
    # elements matching .draggable may be dragged around
    gui.push_handlers(dragndrop.DragHandler('.draggable'))
    # --- demo event handlers, attached by selector ---
    @gui.select('#press-me')
    def on_click(widget, *args):
        print('on_click', widget)
        return event.EVENT_HANDLED
    @gui.select('#enable-other')
    def on_click(widget, *args):
        # toggles the enabled state of the #press-me widget
        w = gui.get('#press-me')
        w.setEnabled(not w.isEnabled())
        return event.EVENT_HANDLED
    @gui.select('button, text-button')
    def on_click(widget, *args):
        # debug trace only; EVENT_UNHANDLED lets the click propagate
        print('DEBUG', widget, 'PRESSED')
        return event.EVENT_UNHANDLED
    @gui.select('.show-value')
    def on_change(widget, value):
        print('DEBUG', widget, 'VALUE CHANGED', repr(value))
        return event.EVENT_UNHANDLED
    @gui.select('frame#menu-test', 'on_click')
    def on_menu(w, x, y, button, modifiers, click_count):
        # pop up the test menu on the platform's menu-activating click
        if not widgets.PopupMenu.isActivatingClick(button, modifiers):
            return event.EVENT_UNHANDLED
        gui.get('#test-menu').expose((x, y))
        return event.EVENT_HANDLED
    @gui.select('.hover')
    def on_element_enter(widget, *args):
        print('ENTER ELEMENT', widget.id)
        return event.EVENT_HANDLED
    @gui.select('.hover')
    def on_element_leave(widget, *args):
        print('LEAVE ELEMENT', widget.id)
        return event.EVENT_HANDLED
    @gui.select('.drawer-control')
    def on_click(widget, *args):
        # each control toggles the drawer with the matching id
        id = widget.id.replace('drawer-control', 'test-drawer')
        gui.get('#' + id).toggle_state()
        return event.EVENT_HANDLED
    @gui.select('#question-dialog-test')
    def on_click(widget, *args):
        def f(*args):
            print('DIALOG SAYS', args)
        dialogs.Question(widget.getGUI(), 'Did this appear correctly?',
            callback=f).run()
        return event.EVENT_HANDLED
    @gui.select('#message-dialog-test')
    def on_click(widget, *args):
        def f(*args):
            print('DIALOG SAYS', args)
        dialogs.Message(widget.getGUI(), 'Hello, World!', callback=f).run()
        return event.EVENT_HANDLED
    @gui.select('#music-test')
    def on_click(widget, x, y, button, modifiers, click_count):
        # right-click opens a file dialog to replace the music widget
        if not button & mouse.RIGHT:
            return event.EVENT_UNHANDLED
        def load_music(file=None):
            if not file:
                return
            gui.get('#music-test').delete()
            m = widgets.Music(gui, file, id='music-test', playing=True)
            m.gainFocus()
        dialogs.FileOpen(gui, callback=load_music).run()
        return event.EVENT_HANDLED
    @gui.select('#movie-test')
    def on_click(widget, x, y, button, modifiers, click_count):
        # right-click opens a file dialog to replace the movie widget
        if not button & mouse.RIGHT:
            return event.EVENT_UNHANDLED
        def load_movie(file=None):
            print('DIALOG SELECTION:', file)
            if not file:
                return
            gui.get('#movie-test').delete()
            m = widgets.Movie(gui, file, id='movie-test', playing=True)
            m.gainFocus()
        dialogs.FileOpen(gui, callback=load_movie).run()
        return event.EVENT_HANDLED
    @gui.select('#movie-test')
    def on_text(widget, text):
        # typing 'f' pauses the movie, goes fullscreen and schedules a
        # resume after 10 seconds
        if text == 'f':
            gui.get('#movie-test').video.pause()
            anim.Delayed(gui.get('#movie-test').video.play, duration=10)
            window.set_fullscreen()
        return event.EVENT_HANDLED
    @gui.select('.droppable')
    def on_drop(widget, x, y, button, modifiers, element):
        # accept the dragged element and clear the highlight colour
        element.reparent(widget)
        widget.bgcolor = (1, 1, 1, 1)
        return event.EVENT_HANDLED
    @gui.select('.droppable')
    def on_drag_enter(widget, x, y, element):
        # highlight the target while a drag hovers over it
        widget.bgcolor = (.8, 1, .8, 1)
        return event.EVENT_HANDLED
    @gui.select('.droppable')
    def on_drag_leave(widget, x, y, element):
        widget.bgcolor = (1, 1, 1, 1)
        return event.EVENT_HANDLED
    # the xhtml sample (when present) gets a click handler on its layout
    try:
        sample = gui.get('#xhtml-sample')
    except KeyError:
        sample = None
    if sample:
        @layout.select('#click-me')
        def on_mouse_press(element, x, y, button, modifiers):
            print('CLICK ON', element)
            return event.EVENT_HANDLED
        sample.label.push_handlers(on_mouse_press)
    # sweep any .progress-me widgets' value back and forth over 5 seconds
    if gui.has('.progress-me'):
        class Progress:
            progress = 0
            direction = 1
            def animate(self, dt):
                self.progress += dt * self.direction
                if self.progress > 5:
                    self.progress = 5
                    self.direction = -1
                elif self.progress < 0:
                    self.progress = 0
                    self.direction = 1
                for e in gui.get('.progress-me'):
                    e.value = self.progress / 5.
        animate_progress = Progress().animate
        clock.schedule(animate_progress)
    # main render loop: runs until <escape> or the window is closed
    my_escape.has_exit = False
    while not (window.has_exit or my_escape.has_exit):
        clock.tick()
        window.dispatch_events()
        media.dispatch_events()
        glClearColor(.2, .2, .2, 1)
        glClear(GL_COLOR_BUFFER_BIT)
        gui.draw()
        fps.draw()
        window.flip()
        if '--once' in sys.argv:
            window.close()
            sys.exit()
    if '--dump' in sys.argv:
        print('-' * 75)
        gui.dump()
        print('-' * 75)
    if gui.has('.progress-me'):
        clock.unschedule(animate_progress)
    # reset everything
    window.pop_handlers()
    gui.delete()
    window.set_size(800, 600)
    return window.has_exit
# Entry point: run the single named test, or cycle through every XML
# file in tests/ until run() reports the window was closed.
if len(sys.argv) > 1:
    run(sys.argv[1])
else:
    import os
    for file in os.listdir('tests'):
        if not file.endswith('.xml'):
            continue
        if not os.path.isfile(os.path.join('tests', file)):
            continue
        print('Running', file)
        if run(os.path.join('tests', file)):
            break
window.close()
|
|
class ReverseComplement(object):
    """Compute the (reverse) complement of a DNA sequence.

    Degenerate IUPAC bases (N, Y, R, ...) are supported; str(instance)
    returns the computed complement, which is calculated eagerly on
    construction.

    Parameters:
        sequence -- the DNA sequence (a string of IUPAC letters).
        reverse  -- when True (default) compute the reverse complement,
                    otherwise the plain complement.
        case     -- 'upper' (default) / 'lower' force the output case;
                    'original' mirrors the case of each input base.
    """

    def __init__(self, sequence, reverse=True, case='upper'):
        case = case.lower()
        if case not in ('upper', 'lower', 'original'):
            raise ValueError('Case option must be set to upper, lower, or original.')
        self.case = case
        # orient the sequence first so that the per-base case pattern
        # (used by case='original') lines up with the output bases
        oriented = sequence[::-1] if reverse else sequence
        # work in uppercase internally: the complement tables only hold
        # uppercase keys
        self.inputSeq = oriented.upper()
        self.complementTable = self.createComplementTable()
        self.complementLists = self.createComplementLists()
        self.checkInput()
        if self.case == 'original':
            # BUG FIX: the case pattern used to be taken from the
            # unreversed argument, so with reverse=True it was applied
            # back-to-front; use the oriented sequence instead
            self.lowercaseList = self.getCaseList(oriented)
        self.outputSeqString = self.createOutputString()
        self.outputList = False  # lazy cache for permutations()

    def __str__(self):
        """str(instance) is the computed (reverse) complement."""
        return self.outputSeqString

    def getCaseList(self, sequence):
        """Return a per-position list of booleans: True where 'sequence'
        holds a lowercase letter."""
        return [letter.islower() for letter in sequence]

    def reconcileCases(self, sequence):
        """Lowercase the positions of 'sequence' recorded as lowercase
        in self.lowercaseList (only used for case='original')."""
        letters = list(sequence)
        for i, was_lower in enumerate(self.lowercaseList):
            if was_lower:
                letters[i] = letters[i].lower()
        return "".join(letters)

    def _applyCase(self, sequence):
        """Apply the requested case option to an output sequence."""
        if self.case == 'original':
            return self.reconcileCases(sequence)
        if self.case == 'lower':
            # BUG FIX: 'lower' was accepted by the validator but never
            # actually applied to the output
            return sequence.lower()
        return sequence  # 'upper': tables already produce uppercase

    def createComplementTable(self):
        """Return the DNA base -> complement mapping (IUPAC, uppercase)."""
        return {"A": "T", "T": "A", "G": "C", "C": "G",
                "Y": "R", "R": "Y", "S": "S", "W": "W",
                "K": "M", "M": "K", "B": "V", "D": "H",
                "H": "D", "V": "B", "N": "N"}

    def createComplementLists(self):
        """Return base -> list of all non-degenerate complement bases."""
        return {"A": ["T"], "T": ["A"], "G": ["C"], "C": ["G"],
                "Y": ["G", "A"], "R": ["T", "C"], "S": ["C", "G"],
                "W": ["T", "A"], "K": ["A", "C"], "M": ["T", "G"],
                "B": ["G", "C", "A"], "D": ["T", "C", "A"],
                "H": ["T", "G", "A"], "V": ["T", "G", "C"],
                "N": ["T", "G", "C", "A"]}

    def checkInput(self):
        """Raise ValueError if the sequence has a non-IUPAC letter."""
        for letter in self.inputSeq:
            if letter not in self.complementTable:
                raise ValueError(letter + " in " + self.inputSeq +
                                 " is not a valid DNA base.")

    def createOutputString(self):
        """Return the complement of self.inputSeq as a single string."""
        output = "".join(self.complementTable[letter]
                         for letter in self.inputSeq)
        return self._applyCase(output)

    def permutations(self):
        """Return every non-degenerate complement sequence.

        The (potentially exponentially large) list is computed once and
        cached on self.outputList.
        """
        import itertools
        if self.outputList:
            return self.outputList
        options = [self.complementLists[letter] for letter in self.inputSeq]
        self.outputList = [self._applyCase(''.join(combo))
                           for combo in itertools.product(*options)]
        return self.outputList
class RNAReverseComplement(ReverseComplement):
    """ReverseComplement variant for RNA.

    Complements use Uracil instead of Thymine (A pairs with U) and 'U'
    is accepted in the input; everything else is inherited from the DNA
    base class, so only the two table builders are overridden.
    """

    def createComplementTable(self):
        """Override: RNA base -> complement mapping (uppercase)."""
        rnaTable = {"A": "U", "T": "A", "U": "A", "G": "C", "C": "G",
                    "Y": "R", "R": "Y", "S": "S", "W": "W", "K": "M",
                    "M": "K", "B": "V", "D": "H", "H": "D", "V": "B",
                    "N": "N"}
        return rnaTable

    def createComplementLists(self):
        """Override: base -> all non-degenerate RNA complement bases."""
        rnaLists = {"A": ["U"], "T": ["A"], "U": ["A"], "G": ["C"],
                    "C": ["G"], "Y": ["G", "A"], "R": ["U", "C"],
                    "S": ["C", "G"], "W": ["U", "A"], "K": ["A", "C"],
                    "M": ["U", "G"], "B": ["G", "C", "A"],
                    "D": ["U", "C", "A"], "H": ["U", "G", "A"],
                    "V": ["U", "G", "C"], "N": ["U", "G", "C", "A"]}
        return rnaLists
class CaseImprovedReverseComplement(ReverseComplement):
    """Case-preserving reverse complementer.

    Unlike the base class, which upper-cases everything for table lookup and
    then reconciles case afterwards, this variant stores both upper- and
    lower-case keys directly in its lookup tables, so case='original' costs
    nothing extra (no post-hoc reconciliation pass is needed).
    """
    def __init__(self, sequence, reverse = True, case = 'original'):
        """Store the (optionally reversed, case-adjusted) sequence and build
        the lookup tables.

        sequence -- DNA sequence, possibly containing IUPAC degenerate bases.
        reverse  -- reverse the sequence before complementing (default True,
                    since the reverse complement is the common need).
        case     -- 'upper', 'lower', or 'original' (preserve input casing).

        Raises ValueError for an unknown case option; the inherited
        checkInput() reports any invalid sequence letters.
        """
        case = case.lower()
        if case not in ('upper', 'lower', 'original'):
            raise ValueError('Case option must be set to upper, lower, or original.')
        self.case = case
        # [::-1] reverses the string (start from the end, step backwards).
        self.inputSeq = sequence[::-1] if reverse else sequence
        if self.case == 'upper':
            self.inputSeq = self.inputSeq.upper()
        elif self.case == 'lower':
            # BUG FIX: the original line used '==' (a no-op comparison)
            # instead of '=', so case='lower' was silently ignored.
            self.inputSeq = self.inputSeq.lower()
        self.complementTable = self.createComplementTable()  # 1:1 complement map (both cases)
        self.complementLists = self.createComplementLists()  # degenerate expansions (both cases)
        self.checkInput()  # inherited validation of the sequence letters
        self.outputSeqString = self.createOutputString()  # eager, so __str__ is cheap
        # Memo slot for permutations(); False means "not computed yet".
        self.outputList = False
    def createComplementTable(self):
        """Return the 1:1 complement map carrying both upper- and lower-case
        keys, so original casing is preserved by lookup alone."""
        complementTable = {"A": "T", "T": "A", "G": "C", "C": "G",
                           "Y": "R", "R": "Y", "S": "S", "W": "W",
                           "K": "M", "M": "K", "B": "V", "D": "H",
                           "H": "D", "V": "B", "N": "N"}
        # Mirror every entry in lower case rather than listing them twice.
        complementTable.update({k.lower(): v.lower()
                                for k, v in complementTable.items()})
        return complementTable
    def createComplementLists(self):
        """Return the degenerate-base expansion map carrying both upper- and
        lower-case keys (lower-case keys expand to lower-case bases)."""
        complementLists = {"A": ["T"], "T": ["A"], "G": ["C"], "C": ["G"],
                           "Y": ["G", "A"], "R": ["T", "C"], "S": ["C", "G"],
                           "W": ["T", "A"], "K": ["A", "C"], "M": ["T", "G"],
                           "B": ["G", "C", "A"], "D": ["T", "C", "A"],
                           "H": ["T", "G", "A"], "V": ["T", "G", "C"],
                           "N": ["T", "G", "C", "A"]}
        # Mirror every entry in lower case rather than listing them twice.
        complementLists.update({k.lower(): [b.lower() for b in v]
                                for k, v in complementLists.items()})
        return complementLists
    def createOutputString(self):
        """Build the complemented string; casing is already handled by the
        lookup table, so no reconciliation pass is required."""
        output = ""
        for letter in self.inputSeq:
            output += self.complementTable[letter]
        return output
    def permutations(self):
        """Expand degenerate bases into every possible concrete sequence.

        Memoized in self.outputList because the result can grow
        exponentially. No case reconciliation is needed: the expansion
        lists already carry the correct case per position.
        """
        import itertools  # only needed when permutations are requested
        if self.outputList:  # reuse the previously computed expansion
            return self.outputList
        letterList = [self.complementLists[letter] for letter in self.inputSeq]
        self.outputList = [''.join(combo) for combo in itertools.product(*letterList)]
        return self.outputList
def main():
    """Demo / benchmark driver for the reverse-complement classes."""
    import datetime  # only used to time the permutation expansions below

    # Instantiate DNA and RNA reverse complements of the same short template;
    # printing an instance works because __str__ is overloaded upstream.
    dnaShort = ReverseComplement("atgcnyag")
    rnaShort = RNAReverseComplement("atgcnyag")
    print(dnaShort)
    print(rnaShort)

    # Time the degenerate-base expansion of the short template.
    t0 = datetime.datetime.now()
    dna = dnaShort.permutations()
    print("Shorter template took: " + str(datetime.datetime.now() - t0))
    rna = rnaShort.permutations()
    for seq in dna[:5]:  # sample the first few DNA permutations
        print(seq)
    print(str(len(dna)) + " elements")
    for seq in rna[:5]:  # and a few RNA permutations
        print(seq)

    # Repeat with a longer, far more degenerate template (much more work).
    dnaLong = ReverseComplement("atgnnnnkwsrycgtvhdnn")
    rnaLong = RNAReverseComplement("atgnnnnkwsrycgtvhdnn")
    print(dnaLong)
    print(rnaLong)
    t0 = datetime.datetime.now()
    dna = dnaLong.permutations()
    print("Longer template took: " + str(datetime.datetime.now() - t0))
    rna = rnaLong.permutations()

    # Second call should hit the memoized list and return near-instantly,
    # which is why permutations() is not run from the initializer.
    t0 = datetime.datetime.now()
    dna = dnaLong.permutations()
    print("Pre-calculated long template took: " + str(datetime.datetime.now() - t0))
    for seq in dna[:5]:
        print(seq)
    print(str(len(dna)) + " elements")  # the expansion grows exponentially
    for seq in rna[:5]:
        print(seq)

    # Instances are fully independent; __str__ lets us print them anywhere.
    print(ReverseComplement("atgcnyag"))
    revComp = RNAReverseComplement("atgcnyag")
    print(revComp)
    otherInstance = ReverseComplement("agaattccttccagaa")
    print(otherInstance)
    print(rnaLong)

    # Case preservation via the base class (post-hoc reconciliation): slow.
    caseTest = ReverseComplement("AtgNNnnkwSRYcgtvhdnn", case = 'original')
    print(caseTest)
    t0 = datetime.datetime.now()
    dna = caseTest.permutations()
    print("Case preserved longer template took: " + str(datetime.datetime.now() - t0))

    # Case preservation baked into the lookup tables: much faster.
    caseTest = CaseImprovedReverseComplement("AtgNNnnkwSRYcgtvhdnn", case = 'original')
    print(caseTest)
    t0 = datetime.datetime.now()
    dna = caseTest.permutations()
    print("Alternative case preserved longer template took: " + str(datetime.datetime.now() - t0))
main()
|
|
# -*- coding:utf-8 -*-
# Copyright 2017 Xiaomi, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Ulric Qin'
from .bean import Bean
from rrd.config import MAINTAINERS
from rrd.model.portal.action import Action
from rrd.model.user import User
class Expression(Bean):
    """Bean mapping the ``expression`` table: a monitoring alarm expression
    together with the id of the Action (notification config) it triggers.
    """
    _tbl = 'expression'
    _cols = 'id, expression, func, op, right_value, max_step, priority, note, action_id, create_user, pause'
    def __init__(self, _id, expression, func, op, right_value, max_step, priority, note, action_id,
                 create_user, pause):
        # Plain column holders; see _cols above for the table layout.
        self.id = _id
        self.expression = expression
        self.func = func
        self.op = op
        self.right_value = right_value
        self.max_step = max_step
        self.priority = priority
        self.note = note
        self.action_id = action_id
        self.create_user = create_user
        self.pause = pause
        # Related Action object; None here, attached elsewhere by callers
        # (writable() below reads it).
        self.action = None
    @classmethod
    def save_or_update(cls, expression_id, expression, func, op, right_value, uic_groups, max_step, priority, note, url,
                       callback, before_callback_sms, before_callback_mail,
                       after_callback_sms, after_callback_mail, login_user):
        """Validate and normalize the expression text, then insert a new row
        (no expression_id) or update an existing one.

        Returns '' on success or a human-readable error message on failure.
        """
        # Only 'each(metric=? tag=?)' style expressions are supported.
        if not expression.startswith('each('):
            return 'only support each expression. e.g. each(metric=? xx=yy)'
        if not 'metric=' in expression:
            return 'expression is invalid. e.g. each(metric=? xx=yy)'
        left = expression.find('(')
        right = expression.find(')')
        if left <= 0:
            return 'left parentheses ( not found'
        if right <= 0:
            return 'right parentheses ) not found'
        # Normalize the content between the parentheses: commas/semicolons
        # become spaces, runs of whitespace collapse to single spaces.
        in_parentheses = expression[left + 1:right]
        in_parentheses = ' '.join(in_parentheses.replace(',', ' ').replace(';', ' ').split())
        arr = in_parentheses.split()
        # Keep only key=value tokens; at least metric=... plus one tag needed.
        arr = [item for item in arr if '=' in item]
        if len(arr) < 2:
            return 'expression is invalid. e.g. each(metric=? xx=yy)'
        # Rebuild the canonical expression string from the normalized body.
        expression = 'each(%s)' % in_parentheses
        if expression_id:
            return cls.update_expression(expression_id, expression, func, op, right_value, uic_groups, max_step,
                                         priority, note, url,
                                         callback, before_callback_sms, before_callback_mail,
                                         after_callback_sms, after_callback_mail)
        else:
            return cls.insert_expression(expression, func, op, right_value, uic_groups, max_step,
                                         priority, note, url, callback,
                                         before_callback_sms, before_callback_mail,
                                         after_callback_sms, after_callback_mail, login_user)
    @classmethod
    def insert_expression(cls, content, func, op, right_value, uic_groups, max_step, priority, note, url,
                          callback, before_callback_sms, before_callback_mail,
                          after_callback_sms, after_callback_mail, user_name):
        """Insert the Action row first, then the expression row pointing at it.

        Returns '' on success or an error message on failure.
        """
        action_id = Action.insert({
            'uic': uic_groups,
            'url': url,
            'callback': callback,
            'before_callback_sms': before_callback_sms,
            'before_callback_mail': before_callback_mail,
            'after_callback_sms': after_callback_sms,
            'after_callback_mail': after_callback_mail,
        })
        if not action_id:
            return 'save action fail'
        expression_id = Expression.insert({
            'expression': content,
            'func': func,
            'op': op,
            'right_value': right_value,
            'max_step': max_step,
            'priority': priority,
            'note': note,
            'action_id': action_id,
            'create_user': user_name
        })
        if expression_id:
            return ''
        return 'save expression fail'
    @classmethod
    def update_expression(cls, expression_id, content, func, op, right_value, uic_groups, max_step, priority, note, url,
                          callback, before_callback_sms, before_callback_mail,
                          after_callback_sms, after_callback_mail):
        """Update an existing expression row and its associated Action row.

        Returns '' on success or an error message on failure.
        """
        e = Expression.get(expression_id)
        if not e:
            return 'no such expression %s' % expression_id
        a = Action.get(e.action_id)
        if not a:
            return 'no relation action'
        Action.update_dict(
            {
                'uic': uic_groups,
                'url': url,
                'callback': callback,
                'before_callback_sms': before_callback_sms,
                'before_callback_mail': before_callback_mail,
                'after_callback_sms': after_callback_sms,
                'after_callback_mail': after_callback_mail
            },
            'id=%s',
            [a.id]
        )
        Expression.update_dict(
            {
                'expression': content,
                'func': func,
                'op': op,
                'right_value': right_value,
                'max_step': max_step,
                'priority': priority,
                'note': note,
            },
            'id=%s',
            [e.id]
        )
        return ''
    @classmethod
    def query(cls, page, limit, query, me=None):
        """Page through expressions, optionally filtered by creator (``me``)
        and a LIKE match on the expression text.

        Returns a (rows, total_count) tuple.
        """
        where = ''
        params = []
        if me is not None:
            where = 'create_user = %s'
            params.append(me)
        if query:
            # Join with ' and ' only if a creator filter is already present.
            where += ' and ' if where else ''
            where += 'expression like %s'
            params.append('%' + query + '%')
        vs = cls.select_vs(where=where, params=params, page=page, limit=limit)
        total = cls.total(where=where, params=params)
        return vs, total
    def writable(self, login_user):
        """Return True if login_user may edit this expression."""
        #login_user can be str or User obj
        if isinstance(login_user, str):
            login_user = User.get_by_name(login_user)
        if not login_user:
            return False
        # Admins, root, the creator, and global maintainers may always edit.
        if login_user.is_admin() or login_user.is_root():
            return True
        if self.create_user == login_user.name:
            return True
        if login_user.name in MAINTAINERS:
            return True
        # Otherwise, allow members of the action's notified UIC teams.
        # NOTE(review): self.action is None unless attached by the caller.
        a = self.action
        if not a:
            return False
        if not a.uic:
            return False
        return login_user.in_teams(a.uic)
    def to_json(self):
        """Serialize the expression's table columns to a plain dict."""
        return {
            "id": self.id,
            "expression": self.expression,
            "func": self.func,
            "op": self.op,
            "right_value": self.right_value,
            "max_step": self.max_step,
            "priority": self.priority,
            "note": self.note,
            "action_id": self.action_id
        }
|
|
"""
Tests donut/modules/courses
"""
import flask
import json
import pytest
from donut.testing.fixtures import client
from donut import app
from donut.modules.courses import helpers, routes
def test_planner(client):
    """The course planner page should render successfully."""
    response = client.get(flask.url_for('courses.planner'))
    assert response.status_code == 200
def test_scheduler(client):
    """The course scheduler page should render successfully."""
    response = client.get(flask.url_for('courses.scheduler'))
    assert response.status_code == 200
def test_planner_courses(client):
    """planner_courses returns every seeded course with its ids/terms/units.

    The (id, term) pairs are sorted first because their order within a
    course is not guaranteed by the endpoint.
    """
    rv = client.get(flask.url_for('courses.planner_courses'))
    assert rv.status_code == 200
    data = json.loads(rv.data)
    for course in data:  # sort term-id pairs
        id_terms = sorted(zip(course['ids'], course['terms']))
        course['ids'] = [course_id for course_id, _ in id_terms]
        course['terms'] = [term for _, term in id_terms]
    # Expected seeded course data (as returned by the endpoint).
    assert data == [
        {
            'ids': [6],
            'instructor': 'Meyerowitz, E / Zinn, K',
            'name': 'Principles of Biology',
            'number': 'Bi 1',
            'terms': [3],
            'units': [4, 0, 5]
        },
        {
            'ids': [7, 8, 9],
            'instructor':
            None,  # 2 are with 'Mendez, J', and 1 with 'Jendez, M'
            'name': 'Experimental Methods in Solar Energy Conversion',
            'number': 'Ch 3x',
            'terms': [1, 2, 3],
            'units': [1, 3, 2]
        },
        {
            'ids': [1],
            'instructor': 'Pinkston, D',
            'name': 'Operating Systems',
            'number': 'CS 124',
            'terms': [1],
            'units': [3, 6, 3]
        },
        {
            'ids': [3],
            'instructor': 'Umans, C',
            'name': 'Decidability and Tractability',
            'number': 'CS 21',
            'terms': [2],
            'units': [3, 0, 6]
        },
        {
            'ids': [5],
            'instructor': 'Vidick, T',
            'name': 'Algorithms',
            'number': 'CS 38',
            'terms': [3],
            'units': [3, 0, 6]
        },
        {
            'ids': [4],
            'instructor': None,
            'name': 'Calculus of One and Several Variables and Linear Algebra',
            'number': 'Ma 1b',
            'terms': [2],
            'units': [4, 0, 5]
        },
        {
            'ids': [2],
            'instructor': 'Cheung, C',
            'name': 'Classical Mechanics and Electromagnetism',
            'number': 'Ph 1a',
            'terms': [1],
            'units': [4, 0, 5]
        }
    ]
def test_scheduler_courses(client):
    """scheduler_courses returns the courses (with sections) for a given
    year/term, and an empty list for a term with no data."""
    # Test nonexistant term
    rv = client.get(
        flask.url_for('courses.scheduler_courses', year=2018, term=2))
    assert rv.status_code == 200
    assert json.loads(rv.data) == []
    # Test actual term
    rv = client.get(
        flask.url_for('courses.scheduler_courses', year=2019, term=2))
    assert rv.status_code == 200
    # Expected seeded section data for 2019 term 2.
    assert json.loads(rv.data) == [{
        'id':
        8,
        'name':
        'Experimental Methods in Solar Energy Conversion',
        'number':
        'Ch 3x',
        'sections': [{
            'grades': 'PASS-FAIL',
            'instructor': 'Mendez, J',
            'number': 1,
            'times': 'F 09:00 - 09:55\nW 13:00 - 15:55',
            'locations': '147 NYS\n107 MEAD'
        }],
        'units': [1, 3, 2]
    }, {
        'id':
        3,
        'name':
        'Decidability and Tractability',
        'number':
        'CS 21',
        'sections': [{
            'grades': 'LETTER',
            'instructor': 'Umans, C',
            'number': 1,
            'times': 'MWF 13:00 - 13:55',
            'locations': '105 ANB'
        }],
        'units': [3, 0, 6]
    }, {
        'id':
        4,
        'name':
        'Calculus of One and Several Variables and Linear Algebra',
        'number':
        'Ma 1b',
        'sections': [{
            'grades': 'PASS-FAIL',
            'instructor': 'Kechris, A',
            'number': 1,
            'times': 'MWF 10:00 - 10:55\nR 09:00 - 09:55',
            'locations': '119 KRK\n103 DWN'
        }, {
            'grades': 'PASS-FAIL',
            'instructor': 'Kechris, A',
            'number': 2,
            'times': 'MWF 10:00 - 10:55\nR 09:00 - 09:55',
            'locations': '119 KRK\n119 DWN'
        }, {
            'grades': 'PASS-FAIL',
            'instructor': 'Rains, E',
            'number': 7,
            'times': 'MWF 10:00 - 10:55\nR 09:00 - 09:55',
            'locations': '310 LINDE\nB111 DWN'
        }, {
            'grades': 'PASS-FAIL',
            'instructor': 'Rains, E',
            'number': 8,
            'times': 'R 10:00 - 10:55\nMWF 10:00 - 10:55',
            'locations': '142 KCK\n310 LINDE'
        }],
        'units': [4, 0, 5]
    }]
def test_planner_mine(client):
    """Exercises the planner's per-user course list: requires login for
    mutations, supports add/drop, and rejects duplicate adds."""
    # Test when not logged in
    rv = client.get(flask.url_for('courses.planner_mine'))
    assert rv.status_code == 200
    assert json.loads(rv.data) == {'courses': [], 'placeholders': []}
    rv = client.get(
        flask.url_for('courses.planner_add_course', course_id=1, year=2))
    assert rv.status_code == 200
    assert json.loads(rv.data) == {
        'success': False,
        'message': 'Must be logged in to save'
    }
    rv = client.get(
        flask.url_for('courses.planner_drop_course', course_id=1, year=2))
    assert rv.status_code == 200
    assert json.loads(rv.data) == {
        'success': False,
        'message': 'Must be logged in to save'
    }
    # Test courses list when no courses have been added
    with client.session_transaction() as sess:
        sess['username'] = 'csander'
    rv = client.get(flask.url_for('courses.planner_mine'))
    assert rv.status_code == 200
    assert json.loads(rv.data) == {'courses': [], 'placeholders': []}
    # Test adding some courses
    rv = client.get(
        flask.url_for('courses.planner_add_course', course_id=1, year=2))
    assert rv.status_code == 200
    assert json.loads(rv.data) == {'success': True}
    rv = client.get(
        flask.url_for('courses.planner_add_course', course_id=5, year=1))
    assert rv.status_code == 200
    assert json.loads(rv.data) == {'success': True}
    rv = client.get(
        flask.url_for('courses.planner_add_course', course_id=6, year=1))
    assert rv.status_code == 200
    assert json.loads(rv.data) == {'success': True}
    # Test adding a duplicate course (should fail)
    rv = client.get(
        flask.url_for('courses.planner_add_course', course_id=1, year=2))
    assert rv.status_code == 200
    assert json.loads(rv.data) == {
        'success': False,
        'message': 'Cannot add a class twice in the same term'
    }
    # Test courses list now that courses have been added; verify order
    rv = client.get(flask.url_for('courses.planner_mine'))
    assert rv.status_code == 200
    assert json.loads(rv.data) == {
        'courses': [{
            'ids': [1],
            'number': 'CS 124',
            'terms': [1],
            'units': 12,
            'year': 2
        }, {
            'ids': [6],
            'number': 'Bi 1',
            'terms': [3],
            'units': 9,
            'year': 1
        }, {
            'ids': [5],
            'number': 'CS 38',
            'terms': [3],
            'units': 9,
            'year': 1
        }],
        'placeholders': []
    }
    # Test dropping a course
    rv = client.get(
        flask.url_for('courses.planner_drop_course', course_id=5, year=1))
    assert rv.status_code == 200
    assert json.loads(rv.data) == {'success': True}
    rv = client.get(flask.url_for('courses.planner_mine'))
    assert rv.status_code == 200
    assert json.loads(rv.data) == {
        'courses': [{
            'ids': [1],
            'number': 'CS 124',
            'terms': [1],
            'units': 12,
            'year': 2
        }, {
            'ids': [6],
            'number': 'Bi 1',
            'terms': [3],
            'units': 9,
            'year': 1
        }],
        'placeholders': []
    }
def test_scheduler_mine(client):
    """Exercises the scheduler's per-user section list: requires login for
    mutations, supports add/drop, and filters by year/term."""
    # Test when not logged in
    rv = client.get(flask.url_for('courses.scheduler_mine', year=2018, term=1))
    assert rv.status_code == 200
    assert json.loads(rv.data) == []
    rv = client.get(
        flask.url_for('courses.scheduler_add_section', course=1, section=1))
    assert rv.status_code == 200
    assert json.loads(rv.data) == {
        'success': False,
        'message': 'Must be logged in to save'
    }
    rv = client.get(
        flask.url_for('courses.scheduler_drop_section', course=1, section=1))
    assert rv.status_code == 200
    assert json.loads(rv.data) == {
        'success': False,
        'message': 'Must be logged in to save'
    }
    # Test sections list when no sections have been added
    with client.session_transaction() as sess:
        sess['username'] = 'csander'
    rv = client.get(flask.url_for('courses.scheduler_mine', year=2018, term=1))
    assert rv.status_code == 200
    assert json.loads(rv.data) == []
    # Test adding some sections
    rv = client.get(
        flask.url_for('courses.scheduler_add_section', course=1, section=1))
    assert rv.status_code == 200
    assert json.loads(rv.data) == {'success': True}
    rv = client.get(
        flask.url_for('courses.scheduler_add_section', course=6, section=2))
    assert rv.status_code == 200
    assert json.loads(rv.data) == {'success': True}
    rv = client.get(
        flask.url_for('courses.scheduler_add_section', course=2, section=3))
    assert rv.status_code == 200
    assert json.loads(rv.data) == {'success': True}
    # Test sections list now that sections have been added
    rv = client.get(flask.url_for('courses.scheduler_mine', year=2018, term=1))
    assert rv.status_code == 200
    # Sort by id since the endpoint's ordering is not asserted here.
    assert sorted(
        json.loads(rv.data), key=lambda course: course['id']) == [{
            'id': 1,
            'section': 1
        }, {
            'id': 2,
            'section': 3
        }]
    rv = client.get(flask.url_for('courses.scheduler_mine', year=2018, term=3))
    assert rv.status_code == 200
    assert json.loads(rv.data) == [{'id': 6, 'section': 2}]
    # Test dropping a section
    rv = client.get(
        flask.url_for('courses.scheduler_drop_section', course=1, section=1))
    assert rv.status_code == 200
    assert json.loads(rv.data) == {'success': True}
    rv = client.get(flask.url_for('courses.scheduler_mine', year=2018, term=1))
    assert rv.status_code == 200
    assert json.loads(rv.data) == [{'id': 2, 'section': 3}]
|
|
# -*- coding: utf-8 -*-
import sys
import warnings
import pytest
import deprecated.classic
class MyDeprecationWarning(DeprecationWarning):
    """Custom warning category used to verify the ``category`` option below."""
    pass
# Parameter sets applied to every fixture below. None exercises the bare
# decorator form (@deprecated); each (args, kwargs) tuple exercises the
# called form (@deprecated(*args, **kwargs)).
_PARAMS = [
    None,
    ((), {}),
    (('Good reason',), {}),
    ((), {'reason': 'Good reason'}),
    ((), {'version': '1.2.3'}),
    ((), {'action': 'once'}),
    ((), {'category': MyDeprecationWarning}),
]
@pytest.fixture(scope="module", params=_PARAMS)
def classic_deprecated_function(request):
    """Yield a plain function deprecated with each supported decorator form."""
    if request.param is None:
        decorate = deprecated.classic.deprecated  # bare decorator form
    else:
        args, kwargs = request.param
        decorate = deprecated.classic.deprecated(*args, **kwargs)  # called form

    @decorate
    def foo1():
        pass

    return foo1
@pytest.fixture(scope="module", params=_PARAMS)
def classic_deprecated_class(request):
    """Yield a class deprecated with each supported decorator form."""
    if request.param is None:
        decorate = deprecated.classic.deprecated  # bare decorator form
    else:
        args, kwargs = request.param
        decorate = deprecated.classic.deprecated(*args, **kwargs)  # called form

    @decorate
    class Foo2(object):
        pass

    return Foo2
@pytest.fixture(scope="module", params=_PARAMS)
def classic_deprecated_method(request):
    """Yield a class whose instance method foo3 is deprecated in each form."""
    if request.param is None:
        decorate = deprecated.classic.deprecated  # bare decorator form
    else:
        args, kwargs = request.param
        decorate = deprecated.classic.deprecated(*args, **kwargs)  # called form

    class Foo3(object):
        @decorate
        def foo3(self):
            pass

    return Foo3
@pytest.fixture(scope="module", params=_PARAMS)
def classic_deprecated_static_method(request):
    """Yield a staticmethod deprecated in each form (the function itself,
    unbound from any instance)."""
    if request.param is None:
        decorate = deprecated.classic.deprecated  # bare decorator form
    else:
        args, kwargs = request.param
        decorate = deprecated.classic.deprecated(*args, **kwargs)  # called form

    class Foo4(object):
        @staticmethod
        @decorate
        def foo4():
            pass

    return Foo4.foo4
@pytest.fixture(scope="module", params=_PARAMS)
def classic_deprecated_class_method(request):
    """Yield a class whose classmethod foo5 is deprecated in each form."""
    if request.param is None:
        decorate = deprecated.classic.deprecated  # bare decorator form
    else:
        args, kwargs = request.param
        decorate = deprecated.classic.deprecated(*args, **kwargs)  # called form

    class Foo5(object):
        @classmethod
        @decorate
        def foo5(cls):
            pass

    return Foo5
# noinspection PyShadowingNames
def test_classic_deprecated_function__warns(classic_deprecated_function):
    """Calling a deprecated function emits exactly one DeprecationWarning
    attributed to this file (i.e. the stacklevel is correct)."""
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        classic_deprecated_function()
    assert len(caught) == 1
    warning = caught[0]
    assert issubclass(warning.category, DeprecationWarning)
    assert "deprecated function (or staticmethod)" in str(warning.message)
    assert warning.filename == __file__, 'Incorrect warning stackLevel'
# noinspection PyShadowingNames
def test_classic_deprecated_class__warns(classic_deprecated_class):
    """Instantiating a deprecated class emits exactly one DeprecationWarning
    attributed to this file."""
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        classic_deprecated_class()
    assert len(caught) == 1
    warning = caught[0]
    assert issubclass(warning.category, DeprecationWarning)
    assert "deprecated class" in str(warning.message)
    assert warning.filename == __file__, 'Incorrect warning stackLevel'
# noinspection PyShadowingNames
def test_classic_deprecated_method__warns(classic_deprecated_method):
    """Calling a deprecated instance method emits exactly one
    DeprecationWarning attributed to this file."""
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        instance = classic_deprecated_method()
        instance.foo3()
    assert len(caught) == 1
    warning = caught[0]
    assert issubclass(warning.category, DeprecationWarning)
    assert "deprecated method" in str(warning.message)
    assert warning.filename == __file__, 'Incorrect warning stackLevel'
# noinspection PyShadowingNames
def test_classic_deprecated_static_method__warns(classic_deprecated_static_method):
    """Calling a deprecated staticmethod emits exactly one DeprecationWarning
    attributed to this file."""
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        classic_deprecated_static_method()
    assert len(caught) == 1
    warning = caught[0]
    assert issubclass(warning.category, DeprecationWarning)
    assert "deprecated function (or staticmethod)" in str(warning.message)
    assert warning.filename == __file__, 'Incorrect warning stackLevel'
# noinspection PyShadowingNames
def test_classic_deprecated_class_method__warns(classic_deprecated_class_method):
    """Calling a deprecated classmethod emits exactly one DeprecationWarning;
    the wording differs with the Python version."""
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        holder = classic_deprecated_class_method()
        holder.foo5()
    assert len(caught) == 1
    warning = caught[0]
    assert issubclass(warning.category, DeprecationWarning)
    if sys.version_info >= (3, 9):
        assert "deprecated class method" in str(warning.message)
    else:
        assert "deprecated function (or staticmethod)" in str(warning.message)
    assert warning.filename == __file__, 'Incorrect warning stackLevel'
def test_should_raise_type_error():
    """Passing a non-callable, non-string argument to deprecated() must raise
    TypeError.

    Uses pytest.raises instead of the try / assert False / except pattern,
    which is the idiomatic (and less error-prone) way to assert exceptions.
    """
    with pytest.raises(TypeError):
        deprecated.classic.deprecated(5)
def test_warning_msg_has_reason():
    """The reason string passed to @deprecated appears in the warning text."""
    reason = "Good reason"

    @deprecated.classic.deprecated(reason=reason)
    def foo():
        pass

    with warnings.catch_warnings(record=True) as caught:
        foo()
    assert reason in str(caught[0].message)
def test_warning_msg_has_version():
    """The version string passed to @deprecated appears in the warning text."""
    version = "1.2.3"

    @deprecated.classic.deprecated(version=version)
    def foo():
        pass

    with warnings.catch_warnings(record=True) as caught:
        foo()
    assert version in str(caught[0].message)
def test_warning_is_ignored():
    """action='ignore' suppresses the deprecation warning entirely."""

    @deprecated.classic.deprecated(action='ignore')
    def foo():
        pass

    with warnings.catch_warnings(record=True) as caught:
        foo()
    assert not caught
def test_specific_warning_cls_is_used():
    """A custom category passed to @deprecated is the one actually emitted."""

    @deprecated.classic.deprecated(category=MyDeprecationWarning)
    def foo():
        pass

    with warnings.catch_warnings(record=True) as caught:
        foo()
    assert issubclass(caught[0].category, MyDeprecationWarning)
def test_respect_global_filter():
    """With the 'once' filter active, calling the deprecated function twice
    emits only a single warning.

    FIX: the original called warnings.simplefilter() *outside*
    catch_warnings(), permanently mutating the process-wide filter list and
    leaking the 'once' filter into every test that runs afterwards. Setting
    the filter inside the context keeps the same behavior under test while
    restoring global state on exit.
    """
    @deprecated.classic.deprecated(version='1.2.1', reason="deprecated function")
    def fun():
        print("fun")

    with warnings.catch_warnings(record=True) as warns:
        warnings.simplefilter("once", category=DeprecationWarning)
        fun()
        fun()
    assert len(warns) == 1
|
|
"""
Default settings for the ``mezzanine.core`` app. Each of these can be
overridden in your project's settings module, just like regular
Django settings. The ``editable`` argument for each controls whether
the setting is editable via Django's admin.
Thought should be given to how a setting is actually used before
making it editable, as it may be inappropriate - for example settings
that are only read during startup shouldn't be editable, since changing
them would require an application reload.
"""
from __future__ import unicode_literals
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from mezzanine.conf import register_setting
# ---------------------------------------------------------------------------
# Admin interface settings.
# ---------------------------------------------------------------------------

register_setting(
    name="ADMIN_MENU_ORDER",
    description=_("Controls the ordering and grouping of the admin menu."),
    editable=False,
    default=(
        (_("Content"), ("pages.Page", (_("Media Library"), "fb_browse"),)),
        (_("Site"), ("sites.Site", "redirects.Redirect", "conf.Setting")),
        (_("Users"), ("auth.User", "auth.Group",)),
    ),
)

register_setting(
    name="ADMIN_MENU_COLLAPSED",
    label=_("Collapse the Admin menu"),
    description=_("Controls whether or not the left-hand admin menu is "
        "collapsed by default."),
    editable=True,
    default=False,
)

register_setting(
    name="ADMIN_REMOVAL",
    description=_("Unregister these models from the admin."),
    editable=False,
    default=(),
)

register_setting(
    name="ADMIN_THUMB_SIZE",
    description=_("Size of thumbnail previews for image fields in the "
        "admin interface."),
    editable=False,
    default="24x24",
)

# Third-party service credentials: editable so they can be configured via
# the admin rather than requiring a settings-file change.

register_setting(
    name="AKISMET_API_KEY",
    label=_("Akismet API Key"),
    description=_("Key for http://akismet.com spam filtering service. Used "
        "for filtering comments and forms."),
    editable=True,
    default="",
)

register_setting(
    name="BITLY_ACCESS_TOKEN",
    label=_("bit.ly access token"),
    description=_("Access token for http://bit.ly URL shortening service."),
    editable=True,
    default="",
)

# NOTE(review): the description below contains typos ("technique know as",
# "need to re-generated") — left as-is since changing the string would
# invalidate existing translation catalogs.
register_setting(
    name="CACHE_SET_DELAY_SECONDS",
    description=_("Mezzanine's caching uses a technique know as mint "
        "caching. This is where the requested expiry for a cache entry "
        "is stored with the cache entry in cache, and the real expiry "
        "used has the ``CACHE_SET_DELAY`` added to it. Then on a cache get, "
        "the store expiry is checked, and if it has passed, the cache entry "
        "is set again, and no entry is returned. This tries to ensure that "
        "cache misses never occur, and if many clients were to get a cache "
        "miss at once, only one would actually need to re-generated the "
        "cache entry."),
    editable=False,
    default=30,
)

# Template tags for the three admin dashboard columns; the trailing empty
# tuple leaves the third column blank.
dashboard_tags = (
    ("mezzanine_tags.app_list",),
    ("mezzanine_tags.recent_actions",),
    (),
)

register_setting(
    name="DASHBOARD_TAGS",
    description=_("A three item sequence, each containing a sequence of "
        "template tags used to render the admin dashboard."),
    editable=False,
    default=dashboard_tags,
)

# Device-specific template handling.

register_setting(
    name="DEVICE_DEFAULT",
    description=_("Device specific template sub-directory to use as the "
        "default device."),
    editable=False,
    default="",
)

register_setting(
    name="DEVICE_USER_AGENTS",
    description=_("Mapping of device specific template sub-directory names to "
        "the sequence of strings that may be found in their user agents."),
    editable=False,
    default=(
        ("mobile", ("2.0 MMP", "240x320", "400X240", "AvantGo", "BlackBerry",
            "Blazer", "Cellphone", "Danger", "DoCoMo", "Elaine/3.0",
            "EudoraWeb", "Googlebot-Mobile", "hiptop", "IEMobile",
            "KYOCERA/WX310K", "LG/U990", "MIDP-2.", "MMEF20", "MOT-V",
            "NetFront", "Newt", "Nintendo Wii", "Nitro", "Nokia",
            "Opera Mini", "Palm", "PlayStation Portable", "portalmmm",
            "Proxinet", "ProxiNet", "SHARP-TQ-GX10", "SHG-i900",
            "Small", "SonyEricsson", "Symbian OS", "SymbianOS",
            "TS21i-10", "UP.Browser", "UP.Link", "webOS", "Windows CE",
            "WinWAP", "YahooSeeker/M1A1-R2D2", "iPhone", "iPod", "Android",
            "BlackBerry9530", "LG-TU915 Obigo", "LGE VX", "webOS",
            "Nokia5800",)),
    ),
)

register_setting(
    name="FORMS_USE_HTML5",
    description=_("If ``True``, website forms will use HTML5 features."),
    editable=False,
    default=False,
)

register_setting(
    name="EMAIL_FAIL_SILENTLY",
    description=_("If ``True``, failures to send email will happen "
        "silently, otherwise an exception is raised. "
        "Defaults to ``settings.DEBUG``."),
    editable=False,
    default=settings.DEBUG,
)

register_setting(
    name="EXTRA_MODEL_FIELDS",
    description=_("A sequence of fields that will be injected into "
        "Mezzanine's (or any library's) models. Each item in the sequence is "
        "a four item sequence. The first two items are the dotted path to the "
        "model and its field name to be added, and the dotted path to the "
        "field class to use for the field. The third and fourth items are a "
        "sequence of positional args and a dictionary of keyword args, to use "
        "when creating the field instance. When specifying the field class, "
        "the path ``django.models.db.`` can be omitted for regular Django "
        "model fields."),
    editable=False,
    default=(),
)

register_setting(
    name="GOOGLE_ANALYTICS_ID",
    label=_("Google Analytics ID"),
    description=_("Google Analytics ID (http://www.google.com/analytics/)"),
    editable=True,
    default="",
)

register_setting(
    name="HOST_THEMES",
    description=_("A sequence mapping host names to themes, allowing "
        "different templates to be served per HTTP host. "
        "Each item in the sequence is a two item sequence, "
        "containing a host such as ``othersite.example.com``, and "
        "the name of an importable Python package for the theme. "
        "If the host is matched for a request, the templates "
        "directory inside the theme package will be first searched "
        "when loading templates."),
    editable=False,
    default=(),
)

register_setting(
    name="INLINE_EDITING_ENABLED",
    description=_("If ``True``, front-end inline editing will be enabled."),
    editable=False,
    default=True,
)

# Bundled jQuery asset filenames, relative to
# mezzanine/core/static/mezzanine/js/.

register_setting(
    name="JQUERY_FILENAME",
    label=_("Name of the jQuery file."),
    description=_("Name of the jQuery file found in "
        "mezzanine/core/static/mezzanine/js/"),
    editable=False,
    default="jquery-1.7.1.min.js",
)

register_setting(
    name="JQUERY_UI_FILENAME",
    label=_("Name of the jQuery UI file."),
    description=_("Name of the jQuery UI file found in "
        "mezzanine/core/static/mezzanine/js/"),
    editable=False,
    default="jquery-ui-1.8.2.min.js",
)

register_setting(
    name="MAX_PAGING_LINKS",
    label=_("Max paging links"),
    description=_("Max number of paging links to display when paginating."),
    editable=True,
    default=10,
)

register_setting(
    name="MEDIA_LIBRARY_PER_SITE",
    label=_("Media library per site"),
    description=_("If ``True``, each site will use its own directory within "
        "the filebrowser media library."),
    editable=False,
    default=False,
)

register_setting(
    name="OWNABLE_MODELS_ALL_EDITABLE",
    description=_("Models that subclass ``Ownable`` and use the "
        "``OwnableAdmin`` have their admin change-list records filtered "
        "down to records owned by the current user. This setting contains a "
        "sequence of models in the format ``app_label.object_name``, that "
        "when subclassing ``Ownable``, will still show all records in the "
        "admin change-list interface, regardless of the current user."),
    editable=False,
    default=(),
)

# Rich text (WYSIWYG) field configuration: widget class plus the HTML
# whitelists applied when content is saved.

register_setting(
    name="RICHTEXT_WIDGET_CLASS",
    description=_("Dotted package path and class name of the widget to use "
        "for the ``RichTextField``."),
    editable=False,
    default="mezzanine.core.forms.TinyMceWidget",
)

register_setting(
    name="RICHTEXT_ALLOWED_TAGS",
    description=_("List of HTML tags that won't be stripped from "
        "``RichTextField`` instances."),
    editable=False,
    default=("a", "abbr", "acronym", "address", "area", "article", "aside",
        "b", "bdo", "big", "blockquote", "br", "button", "caption", "center",
        "cite", "code", "col", "colgroup", "dd", "del", "dfn", "dir", "div",
        "dl", "dt", "em", "fieldset", "figure", "font", "footer", "form",
        "h1", "h2", "h3", "h4", "h5", "h6", "header", "hr", "i", "img",
        "input", "ins", "kbd", "label", "legend", "li", "map", "menu",
        "nav", "ol", "optgroup", "option", "p", "pre", "q", "s", "samp",
        "section", "select", "small", "span", "strike", "strong",
        "sub", "sup", "table", "tbody", "td", "textarea",
        "tfoot", "th", "thead", "tr", "tt", "u", "ul", "var", "wbr"),
)

register_setting(
    name="RICHTEXT_ALLOWED_ATTRIBUTES",
    description=_("List of HTML attributes that won't be stripped from "
        "``RichTextField`` instances."),
    editable=False,
    default=("abbr", "accept", "accept-charset", "accesskey", "action",
        "align", "alt", "axis", "border", "cellpadding", "cellspacing",
        "char", "charoff", "charset", "checked", "cite", "class", "clear",
        "cols", "colspan", "color", "compact", "coords", "datetime", "dir",
        "disabled", "enctype", "for", "frame", "headers", "height", "href",
        "hreflang", "hspace", "id", "ismap", "label", "lang", "longdesc",
        "maxlength", "media", "method", "multiple", "name", "nohref",
        "noshade", "nowrap", "prompt", "readonly", "rel", "rev", "rows",
        "rowspan", "rules", "scope", "selected", "shape", "size", "span",
        "src", "start", "style", "summary", "tabindex", "target", "title",
        "type", "usemap", "valign", "value", "vspace", "width", "xml:lang"),
)

register_setting(
    name="RICHTEXT_ALLOWED_STYLES",
    description=_("List of inline CSS styles that won't be stripped from "
        "``RichTextField`` instances."),
    editable=False,
    default=("margin-top", "margin-bottom", "margin-left", "margin-right",
        "float", "vertical-align", "border", "margin"),
)

register_setting(
    name="RICHTEXT_FILTERS",
    description=_("List of dotted paths to functions, called in order, on a "
        "``RichTextField`` value before it is rendered to the template."),
    editable=False,
    default=("mezzanine.utils.html.thumbnails",),
)

# Numeric constants for the rich text filtering level, paired with
# translated labels for use as admin form choices below.
RICHTEXT_FILTER_LEVEL_HIGH = 1
RICHTEXT_FILTER_LEVEL_LOW = 2
RICHTEXT_FILTER_LEVEL_NONE = 3
RICHTEXT_FILTER_LEVELS = (
    (RICHTEXT_FILTER_LEVEL_HIGH, _("High")),
    (RICHTEXT_FILTER_LEVEL_LOW, _("Low (allows video, iframe, Flash, etc)")),
    (RICHTEXT_FILTER_LEVEL_NONE, _("No filtering")),
)

register_setting(
    name="RICHTEXT_FILTER_LEVEL",
    label=_("Rich Text filter level"),
    description=_("*Do not change this setting unless you know what you're "
        "doing.*\n\nWhen content is saved in a Rich Text (WYSIWYG) field, "
        "unsafe HTML tags and attributes are stripped from the content to "
        "protect against staff members intentionally adding code that could "
        "be used to cause problems, such as changing their account to "
        "a super-user with full access to the system.\n\n"
        "This setting allows you to change the level of filtering that "
        "occurs. Setting it to low will allow certain extra tags to be "
        "permitted, such as those required for embedding video. While these "
        "tags are not the main candidates for users adding malicious code, "
        "they are still considered dangerous and could potentially be "
        "mis-used by a particularly technical user, and so are filtered out "
        "when the filtering level is set to high.\n\n"
        "Setting the filtering level to no filtering, will disable all "
        "filtering, and allow any code to be entered by staff members, "
        "including script tags."),
    editable=True,
    choices=RICHTEXT_FILTER_LEVELS,
    default=RICHTEXT_FILTER_LEVEL_HIGH,
)

# NOTE(review): the parentheses around the default below are redundant —
# the default is a plain single-item list, not a tuple.
register_setting(
    name="SEARCH_MODEL_CHOICES",
    description=_("Sequence of models that will be provided by default as "
        "choices in the search form. Each model should be in the format "
        "``app_label.model_name``. Only models that subclass "
        "``mezzanine.core.models.Displayable`` should be used."),
    editable=False,
    default=(["pages.Page"]),
)

register_setting(
    name="SEARCH_PER_PAGE",
    label=_("Search results per page"),
    description=_("Number of results shown in the search results page."),
    editable=True,
    default=10,
)

register_setting(
    name="SITE_PREFIX",
    description=_("A URL prefix for mounting all of Mezzanine's urlpatterns "
        "under. When using this, you'll also need to manually apply it to "
        "your project's root ``urls.py`` module. The root ``urls.py`` module "
        "provided by Mezzanine's ``mezzanine-project`` command contains an "
        "example of this towards its end."),
    editable=False,
    default="",
)

register_setting(
    name="SITE_TITLE",
    label=_("Site Title"),
    description=_("Title that will display at the top of the site, and be "
        "appended to the content of the HTML title tags on every page."),
    editable=True,
    default="Mezzanine",
    translatable=True,
)

register_setting(
    name="SITE_TAGLINE",
    label=_("Tagline"),
    description=_("A tag line that will appear at the top of all pages."),
    editable=True,
    default=_("An open source content management platform."),
    translatable=True,
)

register_setting(
    name="SLUGIFY",
    description=_("Dotted Python path to the callable for converting "
        "strings into URL slugs. Defaults to "
        "``mezzanine.utils.urls.slugify_unicode`` which allows for non-ascii "
        "URLs. Change to ``django.template.defaultfilters.slugify`` to use "
        "Django's slugify function, or something of your own if required."),
    editable=False,
    default="mezzanine.utils.urls.slugify_unicode",
)

register_setting(
    name="SPAM_FILTERS",
    description=_("Sequence of dotted Python paths to callable functions "
        "used for checking posted content (such as forms or comments) is "
        "spam. Each function should accept three arguments: the request "
        "object, the form object, and the URL that was posted from. "
        "Defaults to ``mezzanine.utils.views.is_spam_akismet`` which will "
        "use the http://akismet.com spam filtering service when the "
        "``AKISMET_API_KEY`` setting is configured."),
    editable=False,
    default=("mezzanine.utils.views.is_spam_akismet",),
)

# SSL / HTTPS behaviour.

register_setting(
    name="SSL_ENABLED",
    label=_("Enable SSL"),
    description=_("If ``True``, users will be automatically redirected to "
        "HTTPS for the URLs specified by the ``SSL_FORCE_URL_PREFIXES`` "
        "setting."),
    editable=True,
    default=False,
)

register_setting(
    name="SSL_FORCE_HOST",
    label=_("Force Host"),
    description=_("Host name that the site should always be accessed via that "
        "matches the SSL certificate."),
    editable=True,
    default="",
)
register_setting(
    name="SSL_FORCE_URL_PREFIXES",
    # Wrapped in _() for consistency with every other setting description
    # in this module, so the text is collected for translation (it was the
    # only description passed as a bare string).
    description=_("Sequence of URL prefixes that will be forced to run over "
        "SSL when ``SSL_ENABLED`` is ``True``. i.e. "
        "('/admin', '/example') would force all URLs beginning with "
        "/admin or /example to run over SSL."),
    editable=False,
    default=("/admin", "/account"),
)
register_setting(
    name="SSL_FORCED_PREFIXES_ONLY",
    description=_("If ``True``, only URLs specified by the "
        "``SSL_FORCE_URL_PREFIXES`` setting will be accessible over SSL, "
        "and all other URLs will be redirected back to HTTP if accessed "
        "over HTTPS."),
    editable=False,
    default=True,
)

# NOTE(review): the stop-word list below contains duplicates ("above" and
# "the" each appear twice). Harmless for membership tests, but worth
# tidying upstream.
register_setting(
    name="STOP_WORDS",
    description=_("List of words which will be stripped from search queries."),
    editable=False,
    default=(
        "a", "about", "above", "above", "across", "after",
        "afterwards", "again", "against", "all", "almost", "alone",
        "along", "already", "also", "although", "always", "am",
        "among", "amongst", "amoungst", "amount", "an", "and",
        "another", "any", "anyhow", "anyone", "anything", "anyway",
        "anywhere", "are", "around", "as", "at", "back", "be",
        "became", "because", "become", "becomes", "becoming", "been",
        "before", "beforehand", "behind", "being", "below", "beside",
        "besides", "between", "beyond", "bill", "both", "bottom",
        "but", "by", "call", "can", "cannot", "cant", "co", "con",
        "could", "couldnt", "cry", "de", "describe", "detail", "do",
        "done", "down", "due", "during", "each", "eg", "eight",
        "either", "eleven", "else", "elsewhere", "empty", "enough",
        "etc", "even", "ever", "every", "everyone", "everything",
        "everywhere", "except", "few", "fifteen", "fifty", "fill",
        "find", "fire", "first", "five", "for", "former", "formerly",
        "forty", "found", "four", "from", "front", "full", "further",
        "get", "give", "go", "had", "has", "hasnt", "have", "he",
        "hence", "her", "here", "hereafter", "hereby", "herein",
        "hereupon", "hers", "herself", "him", "himself", "his",
        "how", "however", "hundred", "ie", "if", "in", "inc",
        "indeed", "interest", "into", "is", "it", "its", "itself",
        "keep", "last", "latter", "latterly", "least", "less", "ltd",
        "made", "many", "may", "me", "meanwhile", "might", "mill",
        "mine", "more", "moreover", "most", "mostly", "move", "much",
        "must", "my", "myself", "name", "namely", "neither", "never",
        "nevertheless", "next", "nine", "no", "nobody", "none",
        "noone", "nor", "not", "nothing", "now", "nowhere", "of",
        "off", "often", "on", "once", "one", "only", "onto", "or",
        "other", "others", "otherwise", "our", "ours", "ourselves",
        "out", "over", "own", "part", "per", "perhaps", "please",
        "put", "rather", "re", "same", "see", "seem", "seemed",
        "seeming", "seems", "serious", "several", "she", "should",
        "show", "side", "since", "sincere", "six", "sixty", "so",
        "some", "somehow", "someone", "something", "sometime",
        "sometimes", "somewhere", "still", "such", "system", "take",
        "ten", "than", "that", "the", "their", "them", "themselves",
        "then", "thence", "there", "thereafter", "thereby",
        "therefore", "therein", "thereupon", "these", "they",
        "thickv", "thin", "third", "this", "those", "though",
        "three", "through", "throughout", "thru", "thus", "to",
        "together", "too", "top", "toward", "towards", "twelve",
        "twenty", "two", "un", "under", "until", "up", "upon", "us",
        "very", "via", "was", "we", "well", "were", "what", "whatever",
        "when", "whence", "whenever", "where", "whereafter", "whereas",
        "whereby", "wherein", "whereupon", "wherever", "whether",
        "which", "while", "whither", "who", "whoever", "whole", "whom",
        "whose", "why", "will", "with", "within", "without", "would",
        "yet", "you", "your", "yours", "yourself", "yourselves", "the",
    ),
)

register_setting(
    name="TAG_CLOUD_SIZES",
    label=_("Tag Cloud Sizes"),
    description=_("Number of different sizes for tags when shown as a cloud."),
    editable=True,
    default=4,
)

register_setting(
    name="TEMPLATE_ACCESSIBLE_SETTINGS",
    description=_("Sequence of setting names available within templates."),
    editable=False,
    default=(
        "ACCOUNTS_APPROVAL_REQUIRED", "ACCOUNTS_VERIFICATION_REQUIRED",
        "ADMIN_MENU_COLLAPSED",
        "BITLY_ACCESS_TOKEN",
        "COMMENTS_DISQUS_SHORTNAME", "COMMENTS_NUM_LATEST",
        "COMMENTS_DISQUS_API_PUBLIC_KEY", "COMMENTS_DISQUS_API_SECRET_KEY",
        "COMMENTS_USE_RATINGS", "DEV_SERVER", "FORMS_USE_HTML5",
        "GRAPPELLI_INSTALLED", "GOOGLE_ANALYTICS_ID", "JQUERY_FILENAME",
        "JQUERY_UI_FILENAME", "LOGIN_URL", "LOGOUT_URL", "SITE_TITLE",
        "SITE_TAGLINE", "USE_L10N", "USE_MODELTRANSLATION",
    ),
)

register_setting(
    name="THUMBNAILS_DIR_NAME",
    description=_("Directory name to store thumbnails in, that will be "
        "created relative to the original image's directory."),
    editable=False,
    default=".thumbnails",
)

register_setting(
    name="TINYMCE_SETUP_JS",
    description=_("URL for the JavaScript file (relative to ``STATIC_URL``) "
        "that handles configuring TinyMCE when the default "
        "``RICHTEXT_WIDGET_CLASS`` is used."),
    editable=False,
    default="mezzanine/js/tinymce_setup.js",
)

register_setting(
    name="UPLOAD_TO_HANDLERS",
    description=_("Dict mapping file field names in the format "
        "``app_label.model_name.field_name`` to the Python dotted path "
        "to function names that will be used for the file field's "
        "``upload_to`` argument."),
    editable=False,
    default={},
)

# The following settings are defined here for documentation purposes
# as this file is used to auto-generate the documentation for all
# available settings. They are Mezzanine specific, but their values
# are *always* overridden by the project's settings or local_settings
# modules, so the default values defined here will never be used.

register_setting(
    name="USE_MODELTRANSLATION",
    description=_("If ``True``, the django-modeltranslation application will "
        "be automatically added to the ``INSTALLED_APPS`` setting."),
    editable=False,
    default=False,
)

register_setting(
    name="NEVERCACHE_KEY",
    description=_("Unique random string like ``SECRET_KEY``, but used for "
        "two-phased cache responses. Like ``SECRET_KEY``, should be "
        "automatically generated by the ``mezzanine-project`` command."),
    editable=False,
    default="",
)
|
|
"""Support for the Lovelace UI."""
import logging
import voluptuous as vol
from homeassistant.components import frontend
from homeassistant.config import async_hass_config_yaml, async_process_component_config
from homeassistant.const import CONF_FILENAME, CONF_MODE, CONF_RESOURCES
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import collection, config_validation as cv
from homeassistant.helpers.service import async_register_admin_service
from homeassistant.helpers.typing import ConfigType, HomeAssistantType, ServiceCallType
from homeassistant.loader import async_get_integration
from . import dashboard, resources, websocket
from .const import (
CONF_ICON,
CONF_REQUIRE_ADMIN,
CONF_SHOW_IN_SIDEBAR,
CONF_TITLE,
CONF_URL_PATH,
DASHBOARD_BASE_CREATE_FIELDS,
DEFAULT_ICON,
DOMAIN,
MODE_STORAGE,
MODE_YAML,
RESOURCE_CREATE_FIELDS,
RESOURCE_RELOAD_SERVICE_SCHEMA,
RESOURCE_SCHEMA,
RESOURCE_UPDATE_FIELDS,
SERVICE_RELOAD_RESOURCES,
STORAGE_DASHBOARD_CREATE_FIELDS,
STORAGE_DASHBOARD_UPDATE_FIELDS,
url_slug,
)
from .system_health import system_health_info # noqa: F401
_LOGGER = logging.getLogger(__name__)

# Key under the lovelace domain holding per-dashboard YAML definitions.
CONF_DASHBOARDS = "dashboards"

# Schema for a single dashboard defined in YAML: the shared base fields
# plus a fixed mode of "yaml" and the file that holds its configuration.
YAML_DASHBOARD_SCHEMA = vol.Schema(
    {
        **DASHBOARD_BASE_CREATE_FIELDS,
        vol.Required(CONF_MODE): MODE_YAML,
        vol.Required(CONF_FILENAME): cv.path,
    }
)

# Top-level configuration.yaml schema for the lovelace integration:
# storage mode by default, optional slug-keyed dashboards and resources.
CONFIG_SCHEMA = vol.Schema(
    {
        vol.Optional(DOMAIN, default={}): vol.Schema(
            {
                vol.Optional(CONF_MODE, default=MODE_STORAGE): vol.All(
                    vol.Lower, vol.In([MODE_YAML, MODE_STORAGE])
                ),
                vol.Optional(CONF_DASHBOARDS): cv.schema_with_slug_keys(
                    YAML_DASHBOARD_SCHEMA,
                    slug_validator=url_slug,
                ),
                vol.Optional(CONF_RESOURCES): [RESOURCE_SCHEMA],
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass: HomeAssistantType, config: ConfigType):
    """Set up the Lovelace commands.

    Registers the built-in lovelace panel, sets up the resource
    collection (YAML- or storage-backed depending on the configured
    mode), registers the websocket commands, and finally registers any
    YAML and storage dashboards as additional panels.
    """
    mode = config[DOMAIN][CONF_MODE]
    yaml_resources = config[DOMAIN].get(CONF_RESOURCES)

    frontend.async_register_built_in_panel(hass, DOMAIN, config={"mode": mode})

    async def reload_resources_service_handler(service_call: ServiceCallType) -> None:
        """Reload yaml resources."""
        try:
            conf = await async_hass_config_yaml(hass)
        except HomeAssistantError as err:
            _LOGGER.error(err)
            return

        integration = await async_get_integration(hass, DOMAIN)

        # NOTE: this assignment shadows the outer ``config`` parameter
        # locally; only the freshly loaded configuration is used below.
        config = await async_process_component_config(hass, conf, integration)

        resource_collection = await create_yaml_resource_col(
            hass, config[DOMAIN].get(CONF_RESOURCES)
        )
        hass.data[DOMAIN]["resources"] = resource_collection

    if mode == MODE_YAML:
        # YAML mode: resources come from configuration.yaml and can be
        # reloaded via an admin service.
        default_config = dashboard.LovelaceYAML(hass, None, None)
        resource_collection = await create_yaml_resource_col(hass, yaml_resources)

        async_register_admin_service(
            hass,
            DOMAIN,
            SERVICE_RELOAD_RESOURCES,
            reload_resources_service_handler,
            schema=RESOURCE_RELOAD_SERVICE_SCHEMA,
        )
    else:
        # Storage mode: resources are managed via the UI and persisted
        # in storage; YAML-defined resources are ignored with a warning.
        default_config = dashboard.LovelaceStorage(hass, None)

        if yaml_resources is not None:
            _LOGGER.warning(
                "Lovelace is running in storage mode. Define resources via user interface"
            )

        resource_collection = resources.ResourceStorageCollection(hass, default_config)

        collection.StorageCollectionWebsocket(
            resource_collection,
            "lovelace/resources",
            "resource",
            RESOURCE_CREATE_FIELDS,
            RESOURCE_UPDATE_FIELDS,
        ).async_setup(hass, create_list=False)

    # Websocket API surface for the frontend.
    hass.components.websocket_api.async_register_command(
        websocket.websocket_lovelace_config
    )
    hass.components.websocket_api.async_register_command(
        websocket.websocket_lovelace_save_config
    )
    hass.components.websocket_api.async_register_command(
        websocket.websocket_lovelace_delete_config
    )
    hass.components.websocket_api.async_register_command(
        websocket.websocket_lovelace_resources
    )
    hass.components.websocket_api.async_register_command(
        websocket.websocket_lovelace_dashboards
    )

    hass.data[DOMAIN] = {
        # We store a dictionary mapping url_path: config. None is the default.
        "dashboards": {None: default_config},
        "resources": resource_collection,
        "yaml_dashboards": config[DOMAIN].get(CONF_DASHBOARDS, {}),
    }

    # In safe mode no extra dashboards/panels are registered.
    if hass.config.safe_mode:
        return True

    async def storage_dashboard_changed(change_type, item_id, item):
        """Handle a storage dashboard change."""
        url_path = item[CONF_URL_PATH]

        if change_type == collection.CHANGE_REMOVED:
            frontend.async_remove_panel(hass, url_path)
            await hass.data[DOMAIN]["dashboards"].pop(url_path).async_delete()
            return

        if change_type == collection.CHANGE_ADDED:
            existing = hass.data[DOMAIN]["dashboards"].get(url_path)

            if existing:
                _LOGGER.warning(
                    "Cannot register panel at %s, it is already defined in %s",
                    url_path,
                    existing,
                )
                return

            hass.data[DOMAIN]["dashboards"][url_path] = dashboard.LovelaceStorage(
                hass, item
            )

            update = False
        else:
            # CHANGE_UPDATED: refresh the stored config and update the panel.
            hass.data[DOMAIN]["dashboards"][url_path].config = item
            update = True

        try:
            _register_panel(hass, url_path, MODE_STORAGE, item, update)
        except ValueError:
            _LOGGER.warning("Failed to %s panel %s from storage", change_type, url_path)

    # Process YAML dashboards
    for url_path, dashboard_conf in hass.data[DOMAIN]["yaml_dashboards"].items():
        # For now always mode=yaml
        # NOTE: ``config`` is rebound here, shadowing the outer parameter,
        # which is no longer needed at this point.
        config = dashboard.LovelaceYAML(hass, url_path, dashboard_conf)
        hass.data[DOMAIN]["dashboards"][url_path] = config

        try:
            _register_panel(hass, url_path, MODE_YAML, dashboard_conf, False)
        except ValueError:
            _LOGGER.warning("Panel url path %s is not unique", url_path)

    # Process storage dashboards
    dashboards_collection = dashboard.DashboardsCollection(hass)

    dashboards_collection.async_add_listener(storage_dashboard_changed)
    await dashboards_collection.async_load()

    collection.StorageCollectionWebsocket(
        dashboards_collection,
        "lovelace/dashboards",
        "dashboard",
        STORAGE_DASHBOARD_CREATE_FIELDS,
        STORAGE_DASHBOARD_UPDATE_FIELDS,
    ).async_setup(hass, create_list=False)

    return True
async def create_yaml_resource_col(hass, yaml_resources):
    """Create yaml resources collection."""
    if yaml_resources is None:
        # No resources in configuration.yaml: fall back to resources
        # embedded in the legacy YAML dashboard file, with a deprecation
        # warning pointing users at the documented location.
        yaml_dashboard = dashboard.LovelaceYAML(hass, None, None)
        try:
            lovelace_conf = await yaml_dashboard.async_load(False)
        except HomeAssistantError:
            pass
        else:
            if CONF_RESOURCES in lovelace_conf:
                _LOGGER.warning(
                    "Resources need to be specified in your configuration.yaml. Please see the docs"
                )
                yaml_resources = lovelace_conf[CONF_RESOURCES]

    return resources.ResourceYAMLCollection(yaml_resources or [])
@callback
def _register_panel(hass, url_path, mode, config, update):
    """Register a Lovelace dashboard as a built-in frontend panel."""
    panel_kwargs = {
        "frontend_url_path": url_path,
        "require_admin": config[CONF_REQUIRE_ADMIN],
        "config": {"mode": mode},
        "update": update,
    }

    if config[CONF_SHOW_IN_SIDEBAR]:
        # Only panels shown in the sidebar carry a title and icon.
        panel_kwargs.update(
            sidebar_title=config[CONF_TITLE],
            sidebar_icon=config.get(CONF_ICON, DEFAULT_ICON),
        )

    frontend.async_register_built_in_panel(hass, DOMAIN, **panel_kwargs)
|
|
# Copyright 2018 AT&T Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
from oslo_serialization import jsonutils as json
from tempest import clients
from tempest.cmd import cleanup_service
from tempest import config
from tempest.lib import exceptions
from tempest.tests import base
from tempest.tests import fake_config
from tempest.tests.lib import fake_credentials
from tempest.tests.lib import fake_http
class TestBaseService(base.TestCase):
    """Unit tests for cleanup_service.BaseService initialisation and run()."""

    class TestException(cleanup_service.BaseService):
        """BaseService subclass whose hooks always raise NotImplemented."""

        def delete(self):
            raise exceptions.NotImplemented

        def dry_run(self):
            raise exceptions.NotImplemented

        def save_state(self):
            raise exceptions.NotImplemented

    def test_base_service_init(self):
        params = {'data': {'data': 'test'},
                  'is_dry_run': False,
                  'saved_state_json': {'saved': 'data'},
                  'is_preserve': False,
                  'is_save_state': True,
                  'tenant_id': 'project_id',
                  'got_exceptions': []}
        service = cleanup_service.BaseService(params)
        self.assertEqual(service.data, params['data'])
        self.assertFalse(service.is_dry_run)
        self.assertEqual(service.saved_state_json, params['saved_state_json'])
        self.assertFalse(service.is_preserve)
        self.assertTrue(service.is_save_state)
        self.assertEqual(service.tenant_filter['project_id'],
                         params['tenant_id'])
        self.assertEqual(service.got_exceptions, params['got_exceptions'])

    def test_not_implemented_ex(self):
        params = {'data': {'data': 'test'},
                  'is_dry_run': False,
                  'saved_state_json': {'saved': 'data'},
                  'is_preserve': False,
                  'is_save_state': False,
                  'tenant_id': 'project_id',
                  'got_exceptions': []}
        service = self.TestException(params)
        # Each run() swallows the raised exception and records it.
        # delete path
        service.run()
        self.assertEqual(len(service.got_exceptions), 1)
        # save_state path
        service.save_state = True
        service.run()
        self.assertEqual(len(service.got_exceptions), 2)
        # dry_run path
        service.is_dry_run = True
        service.run()
        self.assertEqual(len(service.got_exceptions), 3)
class MockFunctionsBase(base.TestCase):
    """Shared helpers for mocking REST client calls in cleanup tests."""

    def _create_response(self, body, status, headers):
        # With a status, build a (response, body) tuple shaped like the
        # return value of RestClient methods; the body is JSON-encoded
        # only when truthy.
        if status:
            if body:
                body = json.dumps(body)
            resp = fake_http.fake_http_response(headers, status=status), body
            return resp
        else:
            # No status: pass the body straight through unchanged (used
            # for sentinel strings such as 'error' or 'validate').
            return body

    def _create_fixtures(self, fixtures_to_make):
        # Each entry is a (dotted function path, response body, status)
        # tuple; returns the list of installed MockPatch fixtures so the
        # caller can assert on the mocks afterwards.
        mocked_fixtures = []
        for fixture in fixtures_to_make:
            func, body, status = fixture
            mocked_response = self._create_response(body, status, None)
            # The literal sentinel 'error' makes the mock raise instead
            # of returning a value.
            if mocked_response == 'error':
                mocked_func = self.useFixture(fixtures.MockPatch(
                    func, side_effect=Exception("error")))
            else:
                mocked_func = self.useFixture(fixtures.MockPatch(
                    func, return_value=mocked_response))
            mocked_fixtures.append(mocked_func)
        return mocked_fixtures

    def run_function_with_mocks(self, function_to_run, functions_to_mock):
        """Mock a service client function for testing.

        :param function_to_run: The service client function to call.
        :param functions_to_mock: a list of tuples containing the function
            to mock, the response body, and the response status.
            EX:
                ('tempest.lib.common.rest_client.RestClient.get',
                 {'users': ['']},
                 200)
        :returns: tuple of the called function's return value and the
            list of mock fixtures that were installed.
        """
        mocked_fixtures = self._create_fixtures(functions_to_mock)
        func_return = function_to_run()
        return func_return, mocked_fixtures
class BaseCmdServiceTests(MockFunctionsBase):
    """Base class with shared scenarios for per-service cleanup tests.

    Subclasses override ``service_class``, ``service_name`` and
    ``response`` and reuse the ``_test_*`` helpers below.
    """

    def setUp(self):
        super(BaseCmdServiceTests, self).setUp()
        self.useFixture(fake_config.ConfigFixture())
        self.patchobject(config, 'TempestConfigPrivate',
                         fake_config.FakePrivate)
        # Avoid a real network lookup when cleanup_service initialises.
        self.useFixture(fixtures.MockPatch(
            'tempest.cmd.cleanup_service._get_network_id',
            return_value=''))
        cleanup_service.init_conf()
        # Values sourced from the (fake) tempest configuration; used by
        # the is_preserve assertions below.
        self.conf_values = {"flavors": cleanup_service.CONF_FLAVORS[0],
                            "images": cleanup_service.CONF_IMAGES[0],
                            "projects": cleanup_service.CONF_PROJECTS[0],
                            "users": cleanup_service.CONF_USERS[0],
                            "networks": cleanup_service.CONF_PUB_NETWORK,
                            "security_groups":
                                cleanup_service.CONF_PROJECTS[0],
                            "ports": cleanup_service.CONF_PUB_NETWORK,
                            "routers": cleanup_service.CONF_PUB_ROUTER,
                            "subnetpools": cleanup_service.CONF_PROJECTS[0],
                            }

    # Fake saved-state JSON shared by all subclasses; items listed here
    # must never be deleted by the services under test.
    saved_state = {
        # Static list to ensure global service saved items are not deleted
        "users": {u'32rwef64245tgr20121qw324bgg': u'Lightning'},
        "flavors": {u'42': u'm1.tiny'},
        "images": {u'34yhwr-4t3q': u'stratus-0.3.2-x86_64-disk'},
        "roles": {u'3efrt74r45hn': u'president'},
        "projects": {u'f38ohgp93jj032': u'manhattan'},
        "domains": {u'default': u'Default'},
        # Static list to ensure project service saved items are not deleted
        "snapshots": {u'1ad4c789-7e8w-4dwg-afc5': u'saved-snapshot'},
        "servers": {u'7a6d4v7w-36ds-4216': u'saved-server'},
        "server_groups": {u'as6d5f7g-46ca-475e': u'saved-server-group'},
        "keypairs": {u'saved-key-pair': {
            u'fingerprint': u'7e:eb:ab:24',
            u'name': u'saved-key-pair'
        }},
        "volumes": {u'aa77asdf-1234': u'saved-volume'},
        "networks": {u'6722fc13-4319': {
            u'id': u'6722fc13-4319',
            u'name': u'saved-network'
        }},
        "floatingips": {u'9e82d248-408a': {
            u'id': u'9e82d248-408a',
            u'status': u'ACTIVE'
        }},
        "routers": {u'4s5w34hj-id44': u'saved-router'},
        "metering_label_rules": {u'93a973ce-4dc5': {
            u'direction': u'ingress',
            u'id': u'93a973ce-4dc5'
        }},
        "metering_labels": {u'723b346ce866-4c7q': u'saved-label'},
        "ports": {u'aa74aa4v-741a': u'saved-port'},
        "security_groups": {u'7q844add-3697': u'saved-sec-group'},
        "subnets": {u'55ttda4a-2584': u'saved-subnet'},
        "subnetpools": {u'8acf64c1-43fc': u'saved-subnet-pool'},
        "regions": {u'RegionOne': {}}
    }

    # Mocked methods
    get_method = 'tempest.lib.common.rest_client.RestClient.get'
    delete_method = 'tempest.lib.common.rest_client.RestClient.delete'
    log_method = 'tempest.cmd.cleanup_service.LOG.exception'
    # Override parameters
    service_class = 'BaseService'
    response = None
    service_name = 'default'

    def _create_cmd_service(self, service_type, is_save_state=False,
                            is_preserve=False, is_dry_run=False):
        # Instantiate the named cleanup service class with fake
        # credentials and the shared saved-state fixture.
        creds = fake_credentials.FakeKeystoneV3Credentials()
        os = clients.Manager(creds)
        return getattr(cleanup_service, service_type)(
            os,
            is_save_state=is_save_state,
            is_preserve=is_preserve,
            is_dry_run=is_dry_run,
            project_id='b8e3ece07bb049138d224436756e3b57',
            data={},
            saved_state_json=self.saved_state
        )

    def _test_delete(self, mocked_fixture_tuple_list, fail=False):
        # Run the service and verify deletes were issued for everything
        # except items recorded in the saved state.
        serv = self._create_cmd_service(self.service_class)
        resp, fixtures = self.run_function_with_mocks(
            serv.run,
            mocked_fixture_tuple_list,
        )
        for fixture in fixtures:
            if fixture.mock.return_value == 'validate':
                fixture.mock.assert_called()
            elif fail is False and fixture.mock.return_value == 'exception':
                fixture.mock.assert_not_called()
            elif self.service_name in self.saved_state.keys():
                fixture.mock.assert_called_once()
                for key in self.saved_state[self.service_name].keys():
                    self.assertNotIn(key, fixture.mock.call_args[0][0])
            else:
                fixture.mock.assert_called_once()
        self.assertFalse(serv.data)

    def _test_dry_run_true(self, mocked_fixture_tuple_list):
        # In dry-run mode the delete mock must never fire.
        serv = self._create_cmd_service(self.service_class, is_dry_run=True)
        _, fixtures = self.run_function_with_mocks(
            serv.run,
            mocked_fixture_tuple_list
        )
        for fixture in fixtures:
            if fixture.mock.return_value == 'delete':
                fixture.mock.assert_not_called()
            elif self.service_name in self.saved_state.keys():
                fixture.mock.assert_called_once()
                for key in self.saved_state[self.service_name].keys():
                    self.assertNotIn(key, fixture.mock.call_args[0][0])
            else:
                fixture.mock.assert_called_once()

    def _test_saved_state_true(self, mocked_fixture_tuple_list):
        # Saving state must record every listed item id in serv.data.
        serv = self._create_cmd_service(self.service_class, is_save_state=True)
        _, fixtures = self.run_function_with_mocks(
            serv.run,
            mocked_fixture_tuple_list
        )
        for item in self.response[self.service_name]:
            self.assertIn(item['id'],
                          serv.data[self.service_name])
        for fixture in fixtures:
            fixture.mock.assert_called_once()

    def _test_is_preserve_true(self, mocked_fixture_tuple_list):
        # With is_preserve, configured (CONF_*) resources must be
        # filtered out of the listing.
        serv = self._create_cmd_service(self.service_class, is_preserve=True)
        resp, fixtures = self.run_function_with_mocks(
            serv.list,
            mocked_fixture_tuple_list
        )
        for fixture in fixtures:
            fixture.mock.assert_called_once()
        self.assertIn(resp[0], self.response[self.service_name])
        for rsp in resp:
            self.assertNotIn(rsp['id'], self.conf_values.values())
            self.assertNotIn(rsp['name'], self.conf_values.values())
class TestSnapshotService(BaseCmdServiceTests):
    """Unit tests for the SnapshotService cleanup service."""

    service_class = 'SnapshotService'
    service_name = 'snapshots'
    # Canned GET payload: one deletable snapshot plus 'saved-snapshot',
    # which also appears in saved_state and must survive deletion.
    response = {
        "snapshots": [
            {
                "status": "available",
                "metadata": {
                    "name": "test"
                },
                "name": "test-volume-snapshot",
                "volume_id": "173f7b48-c4c1-4e70-9acc-086b39073506",
                "created_at": "2015-11-29T02:25:51.000000",
                "size": 1,
                "updated_at": "2015-11-20T05:36:40.000000",
                "id": "b1323cda-8e4b-41c1-afc5-2fc791809c8c",
                "description": "volume snapshot"
            },
            {
                "status": "available",
                "name": "saved-snapshot",
                "metadata": {},
                "id": "1ad4c789-7e8w-4dwg-afc5",
                "size": 1,
                "volume_id": "af7c41be-1ff6-4233-a690-7ed61c34347f",
                "created_at": "2015-11-20T05:39:40.000000",
                "updated_at": "2015-11-20T05:39:40.000000",
                "description": "snapshot in saved state"
            }
        ]
    }

    def test_delete_fail(self):
        delete_mock = [(self.get_method, self.response, 200),
                       (self.delete_method, 'error', None),
                       (self.log_method, 'exception', None)]
        self._test_delete(delete_mock, fail=True)

    def test_delete_pass(self):
        delete_mock = [(self.get_method, self.response, 200),
                       (self.delete_method, None, 202),
                       (self.log_method, 'exception', None)]
        self._test_delete(delete_mock)

    def test_dry_run(self):
        dry_mock = [(self.get_method, self.response, 200),
                    (self.delete_method, "delete", None)]
        self._test_dry_run_true(dry_mock)

    def test_save_state(self):
        self._test_saved_state_true([(self.get_method, self.response, 200)])
class TestServerService(BaseCmdServiceTests):
    """Unit tests for the ServerService cleanup service."""

    service_class = 'ServerService'
    service_name = 'servers'
    # One deletable server plus 'saved-server' (present in saved_state).
    response = {
        "servers": [
            {
                "id": "22c91117-08de-4894-9aa9-6ef382400985",
                "links": [
                    {
                        "href": "http://openstack.example.com/v2/6f70-6ef0985",
                        "rel": "self"
                    },
                    {
                        "href": "http://openstack.example.com/6f70656e7-6ef35",
                        "rel": "bookmark"
                    }
                ],
                "name": "new-server-test"
            },
            {
                "id": "7a6d4v7w-36ds-4216",
                "links": [
                    {
                        "href": "http://openstack.example.com/v2/6f70-6ef0985",
                        "rel": "self"
                    },
                    {
                        "href": "http://openstack.example.com/6f70656e7-6ef35",
                        "rel": "bookmark"
                    }
                ],
                "name": "saved-server"
            }
        ]
    }

    def test_delete_fail(self):
        delete_mock = [(self.get_method, self.response, 200),
                       (self.delete_method, 'error', None),
                       (self.log_method, 'exception', None)]
        self._test_delete(delete_mock, fail=True)

    def test_delete_pass(self):
        delete_mock = [(self.get_method, self.response, 200),
                       (self.delete_method, None, 204),
                       (self.log_method, 'exception', None)]
        self._test_delete(delete_mock)

    def test_dry_run(self):
        dry_mock = [(self.get_method, self.response, 200),
                    (self.delete_method, "delete", None)]
        self._test_dry_run_true(dry_mock)

    def test_save_state(self):
        self._test_saved_state_true([(self.get_method, self.response, 200)])
class TestServerGroupService(BaseCmdServiceTests):
    """Unit tests for the ServerGroupService cleanup service."""

    service_class = 'ServerGroupService'
    service_name = 'server_groups'
    # Server-group calls go through the compute client's validate_response,
    # so it is mocked alongside get/delete.
    validate_response = ('tempest.lib.services.compute.server_groups_client'
                         '.ServerGroupsClient.validate_response')
    response = {
        "server_groups": [
            {
                "id": "616fb98f-46ca-475e-917e-2563e5a8cd19",
                "name": "test",
                "policy": "anti-affinity",
                "rules": {"max_server_per_host": 3},
                "members": [],
                "project_id": "6f70656e737461636b20342065766572",
                "user_id": "fake"
            },
            {
                "id": "as6d5f7g-46ca-475e",
                "name": "saved-server-group"
            }
        ]
    }

    def test_delete_fail(self):
        delete_mock = [(self.get_method, self.response, 200),
                       (self.validate_response, 'validate', None),
                       (self.delete_method, 'error', None),
                       (self.log_method, 'exception', None)]
        self._test_delete(delete_mock, fail=True)

    def test_delete_pass(self):
        delete_mock = [(self.get_method, self.response, 200),
                       (self.validate_response, 'validate', None),
                       (self.delete_method, None, 204),
                       (self.log_method, 'exception', None)]
        self._test_delete(delete_mock)

    def test_dry_run(self):
        dry_mock = [(self.get_method, self.response, 200),
                    (self.validate_response, 'validate', None),
                    (self.delete_method, "delete", None)]
        self._test_dry_run_true(dry_mock)

    def test_save_state(self):
        self._test_saved_state_true([(self.get_method, self.response, 200),
                                     (self.validate_response, 'validate', None)
                                     ])
class TestKeyPairService(BaseCmdServiceTests):
    """Unit tests for the KeyPairService cleanup service.

    Keypairs are keyed by name rather than by id, so this class
    overrides ``_test_saved_state_true`` to check names.
    """

    service_class = 'KeyPairService'
    service_name = 'keypairs'
    # Keypair calls go through the compute client's validate_response.
    validate_response = ('tempest.lib.services.compute.keypairs_client'
                         '.KeyPairsClient.validate_response')
    response = {
        "keypairs": [
            {
                "keypair": {
                    "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:bd",
                    "name": "keypair-5d935425-31d5-48a7-a0f1-e76e9813f2c3",
                    "type": "ssh",
                    "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF\n"
                }
            },
            {
                "keypair": {
                    "fingerprint": "7e:eb:ab:24",
                    "name": "saved-key-pair"
                }
            }
        ]
    }

    def _test_saved_state_true(self, mocked_fixture_tuple_list):
        """Run with ``is_save_state=True``; keypair names must be saved."""
        serv = self._create_cmd_service(self.service_class, is_save_state=True)
        _, fixtures = self.run_function_with_mocks(
            serv.run,
            mocked_fixture_tuple_list
        )
        for item in self.response[self.service_name]:
            # Bug fix: the original called assertTrue(name, container),
            # which treats the container as the failure *message* and
            # passes for any non-empty name. assertIn performs the
            # intended membership check against the saved data.
            self.assertIn(item['keypair']['name'],
                          serv.data[self.service_name])
        for fixture in fixtures:
            fixture.mock.assert_called_once()

    def test_delete_fail(self):
        delete_mock = [(self.get_method, self.response, 200),
                       (self.validate_response, 'validate', None),
                       (self.delete_method, 'error', None),
                       (self.log_method, 'exception', None)]
        self._test_delete(delete_mock, fail=True)

    def test_delete_pass(self):
        delete_mock = [(self.get_method, self.response, 200),
                       (self.validate_response, 'validate', None),
                       (self.delete_method, None, 204),
                       (self.log_method, 'exception', None)]
        self._test_delete(delete_mock)

    def test_dry_run(self):
        dry_mock = [(self.get_method, self.response, 200),
                    (self.validate_response, 'validate', None),
                    (self.delete_method, "delete", None)]
        self._test_dry_run_true(dry_mock)

    def test_save_state(self):
        self._test_saved_state_true([
            (self.get_method, self.response, 200),
            (self.validate_response, 'validate', None)
        ])
class TestVolumeService(BaseCmdServiceTests):
    """Unit tests for the VolumeService cleanup service."""

    service_class = 'VolumeService'
    service_name = 'volumes'
    # One deletable volume plus 'saved-volume' (present in saved_state).
    response = {
        "volumes": [
            {
                "id": "efa54464-8fab-47cd-a05a-be3e6b396188",
                "links": [
                    {
                        "href": "http://127.0.0.1:37097/v3/89af/volumes/efa54",
                        "rel": "self"
                    },
                    {
                        "href": "http://127.0.0.1:37097/89af/volumes/efa54464",
                        "rel": "bookmark"
                    }
                ],
                "name": "volume-name"
            },
            {
                "id": "aa77asdf-1234",
                "name": "saved-volume"
            }
        ]
    }

    def test_delete_fail(self):
        delete_mock = [(self.get_method, self.response, 200),
                       (self.delete_method, 'error', None),
                       (self.log_method, 'exception', None)]
        self._test_delete(delete_mock, fail=True)

    def test_delete_pass(self):
        delete_mock = [(self.get_method, self.response, 200),
                       (self.delete_method, None, 202),
                       (self.log_method, 'exception', None)]
        self._test_delete(delete_mock)

    def test_dry_run(self):
        dry_mock = [(self.get_method, self.response, 200),
                    (self.delete_method, "delete", None)]
        self._test_dry_run_true(dry_mock)

    def test_save_state(self):
        self._test_saved_state_true([(self.get_method, self.response, 200)])
class TestVolumeQuotaService(BaseCmdServiceTests):
    """Unit tests for the VolumeQuotaService cleanup service.

    Quota cleanup resets a project's quota set, so the delete tests do
    not need a GET mock and there is no save-state test.
    """

    service_class = 'VolumeQuotaService'
    service_name = 'volume_quota_service'
    response = {
        "quota_set": {
            "groups":
                {"reserved": 0, "limit": 10, "in_use": 0},
            "per_volume_gigabytes":
                {"reserved": 0, "limit": -1, "in_use": 0},
            "volumes":
                {"reserved": 0, "limit": 10, "in_use": 0},
            "gigabytes":
                {"reserved": 0, "limit": 1000, "in_use": 0},
            "backup_gigabytes":
                {"reserved": 0, "limit": 1000, "in_use": 0},
            "snapshots":
                {"reserved": 0, "limit": 10, "in_use": 0},
            "volumes_iscsi":
                {"reserved": 0, "limit": -1, "in_use": 0},
            "snapshots_iscsi":
                {"reserved": 0, "limit": -1, "in_use": 0},
            "backups":
                {"reserved": 0, "limit": 10, "in_use": 0},
            "gigabytes_iscsi":
                {"reserved": 0, "limit": -1, "in_use": 0},
            "id": "b8e3ece07bb049138d224436756e3b57"
        }
    }

    def test_delete_fail(self):
        delete_mock = [(self.delete_method, 'error', None),
                       (self.log_method, 'exception', None)]
        self._test_delete(delete_mock, fail=True)

    def test_delete_pass(self):
        delete_mock = [(self.delete_method, None, 200),
                       (self.log_method, 'exception', None)]
        self._test_delete(delete_mock)

    def test_dry_run(self):
        dry_mock = [(self.get_method, self.response, 200),
                    (self.delete_method, "delete", None)]
        self._test_dry_run_true(dry_mock)
class TestNovaQuotaService(BaseCmdServiceTests):
    """Unit tests for the NovaQuotaService cleanup service.

    Like the volume quota service, delete is a quota reset: no GET mock
    in the delete tests and no save-state test.
    """

    service_class = 'NovaQuotaService'
    service_name = 'nova_quota_service'
    response = {
        "limits": {
            "rate": [],
            "absolute": {
                "maxServerMeta": 128,
                "maxPersonality": 5,
                "totalServerGroupsUsed": 0,
                "maxImageMeta": 128,
                "maxPersonalitySize": 10240,
                "maxTotalKeypairs": 100,
                "maxSecurityGroupRules": 20,
                "maxServerGroups": 10,
                "totalCoresUsed": 0,
                "totalRAMUsed": 0,
                "totalInstancesUsed": 0,
                "maxSecurityGroups": 10,
                "totalFloatingIpsUsed": 0,
                "maxTotalCores": 20,
                "maxServerGroupMembers": 10,
                "maxTotalFloatingIps": 10,
                "totalSecurityGroupsUsed": 0,
                "maxTotalInstances": 10,
                "maxTotalRAMSize": 51200
            }
        }
    }

    def test_delete_fail(self):
        delete_mock = [(self.delete_method, 'error', None),
                       (self.log_method, 'exception', None)]
        self._test_delete(delete_mock, fail=True)

    def test_delete_pass(self):
        delete_mock = [(self.delete_method, None, 202),
                       (self.log_method, 'exception', None)]
        self._test_delete(delete_mock)

    def test_dry_run(self):
        dry_mock = [(self.get_method, self.response, 200),
                    (self.delete_method, "delete", None)]
        self._test_dry_run_true(dry_mock)
class TestNetworkQuotaService(BaseCmdServiceTests):
    """Unit tests for the NetworkQuotaService cleanup service.

    Delete is a quota reset: no GET mock in the delete tests and no
    save-state test.
    """

    service_class = 'NetworkQuotaService'
    service_name = 'network_quota_service'
    response = {
        "quotas": [{
            "subnet": 110,
            "network": 100,
            "floatingip": 50,
            "tenant_id": "81e8490db559474dacb2212fca9cca2d",
            "subnetpool": -1,
            "security_group_rule": 100,
            "trunk": -1,
            "security_group": 10,
            "router": 10,
            "rbac_policy": 10, "project_id":
                "81e8490db559474dacb2212fca9cca2d", "port": 500
        }]
    }

    def test_delete_fail(self):
        delete_mock = [(self.delete_method, 'error', None),
                       (self.log_method, 'exception', None)]
        self._test_delete(delete_mock, fail=True)

    def test_delete_pass(self):
        delete_mock = [(self.delete_method, None, 204),
                       (self.log_method, 'exception', None)]
        self._test_delete(delete_mock)

    def test_dry_run(self):
        dry_mock = [(self.get_method, self.response, 200),
                    (self.delete_method, "delete", None)]
        self._test_dry_run_true(dry_mock)
# Begin network service classes
class TestNetworkService(BaseCmdServiceTests):
    """Unit tests for the NetworkService cleanup service."""

    service_class = 'NetworkService'
    service_name = 'networks'
    # One deletable network plus 'saved-network' (present in saved_state).
    response = {
        "networks": [
            {
                "admin_state_up": True,
                "availability_zone_hints": [],
                "availability_zones": [
                    "nova"
                ],
                "created_at": "2016-03-08T20:19:41",
                "dns_domain": "my-domain.org.",
                "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22",
                "l2_adjacency": False,
                "mtu": 1500,
                "name": "net1",
                "port_security_enabled": True,
                "project_id": "4fd44f30292945e481c7b8a0c8908869",
                "qos_policy_id": "6a8454ade84346f59e8d40665f878b2e",
                "revision_number": 1,
                "router:external": False,
                "shared": False,
                "status": "ACTIVE",
                "subnets": [
                    "54d6f61d-db07-451c-9ab3-b9609b6b6f0b"
                ],
                "tenant_id": "4fd44f30292945e481c7b8a0c8908869",
                "updated_at": "2016-03-08T20:19:41",
                "vlan_transparent": True,
                "description": "",
                "is_default": False
            },
            {
                "id": "6722fc13-4319",
                "name": "saved-network"
            }
        ]
    }

    def test_delete_fail(self):
        delete_mock = [(self.get_method, self.response, 200),
                       (self.delete_method, 'error', None),
                       (self.log_method, 'exception', None)]
        self._test_delete(delete_mock, fail=True)

    def test_delete_pass(self):
        delete_mock = [(self.get_method, self.response, 200),
                       (self.delete_method, None, 204),
                       (self.log_method, 'exception', None)]
        self._test_delete(delete_mock)

    def test_dry_run(self):
        dry_mock = [(self.get_method, self.response, 200),
                    (self.delete_method, "delete", None)]
        self._test_dry_run_true(dry_mock)

    def test_save_state(self):
        self._test_saved_state_true([(self.get_method, self.response, 200)])

    def test_preserve_list(self):
        # A network whose id matches CONF_PUB_NETWORK must be skipped
        # when is_preserve is enabled.
        self.response['networks'].append(
            {
                "admin_state_up": True,
                "availability_zone_hints": [],
                "availability_zones": [
                    "nova"
                ],
                "created_at": "2017-03-08T20:19:41",
                "dns_domain": "my-domain.org.",
                "id": cleanup_service.CONF_PUB_NETWORK,
                "name": "net2",
                "port_security_enabled": True,
                "project_id": "4fd44f30292945e481c7b8a0c8908869",
                "qos_policy_id": "6a8454ade84346f59e8d40665f878b2e",
                "revision_number": 1,
                "status": "ACTIVE",
                "subnets": [
                    "54d6f61d-db07-451c-9ab3-b9609b6b6f0b"
                ],
                "tenant_id": "4fd44f30292945e481c7b8a0c8908869",
                "updated_at": "2018-03-08T20:19:41",
                "vlan_transparent": True,
                "is_default": False
            })
        self._test_is_preserve_true([(self.get_method, self.response, 200)])
class TestNetworkFloatingIpService(BaseCmdServiceTests):
    """Unit tests for the NetworkFloatingIpService cleanup service."""

    service_class = 'NetworkFloatingIpService'
    service_name = 'floatingips'
    # One deletable floating IP plus '9e82d248-408a' (in saved_state).
    response = {
        "floatingips": [
            {
                "router_id": "d23abc8d-2991-4a55-ba98-2aaea84cc72f",
                "description": "for test",
                "dns_domain": "my-domain.org.",
                "dns_name": "myfip",
                "created_at": "2016-12-21T10:55:50Z",
                "updated_at": "2016-12-21T10:55:53Z",
                "revision_number": 1,
                "project_id": "4969c491a3c74ee4af974e6d800c62de",
                "tenant_id": "4969c491a3c74ee4af974e6d800c62de",
                "floating_network_id": "376da547-b977-4cfe-9cba-275c80debf57",
                "fixed_ip_address": "10.0.0.3",
                "floating_ip_address": "172.24.4.228",
                "port_id": "ce705c24-c1ef-408a-bda3-7bbd946164ab",
                "id": "2f245a7b-796b-4f26-9cf9-9e82d248fda7",
                "status": "ACTIVE",
                "port_details": {
                    "status": "ACTIVE",
                    "name": "",
                    "admin_state_up": True,
                    "network_id": "02dd8479-ef26-4398-a102-d19d0a7b3a1f",
                    "device_owner": "compute:nova",
                    "mac_address": "fa:16:3e:b1:3b:30",
                    "device_id": "8e3941b4-a6e9-499f-a1ac-2a4662025cba"
                },
                "tags": ["tag1,tag2"],
                "port_forwardings": []
            },
            {
                "id": "9e82d248-408a",
                "status": "ACTIVE"
            }
        ]
    }

    def test_delete_fail(self):
        delete_mock = [(self.get_method, self.response, 200),
                       (self.delete_method, 'error', None),
                       (self.log_method, 'exception', None)]
        self._test_delete(delete_mock, fail=True)

    def test_delete_pass(self):
        delete_mock = [(self.get_method, self.response, 200),
                       (self.delete_method, None, 204),
                       (self.log_method, 'exception', None)]
        self._test_delete(delete_mock)

    def test_dry_run(self):
        dry_mock = [(self.get_method, self.response, 200),
                    (self.delete_method, "delete", None)]
        self._test_dry_run_true(dry_mock)

    def test_save_state(self):
        self._test_saved_state_true([(self.get_method, self.response, 200)])
class TestNetworkRouterService(BaseCmdServiceTests):
    """Unit tests for the NetworkRouterService cleanup service."""

    service_class = 'NetworkRouterService'
    service_name = 'routers'
    validate_response = ('tempest.lib.services.network.routers_client'
                         '.RoutersClient.validate_response')
    response = {
        "routers": [
            {
                "admin_state_up": True,
                "availability_zone_hints": [],
                "availability_zones": [
                    "nova"
                ],
                "created_at": "2018-03-19T19:17:04Z",
                "description": "",
                "distributed": False,
                "external_gateway_info": {
                    "enable_snat": True,
                    "external_fixed_ips": [
                        {
                            "ip_address": "172.24.4.3",
                            "subnet_id": "b930d7f6-ceb7-40a0-8b81-a425dd994ccf"
                        },
                        {
                            "ip_address": "2001:db8::c",
                            "subnet_id": "0c56df5d-ace5-46c8-8f4c-45fa4e334d18"
                        }
                    ],
                    "network_id": "ae34051f-aa6c-4c75-abf5-50dc9ac99ef3"
                },
                "flavor_id": "f7b14d9a-b0dc-4fbe-bb14-a0f4970a69e0",
                "ha": False,
                "id": "915a14a6-867b-4af7-83d1-70efceb146f9",
                "name": "router2",
                "revision_number": 1,
                "routes": [
                    {
                        "destination": "179.24.1.0/24",
                        "nexthop": "172.24.3.99"
                    }
                ],
                "status": "ACTIVE",
                "updated_at": "2018-03-19T19:17:22Z",
                "project_id": "0bd18306d801447bb457a46252d82d13",
                "tenant_id": "0bd18306d801447bb457a46252d82d13",
                "tags": ["tag1,tag2"]
            },
            {
                "id": "4s5w34hj-id44",
                "name": "saved-router"
            }
        ],
        # "ports" key is added to the response in order to simplify unit
        # testing - it's because NetworkRouterService's delete method lists
        # ports before deleting any router
        "ports": []
    }

    def _test_delete(self, mocked_fixture_tuple_list, fail=False):
        """Override of the base check.

        Router deletion also lists/detaches ports, so the mocked calls
        may fire more than once; assert_called() is used instead of
        assert_called_once().
        """
        serv = self._create_cmd_service(self.service_class)
        resp, fixtures = self.run_function_with_mocks(
            serv.run,
            mocked_fixture_tuple_list,
        )
        for fixture in fixtures:
            if fail is False and fixture.mock.return_value == 'exception':
                fixture.mock.assert_not_called()
            elif self.service_name in self.saved_state.keys():
                fixture.mock.assert_called()
                for key in self.saved_state[self.service_name].keys():
                    self.assertNotIn(key, fixture.mock.call_args[0][0])
        self.assertFalse(serv.data)

    def test_delete_fail(self):
        delete_mock = [(self.get_method, self.response, 200),
                       (self.delete_method, 'error', None),
                       (self.log_method, 'exception', None)]
        self._test_delete(delete_mock, fail=True)

    def test_delete_pass(self):
        delete_mock = [(self.get_method, self.response, 200),
                       (self.delete_method, None, 204),
                       (self.log_method, 'exception', None)]
        self._test_delete(delete_mock)

    def test_dry_run(self):
        dry_mock = [(self.get_method, self.response, 200),
                    (self.delete_method, "delete", None)]
        self._test_dry_run_true(dry_mock)

    def test_save_state(self):
        self._test_saved_state_true([(self.get_method, self.response, 200)])

    def test_preserve_list(self):
        # A router whose id matches CONF_PUB_ROUTER must be skipped
        # when is_preserve is enabled.
        self.response['routers'].append(
            {
                "admin_state_up": True,
                "availability_zone_hints": [],
                "availability_zones": [
                    "nova"
                ],
                "created_at": "2018-03-19T19:17:04Z",
                "id": cleanup_service.CONF_PUB_ROUTER,
                "name": "router-preserve",
                "status": "ACTIVE",
                "updated_at": "2018-03-19T19:17:22Z",
                "project_id": "0bd18306d801447bb457a46252d82d13",
                "tenant_id": "0bd18306d801447bb457a46252d82d13",
                "tags": ["tag1,tag2"]
            })
        self._test_is_preserve_true([(self.get_method, self.response, 200)])
class TestNetworkMeteringLabelRuleService(BaseCmdServiceTests):
    """Unit tests for the NetworkMeteringLabelRuleService cleanup."""

    service_class = 'NetworkMeteringLabelRuleService'
    service_name = 'metering_label_rules'
    # One deletable rule plus '93a973ce-4dc5' (present in saved_state).
    response = {
        "metering_label_rules": [
            {
                "remote_ip_prefix": "20.0.0.0/24",
                "direction": "ingress",
                "metering_label_id": "e131d186-b02d-4c0b-83d5-0c0725c4f812",
                "id": "9536641a-7d14-4dc5-afaf-93a973ce0eb8",
                "excluded": False
            },
            {
                "direction": "ingress",
                "id": "93a973ce-4dc5"
            }
        ]
    }

    def test_delete_fail(self):
        delete_mock = [(self.get_method, self.response, 200),
                       (self.delete_method, 'error', None),
                       (self.log_method, 'exception', None)]
        self._test_delete(delete_mock, fail=True)

    def test_delete_pass(self):
        delete_mock = [(self.get_method, self.response, 200),
                       (self.delete_method, None, 204),
                       (self.log_method, 'exception', None)]
        self._test_delete(delete_mock)

    def test_dry_run(self):
        dry_mock = [(self.get_method, self.response, 200),
                    (self.delete_method, "delete", None)]
        self._test_dry_run_true(dry_mock)

    def test_save_state(self):
        self._test_saved_state_true([(self.get_method, self.response, 200)])
class TestNetworkMeteringLabelService(BaseCmdServiceTests):
    """Unit tests for the NetworkMeteringLabelService cleanup service."""

    service_class = 'NetworkMeteringLabelService'
    service_name = 'metering_labels'
    # One deletable label plus 'saved-label' (present in saved_state).
    response = {
        "metering_labels": [
            {
                "project_id": "45345b0ee1ea477fac0f541b2cb79cd4",
                "tenant_id": "45345b0ee1ea477fac0f541b2cb79cd4",
                "description": "label1 description",
                "name": "label1",
                "id": "a6700594-5b7a-4105-8bfe-723b346ce866",
                "shared": False
            },
            {
                "name": "saved-label",
                "id": "723b346ce866-4c7q",
            }
        ]
    }

    def test_delete_fail(self):
        delete_mock = [(self.get_method, self.response, 200),
                       (self.delete_method, 'error', None),
                       (self.log_method, 'exception', None)]
        self._test_delete(delete_mock, fail=True)

    def test_delete_pass(self):
        delete_mock = [(self.get_method, self.response, 200),
                       (self.delete_method, None, 204),
                       (self.log_method, 'exception', None)]
        self._test_delete(delete_mock)

    def test_dry_run(self):
        dry_mock = [(self.get_method, self.response, 200),
                    (self.delete_method, "delete", None)]
        self._test_dry_run_true(dry_mock)

    def test_save_state(self):
        self._test_saved_state_true([(self.get_method, self.response, 200)])
class TestNetworkPortService(BaseCmdServiceTests):
    """Unit tests for the NetworkPortService cleanup service."""

    service_class = 'NetworkPortService'
    service_name = 'ports'
    # One deletable port plus 'saved-port' (present in saved_state).
    response = {
        "ports": [
            {
                "admin_state_up": True,
                "allowed_address_pairs": [],
                "created_at": "2016-03-08T20:19:41",
                "description": "",
                "device_id": "9ae135f4-b6e0-4dad-9e91-3c223e385824",
                "device_owner": "",
                "dns_assignment": {
                    "hostname": "myport",
                    "ip_address": "172.24.4.2",
                    "fqdn": "myport.my-domain.org"
                },
                "dns_domain": "my-domain.org.",
                "dns_name": "myport",
                "extra_dhcp_opts": [
                    {
                        "opt_value": "pxelinux.0",
                        "ip_version": 4,
                        "opt_name": "bootfile-name"
                    }
                ],
                "fixed_ips": [
                    {
                        "ip_address": "172.24.4.2",
                        "subnet_id": "008ba151-0b8c-4a67-98b5-0d2b87666062"
                    }
                ],
                "id": "d80b1a3b-4fc1-49f3-952e-1e2ab7081d8b",
                "ip_allocation": "immediate",
                "mac_address": "fa:16:3e:58:42:ed",
                "name": "test_port",
                "network_id": "70c1db1f-b701-45bd-96e0-a313ee3430b3",
                "project_id": "",
                "revision_number": 1,
                "security_groups": [],
                "status": "ACTIVE",
                "tags": ["tag1,tag2"],
                "tenant_id": "",
                "updated_at": "2016-03-08T20:19:41",
                "qos_policy_id": "29d5e02e-d5ab-4929-bee4-4a9fc12e22ae",
                "port_security_enabled": False
            },
            {
                "id": "aa74aa4v-741a",
                "name": "saved-port",
                "device_owner": ""
            }
        ]
    }

    def test_delete_fail(self):
        delete_mock = [(self.get_method, self.response, 200),
                       (self.delete_method, 'error', None),
                       (self.log_method, 'exception', None)]
        self._test_delete(delete_mock, fail=True)

    def test_delete_pass(self):
        delete_mock = [(self.get_method, self.response, 200),
                       (self.delete_method, None, 204),
                       (self.log_method, 'exception', None)]
        self._test_delete(delete_mock)

    def test_dry_run(self):
        dry_mock = [(self.get_method, self.response, 200),
                    (self.delete_method, "delete", None)]
        self._test_dry_run_true(dry_mock)

    def test_save_state(self):
        self._test_saved_state_true([(self.get_method, self.response, 200)])

    def test_preserve_list(self):
        # A port on the CONF_PUB_NETWORK must be skipped when
        # is_preserve is enabled.
        self.response['ports'].append(
            {
                "created_at": "2018-03-08T20:19:41",
                "description": "",
                "device_id": "9ae135f4-b6e0-4dad-9e91-3c223e385824",
                "device_owner": "compute:router_gateway",
                "id": "d80b1a3b-4fc1-49f3-952e-1fdy1ws542",
                "ip_allocation": "immediate",
                "mac_address": "fa:16:3e:58:42:ed",
                "name": "preserve_port",
                "network_id": cleanup_service.CONF_PUB_NETWORK,
                "project_id": "",
                "security_groups": [],
                "status": "ACTIVE",
                "tags": ["tag1,tag2"],
                "tenant_id": "",
                "updated_at": "2018-03-08T20:19:41",
            })
        self._test_is_preserve_true([(self.get_method, self.response, 200)])
class TestNetworkSecGroupService(BaseCmdServiceTests):
    """Unit tests for the NetworkSecGroupService cleanup service."""

    service_class = 'NetworkSecGroupService'
    service_name = 'security_groups'
    # One deletable group plus 'saved-sec-group' (present in saved_state).
    response = {
        "security_groups": [
            {
                "description": "default",
                "id": "85cc3048-abc3-43cc-89b3-377341426ac5",
                "name": "test",
                "security_group_rules": [
                    {
                        "direction": "egress",
                        "ethertype": "IPv6",
                        "id": "3c0e45ff-adaf-4124-b083-bf390e5482ff",
                        "security_group_id": "85cc3048-abc3-43cc-89b3-3773414",
                        "project_id": "e4f50856753b4dc6afee5fa6b9b6c550",
                        "revision_number": 1,
                        "tags": ["tag1,tag2"],
                        "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550",
                        "created_at": "2018-03-19T19:16:56Z",
                        "updated_at": "2018-03-19T19:16:56Z",
                        "description": ""
                    }
                ]
            },
            {
                "id": "7q844add-3697",
                "name": "saved-sec-group"
            }
        ]
    }

    def test_delete_fail(self):
        delete_mock = [(self.get_method, self.response, 200),
                       (self.delete_method, 'error', None),
                       (self.log_method, 'exception', None)]
        self._test_delete(delete_mock, fail=True)

    def test_delete_pass(self):
        delete_mock = [(self.get_method, self.response, 200),
                       (self.delete_method, None, 204),
                       (self.log_method, 'exception', None)]
        self._test_delete(delete_mock)

    def test_dry_run(self):
        dry_mock = [(self.get_method, self.response, 200),
                    (self.delete_method, "delete", None)]
        self._test_dry_run_true(dry_mock)

    def test_save_state(self):
        self._test_saved_state_true([(self.get_method, self.response, 200)])

    def test_preserve_list(self):
        # A group belonging to a CONF-listed project must be skipped
        # when is_preserve is enabled.
        self.response['security_groups'].append(
            {
                "description": "default",
                "id": "85cc3048-abc3-43cc-89b3-377341426ac5",
                "name": "test",
                "security_group_rules": [
                    {
                        "direction": "egress",
                        "ethertype": "IPv6",
                        "id": "3c0e45ff-adaf-4124-b083-bf390e5482ff",
                        "security_group_id": "85cc3048-abc3-43cc-89b3-3773414",
                        "project_id": cleanup_service.CONF_PROJECTS[0],
                        "revision_number": 1,
                        "tags": ["tag1,tag2"],
                        "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550",
                        "created_at": "2018-03-19T19:16:56Z",
                        "updated_at": "2018-03-19T19:16:56Z",
                        "description": ""
                    }
                ]
            })
        self._test_is_preserve_true([(self.get_method, self.response, 200)])
class TestNetworkSubnetService(BaseCmdServiceTests):
    """Unit tests for the NetworkSubnetService cleanup service."""

    service_class = 'NetworkSubnetService'
    service_name = 'subnets'
    # One deletable subnet plus 'saved-subnet' (present in saved_state).
    response = {
        "subnets": [
            {
                "name": "private-subnet",
                "enable_dhcp": True,
                "network_id": "db193ab3-96e3-4cb3-8fc5-05f4296d0324",
                "project_id": "26a7980765d0414dbc1fc1f88cdb7e6e",
                "tenant_id": "26a7980765d0414dbc1fc1f88cdb7e6e",
                "dns_nameservers": [],
                "allocation_pools": [
                    {
                        "start": "10.0.0.2",
                        "end": "10.0.0.254"
                    }
                ],
                "host_routes": [],
                "ip_version": 4,
                "gateway_ip": "10.0.0.1",
                "cidr": "10.0.0.0/24",
                "id": "08eae331-0402-425a-923c-34f7cfe39c1b",
                "created_at": "2016-10-10T14:35:34Z",
                "revision_number": 2,
                "service_types": [],
                "tags": ["tag1,tag2"],
                "updated_at": "2016-10-10T14:35:34Z"
            },
            {
                "id": "55ttda4a-2584",
                "name": "saved-subnet"
            }
        ]
    }

    def test_delete_fail(self):
        delete_mock = [(self.get_method, self.response, 200),
                       (self.delete_method, 'error', None),
                       (self.log_method, 'exception', None)]
        self._test_delete(delete_mock, fail=True)

    def test_delete_pass(self):
        delete_mock = [(self.get_method, self.response, 200),
                       (self.delete_method, None, 204),
                       (self.log_method, 'exception', None)]
        self._test_delete(delete_mock)

    def test_dry_run(self):
        dry_mock = [(self.get_method, self.response, 200),
                    (self.delete_method, "delete", None)]
        self._test_dry_run_true(dry_mock)

    def test_save_state(self):
        self._test_saved_state_true([(self.get_method, self.response, 200)])

    def test_preserve_list(self):
        # A subnet on the CONF_PUB_NETWORK must be skipped when
        # is_preserve is enabled.
        self.response['subnets'].append(
            {
                "name": "public-subnet",
                "network_id": cleanup_service.CONF_PUB_NETWORK,
                "project_id": "26a7980765d0414dbc1fc1f88cdb7e6e",
                "tenant_id": "26a7980765d0414dbc1fc1f88cdb7e6e",
                "ip_version": 4,
                "gateway_ip": "10.0.0.1",
                "cidr": "10.0.0.0/24",
                "id": "08eae331-0402-425a-923c-34f7cfe39c1b",
                "created_at": "2018-10-10T14:35:34Z",
                "service_types": [],
                "tags": ["tag1,tag2"],
                "updated_at": "2018-10-10T14:35:34Z"
            })
        self._test_is_preserve_true([(self.get_method, self.response, 200)])
class TestNetworkSubnetPoolsService(BaseCmdServiceTests):
    """Unit tests for the NetworkSubnetPoolsService cleanup service."""

    service_class = 'NetworkSubnetPoolsService'
    service_name = 'subnetpools'
    # One deletable pool plus 'saved-subnet-pool' (in saved_state).
    response = {
        "subnetpools": [
            {
                "min_prefixlen": "64",
                "default_prefixlen": "64",
                "id": "03f761e6-eee0-43fc-a921-8acf64c14988",
                "max_prefixlen": "64",
                "name": "my-subnet-pool-ipv6",
                "is_default": False,
                "project_id": "9fadcee8aa7c40cdb2114fff7d569c08",
                "tenant_id": "9fadcee8aa7c40cdb2114fff7d569c08",
                "prefixes": [
                    "2001:db8:0:2::/64",
                    "2001:db8::/63"
                ],
                "ip_version": 6,
                "shared": False,
                "description": "",
                "created_at": "2016-03-08T20:19:41",
                "updated_at": "2016-03-08T20:19:41",
                "revision_number": 2,
                "tags": ["tag1,tag2"]
            },
            {
                "id": "8acf64c1-43fc",
                "name": "saved-subnet-pool"
            }
        ]
    }

    def test_delete_fail(self):
        delete_mock = [(self.get_method, self.response, 200),
                       (self.delete_method, 'error', None),
                       (self.log_method, 'exception', None)]
        self._test_delete(delete_mock, fail=True)

    def test_delete_pass(self):
        delete_mock = [(self.get_method, self.response, 200),
                       (self.delete_method, None, 204),
                       (self.log_method, 'exception', None)]
        self._test_delete(delete_mock)

    def test_dry_run(self):
        dry_mock = [(self.get_method, self.response, 200),
                    (self.delete_method, "delete", None)]
        self._test_dry_run_true(dry_mock)

    def test_save_state(self):
        self._test_saved_state_true([(self.get_method, self.response, 200)])

    def test_preserve_list(self):
        # A pool belonging to a CONF-listed project must be skipped
        # when is_preserve is enabled.
        self.response['subnetpools'].append(
            {
                "min_prefixlen": "64",
                "default_prefixlen": "64",
                "id": "9acf64c1-43fc",
                "name": "preserve-pool",
                "project_id": cleanup_service.CONF_PROJECTS[0],
                "created_at": "2016-03-08T20:19:41",
                "updated_at": "2016-03-08T20:19:41"
            })
        self._test_is_preserve_true([(self.get_method, self.response, 200)])
# Begin global service classes
class TestRegionService(BaseCmdServiceTests):
    """Unit tests for the RegionService cleanup service."""

    service_class = 'RegionService'
    service_name = 'regions'
    # 'RegionOne' is present in saved_state; 'RegionTwo' is deletable.
    response = {
        "regions": [{
            "parent_region_id": None,
            "id": "RegionOne",
            "links": {
                "self":
                    "http://10.0.145.61:5000/v3/regions/RegionOne"
            },
            "description": ""
        },
            {
                "parent_region_id": None,
                "id": "RegionTwo",
                "links": {
                    "self":
                        "http://10.0.145.61:5000/v3/regions/RegionTwo"
                },
                "description": ""
            }],
        "links": {
            "self":
                "http://10.0.145.61:5000/v3/regions",
            "next": None,
            "previous": None
        }
    }

    def test_delete_pass(self):
        delete_mock = [(self.get_method, self.response, 200),
                       (self.delete_method, None, 204),
                       (self.log_method, "exception", None)]
        self._test_delete(delete_mock)

    def test_delete_fail(self):
        delete_mock = [(self.get_method, self.response, 200),
                       (self.delete_method, 'error', None),
                       (self.log_method, "exception", None)]
        self._test_delete(delete_mock, fail=True)

    def test_dry_run(self):
        dry_mock = [(self.get_method, self.response, 200),
                    (self.delete_method, "delete", None)]
        self._test_dry_run_true(dry_mock)

    def test_save_state(self):
        self._test_saved_state_true([(self.get_method, self.response, 200)])
class TestDomainService(BaseCmdServiceTests):
    """Unit tests for the DomainService cleanup service.

    Domains must be disabled (via update_domain) before deletion, so
    that client method is mocked in the delete tests as well.
    """

    service_class = 'DomainService'
    service_name = 'domains'
    # 'default' is present in saved_state; 'Sky_net' is deletable.
    response = {
        "domains": [
            {
                "description": "Destroy all humans",
                "enabled": True,
                "id": "5a75994a3",
                "links": {
                    "self": "http://example.com/identity/v3/domains/5a75994a3"
                },
                "name": "Sky_net"
            },
            {
                "description": "Owns users and tenants on Identity API",
                "enabled": False,
                "id": "default",
                "links": {
                    "self": "http://example.com/identity/v3/domains/default"
                },
                "name": "Default"
            }
        ]
    }
    mock_update = ("tempest.lib.services.identity.v3."
                   "domains_client.DomainsClient.update_domain")

    def test_delete_fail(self):
        delete_mock = [(self.get_method, self.response, 200),
                       (self.delete_method, 'error', None),
                       (self.log_method, 'exception', None),
                       (self.mock_update, 'update', None)]
        self._test_delete(delete_mock, fail=True)

    def test_delete_pass(self):
        delete_mock = [(self.get_method, self.response, 200),
                       (self.delete_method, None, 204),
                       (self.log_method, 'exception', None),
                       (self.mock_update, 'update', None)]
        self._test_delete(delete_mock)

    def test_dry_run(self):
        dry_mock = [(self.get_method, self.response, 200),
                    (self.delete_method, "delete", None)]
        self._test_dry_run_true(dry_mock)

    def test_save_state(self):
        self._test_saved_state_true([(self.get_method, self.response, 200)])
class TestProjectsService(BaseCmdServiceTests):
    """Unit tests for the ProjectService cleanup service."""

    service_class = 'ProjectService'
    service_name = 'projects'
    # 'manhattan' is present in saved_state; 'Apollo' is deletable.
    response = {
        "projects": [
            {
                "is_domain": False,
                "description": None,
                "domain_id": "default",
                "enabled": True,
                "id": "f38ohgp93jj032",
                "links": {
                    "self": "http://example.com/identity/v3/projects"
                            "/f38ohgp93jj032"
                },
                "name": "manhattan",
                "parent_id": None
            },
            {
                "is_domain": False,
                "description": None,
                "domain_id": "default",
                "enabled": True,
                "id": "098f89d3292ri4jf4",
                "links": {
                    "self": "http://example.com/identity/v3/projects"
                            "/098f89d3292ri4jf4"
                },
                "name": "Apollo",
                "parent_id": None
            }
        ]
    }

    def test_delete_fail(self):
        delete_mock = [(self.get_method, self.response, 200),
                       (self.delete_method, 'error', None),
                       (self.log_method, 'exception', None)]
        self._test_delete(delete_mock, fail=True)

    def test_delete_pass(self):
        delete_mock = [(self.get_method, self.response, 200),
                       (self.delete_method, None, 204),
                       (self.log_method, 'exception', None)]
        self._test_delete(delete_mock)

    def test_dry_run(self):
        dry_mock = [(self.get_method, self.response, 200),
                    (self.delete_method, "delete", None)]
        self._test_dry_run_true(dry_mock)

    def test_save_state(self):
        self._test_saved_state_true([(self.get_method, self.response, 200)])

    def test_preserve_list(self):
        # A project whose name is CONF-listed must be skipped when
        # is_preserve is enabled.
        self.response['projects'].append(
            {
                "is_domain": False,
                "description": None,
                "domain_id": "default",
                "enabled": True,
                "id": "r343q98h09f3092",
                "links": {
                    "self": "http://example.com/identity/v3/projects"
                            "/r343q98h09f3092"
                },
                "name": cleanup_service.CONF_PROJECTS[0],
                "parent_id": None
            })
        self._test_is_preserve_true([(self.get_method, self.response, 200)])
class TestImagesService(BaseCmdServiceTests):
    """Tests for the cleanup ImageService."""

    service_class = 'ImageService'
    service_name = 'images'
    response = {
        "images": [
            {
                "status": "ACTIVE",
                "name": "stratus-0.3.2-x86_64-disk",
                "id": "34yhwr-4t3q",
                "updated": "2014-11-03T16:40:10Z",
                "links": [{
                    "href": "http://openstack.ex.com/v2/openstack/images/"
                            "34yhwr-4t3q",
                    "rel": "self"}],
                "created": "2014-10-30T08:23:39Z",
                "minDisk": 0,
                "minRam": 0,
                "progress": 0,
                "metadata": {},
            },
            {
                "status": "ACTIVE",
                "name": "cirros-0.3.2-x86_64-disk",
                "id": "1bea47ed-f6a9",
                "updated": "2014-11-03T16:40:10Z",
                "links": [{
                    "href": "http://openstack.ex.com/v2/openstack/images/"
                            "1bea47ed-f6a9",
                    "rel": "self"}],
                "created": "2014-10-30T08:23:39Z",
                "minDisk": 0,
                "minRam": 0,
                "progress": 0,
                "metadata": {},
            }
        ]
    }

    def test_delete_fail(self):
        """A delete error is caught and logged."""
        delete_mock = [(self.get_method, self.response, 200),
                       (self.delete_method, 'error', None),
                       (self.log_method, 'exception', None)]
        self._test_delete(delete_mock, fail=True)

    def test_delete_pass(self):
        """Successful deletion returns 204."""
        delete_mock = [(self.get_method, self.response, 200),
                       (self.delete_method, None, 204),
                       (self.log_method, 'exception', None)]
        self._test_delete(delete_mock)

    def test_dry_run(self):
        """Dry-run mode must not actually delete any image."""
        dry_mock = [(self.get_method, self.response, 200),
                    (self.delete_method, "delete", None)]
        self._test_dry_run_true(dry_mock)

    def test_save_state(self):
        """Saved-state mode records the existing images."""
        self._test_saved_state_true([(self.get_method, self.response, 200)])

    def test_preserve_list(self):
        """Images listed in CONF are preserved (not deleted)."""
        # Extend a *copy* of the canned payload: the original code appended to
        # the class-level ``response`` dict, leaking the extra image into
        # every other test of this class (test-order dependence).
        response = dict(self.response)
        response['images'] = self.response['images'] + [
            {
                "status": "ACTIVE",
                "name": "cirros-0.3.2-x86_64-disk",
                "id": cleanup_service.CONF_IMAGES[0],
                "updated": "2014-11-03T16:40:10Z",
                "links": [{
                    "href": "http://openstack.ex.com/v2/openstack/images/"
                            "None",
                    "rel": "self"}],
                "created": "2014-10-30T08:23:39Z",
                "minDisk": 0,
                "minRam": 0,
                "progress": 0,
                "metadata": {},
            }]
        self._test_is_preserve_true([(self.get_method, response, 200)])
class TestFlavorService(BaseCmdServiceTests):
    """Tests for the cleanup FlavorService."""

    service_class = 'FlavorService'
    service_name = 'flavors'
    response = {
        "flavors": [
            {
                "disk": 1,
                "id": "42",
                "links": [{
                    "href": "http://openstack.ex.com/v2/openstack/flavors/1",
                    "rel": "self"}, {
                    "href": "http://openstack.ex.com/openstack/flavors/1",
                    "rel": "bookmark"}],
                "name": "m1.tiny",
                "ram": 512,
                "swap": 1,
                "vcpus": 1
            },
            {
                "disk": 2,
                "id": "13",
                "links": [{
                    "href": "http://openstack.ex.com/v2/openstack/flavors/2",
                    "rel": "self"}, {
                    "href": "http://openstack.ex.com/openstack/flavors/2",
                    "rel": "bookmark"}],
                "name": "m1.tiny",
                "ram": 512,
                "swap": 1,
                "vcpus": 1
            }
        ]
    }

    def test_delete_fail(self):
        """A delete error is caught and logged."""
        delete_mock = [(self.get_method, self.response, 200),
                       (self.delete_method, 'error', None),
                       (self.log_method, 'exception', None)]
        self._test_delete(delete_mock, fail=True)

    def test_delete_pass(self):
        """Successful deletion returns 202 (flavor delete is async)."""
        delete_mock = [(self.get_method, self.response, 200),
                       (self.delete_method, None, 202),
                       (self.log_method, 'exception', None)]
        self._test_delete(delete_mock)

    def test_dry_run(self):
        """Dry-run mode must not actually delete any flavor."""
        dry_mock = [(self.get_method, self.response, 200),
                    (self.delete_method, "delete", None)]
        self._test_dry_run_true(dry_mock)

    def test_save_state(self):
        """Saved-state mode records the existing flavors."""
        self._test_saved_state_true([(self.get_method, self.response, 200)])

    def test_preserve_list(self):
        """Flavors listed in CONF are preserved (not deleted)."""
        # Extend a *copy* of the canned payload: the original code appended to
        # the class-level ``response`` dict, leaking the extra flavor into
        # every other test of this class (test-order dependence).
        response = dict(self.response)
        response['flavors'] = self.response['flavors'] + [
            {
                "disk": 3,
                "id": cleanup_service.CONF_FLAVORS[0],
                "links": [{
                    "href": "http://openstack.ex.com/v2/openstack/flavors/3",
                    "rel": "self"}, {
                    "href": "http://openstack.ex.com/openstack/flavors/3",
                    "rel": "bookmark"}],
                "name": "m1.tiny",
                "ram": 512,
                "swap": 1,
                "vcpus": 1
            }]
        self._test_is_preserve_true([(self.get_method, response, 200)])
class TestRoleService(BaseCmdServiceTests):
    """Tests for the cleanup RoleService (identity v3 roles)."""

    service_class = 'RoleService'
    service_name = 'roles'
    response = {
        "roles": [
            {
                "domain_id": "FakeDomain",
                "id": "3efrt74r45hn",
                "name": "president",
                "links": {
                    "self": "http://ex.com/identity/v3/roles/3efrt74r45hn"
                }
            },
            {
                "domain_id": "FakeDomain",
                "id": "39ruo5sdk040",
                "name": "vice-p",
                "links": {
                    "self": "http://ex.com/identity/v3/roles/39ruo5sdk040"
                }
            }
        ]
    }

    def _delete_mocks(self, delete_outcome, delete_status):
        """Build the common mock list for the delete scenarios."""
        return [(self.get_method, self.response, 200),
                (self.delete_method, delete_outcome, delete_status),
                (self.log_method, 'exception', None)]

    def test_delete_fail(self):
        """A delete error is caught and logged."""
        self._test_delete(self._delete_mocks('error', None), fail=True)

    def test_delete_pass(self):
        """Successful deletion returns 204."""
        self._test_delete(self._delete_mocks(None, 204))

    def test_dry_run(self):
        """Dry-run mode must not actually delete any role."""
        mocks = [(self.get_method, self.response, 200),
                 (self.delete_method, "delete", None)]
        self._test_dry_run_true(mocks)

    def test_save_state(self):
        """Saved-state mode records the existing roles."""
        self._test_saved_state_true([(self.get_method, self.response, 200)])
class TestUserService(BaseCmdServiceTests):
    """Tests for the cleanup UserService (identity v3 users)."""

    service_class = 'UserService'
    service_name = 'users'
    response = {
        "users": [
            {
                "domain_id": "TempestDomain",
                "enabled": True,
                "id": "e812fb332456423fdv1b1320121qwe2",
                "links": {
                    "self": "http://example.com/identity/v3/users/"
                            "e812fb332456423fdv1b1320121qwe2",
                },
                "name": "Thunder",
                "password_expires_at": "3102-11-06T15:32:17.000000",
            },
            {
                "domain_id": "TempestDomain",
                "enabled": True,
                "id": "32rwef64245tgr20121qw324bgg",
                "links": {
                    "self": "http://example.com/identity/v3/users/"
                            "32rwef64245tgr20121qw324bgg",
                },
                "name": "Lightning",
                "password_expires_at": "1893-11-06T15:32:17.000000",
            }
        ]
    }

    def test_delete_fail(self):
        """A delete error is caught and logged."""
        delete_mock = [(self.get_method, self.response, 200),
                       (self.delete_method, 'error', None),
                       (self.log_method, 'exception', None)]
        self._test_delete(delete_mock, fail=True)

    def test_delete_pass(self):
        """Successful deletion returns 204."""
        delete_mock = [(self.get_method, self.response, 200),
                       (self.delete_method, None, 204),
                       (self.log_method, 'exception', None)]
        self._test_delete(delete_mock)

    def test_dry_run(self):
        """Dry-run mode must not actually delete any user."""
        dry_mock = [(self.get_method, self.response, 200),
                    (self.delete_method, "delete", None)]
        self._test_dry_run_true(dry_mock)

    def test_save_state(self):
        """Saved-state mode records the existing users."""
        self._test_saved_state_true([(self.get_method, self.response, 200)])

    def test_preserve_list(self):
        """Users listed in CONF are preserved (not deleted)."""
        # Extend a *copy* of the canned payload: the original code appended to
        # the class-level ``response`` dict, leaking the extra user into every
        # other test of this class (test-order dependence).
        response = dict(self.response)
        response['users'] = self.response['users'] + [
            {
                "domain_id": "TempestDomain",
                "enabled": True,
                "id": "23ads5tg3rtrhe30121qwhyth",
                "links": {
                    "self": "http://example.com/identity/v3/users/"
                            "23ads5tg3rtrhe30121qwhyth",
                },
                "name": cleanup_service.CONF_USERS[0],
                "password_expires_at": "1893-11-06T15:32:17.000000",
            }]
        self._test_is_preserve_true([(self.get_method, response, 200)])
|
|
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the profile page."""
__author__ = 'Sean Lip'
from core.domain import exp_services
from core.domain import user_services
from core.tests import test_utils
import feconf
class SignupTest(test_utils.GenericTestBase):
    """Tests for the signup flow."""

    def test_signup_page_does_not_have_top_right_menu(self):
        """The signup page hides the login/logout menu entries."""
        self.login(self.EDITOR_EMAIL)
        page = self.testapp.get(feconf.SIGNUP_URL)
        self.assertEqual(page.status_int, 200)
        page.mustcontain(no=['Logout', 'Sign in'])
        self.logout()

    def test_going_somewhere_else_while_signing_in_logs_user_out(self):
        """Navigating away mid-signup logs the user out."""
        exp_services.load_demo('0')
        self.login(self.EDITOR_EMAIL)
        page = self.testapp.get(feconf.SIGNUP_URL)
        self.assertEqual(page.status_int, 200)
        redirect = self.testapp.get('/create/0')
        self.assertEqual(redirect.status_int, 302)
        self.assertIn('Logout', redirect.headers['location'])
        self.assertIn('create', redirect.headers['location'])
        self.logout()

    def test_accepting_terms_is_handled_correctly(self):
        """Signup requires an explicit True for agreed_to_terms."""
        self.login(self.EDITOR_EMAIL)
        page = self.testapp.get(feconf.SIGNUP_URL)
        csrf_token = self.get_csrf_token_from_response(page)

        json_response = self.post_json(
            feconf.SIGNUP_DATA_URL, {'agreed_to_terms': False},
            csrf_token=csrf_token, expect_errors=True,
            expected_status_int=400)
        self.assertEqual(json_response['code'], 400)
        self.assertIn('you will need to accept', json_response['error'])

        json_response = self.post_json(
            feconf.SIGNUP_DATA_URL,
            {'agreed_to_terms': 'Hasta la vista!'},
            csrf_token=csrf_token, expect_errors=True,
            expected_status_int=400)
        self.assertEqual(json_response['code'], 400)
        self.assertIn('you will need to accept', json_response['error'])

        # Only an explicit boolean True is accepted.
        self.post_json(
            feconf.SIGNUP_DATA_URL,
            {'agreed_to_terms': True, 'username': 'myusername'},
            csrf_token=csrf_token)
        self.logout()

    def test_username_is_handled_correctly(self):
        """Signup rejects missing, empty and non-alphanumeric usernames."""
        self.login(self.EDITOR_EMAIL)
        page = self.testapp.get(feconf.SIGNUP_URL)
        csrf_token = self.get_csrf_token_from_response(page)

        invalid_cases = [
            ({'agreed_to_terms': True},
             'Empty username supplied'),
            ({'username': '', 'agreed_to_terms': True},
             'Empty username supplied'),
            ({'username': '!a!', 'agreed_to_terms': True},
             'can only have alphanumeric characters'),
            ({'username': self.UNICODE_TEST_STRING, 'agreed_to_terms': True},
             'can only have alphanumeric characters'),
        ]
        for payload, expected_error in invalid_cases:
            json_response = self.post_json(
                feconf.SIGNUP_DATA_URL, payload,
                csrf_token=csrf_token, expect_errors=True,
                expected_status_int=400)
            self.assertEqual(json_response['code'], 400)
            self.assertIn(expected_error, json_response['error'])

        # A plain alphanumeric username is accepted.
        self.post_json(
            feconf.SIGNUP_DATA_URL,
            {'username': 'abcde', 'agreed_to_terms': True},
            csrf_token=csrf_token)
        self.logout()
class UsernameCheckHandlerTests(test_utils.GenericTestBase):
    """Tests for the username-availability check handler."""

    def test_username_check(self):
        """The handler reports taken and invalid usernames correctly."""
        self.signup('abc@example.com', username='abc')
        self.login(self.EDITOR_EMAIL)
        page = self.testapp.get(feconf.SIGNUP_URL)
        csrf_token = self.get_csrf_token_from_response(page)

        # An existing username is reported as taken.
        json_response = self.post_json(
            feconf.USERNAME_CHECK_DATA_URL, {'username': 'abc'},
            csrf_token=csrf_token)
        self.assertEqual(json_response, {'username_is_taken': True})

        # A fresh username is reported as available.
        json_response = self.post_json(
            feconf.USERNAME_CHECK_DATA_URL, {'username': 'def'},
            csrf_token=csrf_token)
        self.assertEqual(json_response, {'username_is_taken': False})

        # Non-alphanumeric usernames are rejected with a 400.
        for invalid_username in ['!!!INVALID!!!', self.UNICODE_TEST_STRING]:
            json_response = self.post_json(
                feconf.USERNAME_CHECK_DATA_URL,
                {'username': invalid_username},
                csrf_token=csrf_token, expect_errors=True,
                expected_status_int=400)
            self.assertEqual(json_response['code'], 400)
            self.assertIn(
                'can only have alphanumeric characters',
                json_response['error'])
        self.logout()
class EmailPreferencesTests(test_utils.GenericTestBase):
    """Tests for how signup sets the email-update preference.

    The login / CSRF / signup boilerplate and the swap-and-assert pattern
    were triplicated across the three tests; they are factored into private
    helpers so each test states only its scenario.
    """

    def _complete_signup(self, payload):
        """Log in, fetch a CSRF token and POST the signup payload.

        Returns:
            str. The user id of the newly signed-up editor.
        """
        self.login(self.EDITOR_EMAIL)
        response = self.testapp.get(feconf.SIGNUP_URL)
        csrf_token = self.get_csrf_token_from_response(response)
        self.post_json(feconf.SIGNUP_DATA_URL, payload, csrf_token=csrf_token)
        return self.get_user_id_from_email(self.EDITOR_EMAIL)

    def _assert_email_pref(self, editor_id, feconf_default, expected):
        """Assert the stored preference while feconf's default is swapped."""
        with self.swap(
                feconf, 'DEFAULT_EMAIL_UPDATES_PREFERENCE', feconf_default):
            self.assertEqual(
                user_services.get_email_preferences(editor_id),
                {'can_receive_email_updates': expected})

    def test_user_not_setting_email_prefs_on_signup(self):
        self.EDITOR_ID = self._complete_signup(
            {'username': 'abc', 'agreed_to_terms': True})
        # The email update preference should be whatever the setting in feconf
        # is.
        self._assert_email_pref(self.EDITOR_ID, True, True)
        self._assert_email_pref(self.EDITOR_ID, False, False)

    def test_user_allowing_emails_on_signup(self):
        self.EDITOR_ID = self._complete_signup(
            {'username': 'abc', 'agreed_to_terms': True,
             'can_receive_email_updates': True})
        # The email update preference should be True in all cases.
        self._assert_email_pref(self.EDITOR_ID, True, True)
        self._assert_email_pref(self.EDITOR_ID, False, True)

    def test_user_disallowing_emails_on_signup(self):
        self.EDITOR_ID = self._complete_signup(
            {'username': 'abc', 'agreed_to_terms': True,
             'can_receive_email_updates': False})
        # The email update preference should be False in all cases.
        self._assert_email_pref(self.EDITOR_ID, True, False)
        self._assert_email_pref(self.EDITOR_ID, False, False)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for debugger functionalities in tf.compat.v1.Session with grpc:// URLs.
This test file focuses on the grpc:// debugging of local (non-distributed)
tf.Sessions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.debug.lib import grpc_debug_test_server
from tensorflow.python.debug.lib import session_debug_testlib
from tensorflow.python.debug.wrappers import framework
from tensorflow.python.debug.wrappers import grpc_wrapper
from tensorflow.python.debug.wrappers import hooks
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import monitored_session
class GrpcDebugServerTest(test_util.TensorFlowTestCase):
  """Lifecycle (start/stop/restart) tests for the gRPC debug test server."""

  def testRepeatedRunServerRaisesException(self):
    """A server that is already running rejects a second run_server()."""
    startup = grpc_debug_test_server.start_server_on_separate_thread(
        poll_server=True)
    server_thread = startup[3]
    server = startup[4]
    # The server is started asynchronously; poll_server=True waits until it
    # has actually entered the running state.
    with self.assertRaisesRegex(ValueError,
                                "Server has already started running"):
      server.run_server()
    server.stop_server().wait()
    server_thread.join()

  def testRepeatedStopServerRaisesException(self):
    """A stopped server rejects a second stop_server()."""
    startup = grpc_debug_test_server.start_server_on_separate_thread(
        poll_server=True)
    server_thread = startup[3]
    server = startup[4]
    server.stop_server().wait()
    server_thread.join()
    with self.assertRaisesRegex(ValueError, "Server has already stopped"):
      server.stop_server().wait()

  def testRunServerAfterStopRaisesException(self):
    """A stopped server cannot be started again via run_server()."""
    startup = grpc_debug_test_server.start_server_on_separate_thread(
        poll_server=True)
    server_thread = startup[3]
    server = startup[4]
    server.stop_server().wait()
    server_thread.join()
    with self.assertRaisesRegex(ValueError, "Server has already stopped"):
      server.run_server()

  def testStartServerWithoutBlocking(self):
    """With blocking=False, the starter thread returns immediately."""
    startup = grpc_debug_test_server.start_server_on_separate_thread(
        poll_server=True, blocking=False)
    server_thread = startup[3]
    server = startup[4]
    # The thread that starts the server shouldn't block, so we should be able
    # to join it before stopping the server.
    server_thread.join()
    server.stop_server().wait()
@test_util.run_v1_only(
    "GrpcDebugWrapperSession and GrpcDebugHookare are for tf.Session only")
# NOTE(review): "GrpcDebugHookare are" in the skip reason above looks like a
# typo for "GrpcDebugHook are"; left unchanged (it is a runtime string).
class SessionDebugGrpcTest(session_debug_testlib.SessionDebugTestBase):
  """grpc:// debugging tests for local tf.Sessions.

  One gRPC debug test server is shared by all tests in this class; each
  test's dump data lands in the server's dump directory and is removed in
  tearDown() to keep tests isolated.
  """

  @classmethod
  def setUpClass(cls):
    session_debug_testlib.SessionDebugTestBase.setUpClass()
    # Start the shared debug server on its own thread; tests stream tensors
    # to it via grpc://localhost:<cls._server_port>.
    (cls._server_port, cls._debug_server_url, cls._server_dump_dir,
     cls._server_thread,
     cls._server) = grpc_debug_test_server.start_server_on_separate_thread()

  @classmethod
  def tearDownClass(cls):
    # Stop the test server and join the thread.
    cls._server.stop_server().wait()
    cls._server_thread.join()
    session_debug_testlib.SessionDebugTestBase.tearDownClass()

  def setUp(self):
    # Override the dump root as the test server's dump directory.
    self._dump_root = self._server_dump_dir

  def tearDown(self):
    # Remove per-test dump data so later tests see a clean directory.
    if os.path.isdir(self._server_dump_dir):
      file_io.delete_recursively(self._server_dump_dir)
    session_debug_testlib.SessionDebugTestBase.tearDown(self)

  def _debug_urls(self, run_number=None):
    # All runs stream to the single shared server, regardless of run_number.
    return ["grpc://localhost:%d" % self._server_port]

  def _debug_dump_dir(self, run_number=None):
    # Without a run number, the dump root itself; otherwise a per-run subdir.
    if run_number is None:
      return self._dump_root
    else:
      return os.path.join(self._dump_root, "run_%d" % run_number)

  def testConstructGrpcDebugWrapperSessionWithInvalidTypeRaisesException(self):
    """A non-str, non-list server-address argument is rejected."""
    sess = session.Session(
        config=session_debug_testlib.no_rewrite_session_config())
    with self.assertRaisesRegex(
        TypeError, "Expected type str or list in grpc_debug_server_addresses"):
      grpc_wrapper.GrpcDebugWrapperSession(sess, 1337)

  def testConstructGrpcDebugWrapperSessionWithInvalidTypeRaisesException2(self):
    """A list of addresses containing a non-str element is rejected."""
    sess = session.Session(
        config=session_debug_testlib.no_rewrite_session_config())
    with self.assertRaisesRegex(
        TypeError, "Expected type str in list grpc_debug_server_addresses"):
      grpc_wrapper.GrpcDebugWrapperSession(sess, ["localhost:1337", 1338])

  def testUseInvalidWatchFnTypeWithGrpcDebugWrapperSessionRaisesException(self):
    """A non-callable watch_fn argument is rejected."""
    sess = session.Session(
        config=session_debug_testlib.no_rewrite_session_config())
    with self.assertRaises(TypeError):
      grpc_wrapper.GrpcDebugWrapperSession(
          sess, "localhost:%d" % self._server_port, watch_fn="foo")

  def testGrpcDebugWrapperSessionWithoutWatchFnWorks(self):
    """Without a watch_fn, the run streams DebugIdentity for graph tensors."""
    u = variables.VariableV1(2.1, name="u")
    v = variables.VariableV1(20.0, name="v")
    w = math_ops.multiply(u, v, name="w")
    sess = session.Session(
        config=session_debug_testlib.no_rewrite_session_config())
    sess.run(u.initializer)
    sess.run(v.initializer)
    sess = grpc_wrapper.GrpcDebugWrapperSession(
        sess, "localhost:%d" % self._server_port)
    w_result = sess.run(w)
    self.assertAllClose(42.0, w_result)
    # The dump received by the server contains at least the five tensors
    # asserted on below (u, u/read, v, v/read, w).
    dump = debug_data.DebugDumpDir(self._dump_root)
    self.assertLessEqual(5, dump.size)
    self.assertAllClose([2.1], dump.get_tensors("u", 0, "DebugIdentity"))
    self.assertAllClose([2.1], dump.get_tensors("u/read", 0, "DebugIdentity"))
    self.assertAllClose([20.0], dump.get_tensors("v", 0, "DebugIdentity"))
    self.assertAllClose([20.0], dump.get_tensors("v/read", 0, "DebugIdentity"))
    self.assertAllClose([42.0], dump.get_tensors("w", 0, "DebugIdentity"))

  def testGrpcDebugWrapperSessionWithWatchFnWorks(self):
    """A watch_fn restricts watching to matching nodes and debug ops."""
    def watch_fn(feeds, fetch_keys):
      # Watch only */read nodes, with two debug ops each.
      del feeds, fetch_keys
      return ["DebugIdentity", "DebugNumericSummary"], r".*/read", None
    u = variables.VariableV1(2.1, name="u")
    v = variables.VariableV1(20.0, name="v")
    w = math_ops.multiply(u, v, name="w")
    sess = session.Session(
        config=session_debug_testlib.no_rewrite_session_config())
    sess.run(u.initializer)
    sess.run(v.initializer)
    sess = grpc_wrapper.GrpcDebugWrapperSession(
        sess, "localhost:%d" % self._server_port, watch_fn=watch_fn)
    w_result = sess.run(w)
    self.assertAllClose(42.0, w_result)
    # 2 matched nodes (u/read, v/read) x 2 debug ops = 4 dumped tensors.
    dump = debug_data.DebugDumpDir(self._dump_root)
    self.assertEqual(4, dump.size)
    self.assertAllClose([2.1], dump.get_tensors("u/read", 0, "DebugIdentity"))
    self.assertEqual(
        14, len(dump.get_tensors("u/read", 0, "DebugNumericSummary")[0]))
    self.assertAllClose([20.0], dump.get_tensors("v/read", 0, "DebugIdentity"))
    self.assertEqual(
        14, len(dump.get_tensors("v/read", 0, "DebugNumericSummary")[0]))

  def testGrpcDebugHookWithStatelessWatchFnWorks(self):
    """GrpcDebugHook with a WatchOptions-returning watch_fn streams dumps."""
    # Perform some set up. Specifically, construct a simple TensorFlow graph and
    # create a watch function for certain ops.
    def watch_fn(feeds, fetch_keys):
      del feeds, fetch_keys
      return framework.WatchOptions(
          debug_ops=["DebugIdentity", "DebugNumericSummary"],
          node_name_regex_allowlist=r".*/read",
          op_type_regex_allowlist=None,
          tolerate_debug_op_creation_failures=True)
    u = variables.VariableV1(2.1, name="u")
    v = variables.VariableV1(20.0, name="v")
    w = math_ops.multiply(u, v, name="w")
    sess = session.Session(
        config=session_debug_testlib.no_rewrite_session_config())
    sess.run(u.initializer)
    sess.run(v.initializer)
    # Create a hook. One could use this hook with say a tflearn Estimator.
    # However, we use a HookedSession in this test to avoid depending on the
    # internal implementation of Estimators.
    grpc_debug_hook = hooks.GrpcDebugHook(
        ["localhost:%d" % self._server_port], watch_fn=watch_fn)
    sess = monitored_session._HookedSession(sess, [grpc_debug_hook])
    # Run the hooked session. This should stream tensor data to the GRPC
    # endpoints.
    w_result = sess.run(w)
    # Verify that the hook monitored the correct tensors.
    self.assertAllClose(42.0, w_result)
    dump = debug_data.DebugDumpDir(self._dump_root)
    self.assertEqual(4, dump.size)
    self.assertAllClose([2.1], dump.get_tensors("u/read", 0, "DebugIdentity"))
    self.assertEqual(
        14, len(dump.get_tensors("u/read", 0, "DebugNumericSummary")[0]))
    self.assertAllClose([20.0], dump.get_tensors("v/read", 0, "DebugIdentity"))
    self.assertEqual(
        14, len(dump.get_tensors("v/read", 0, "DebugNumericSummary")[0]))

  def testTensorBoardDebugHookWorks(self):
    """TensorBoardDebugHook streams watched tensors, tracebacks and source."""
    u = variables.VariableV1(2.1, name="u")
    v = variables.VariableV1(20.0, name="v")
    w = math_ops.multiply(u, v, name="w")
    sess = session.Session(
        config=session_debug_testlib.no_rewrite_session_config())
    sess.run(u.initializer)
    sess.run(v.initializer)
    grpc_debug_hook = hooks.TensorBoardDebugHook(
        ["localhost:%d" % self._server_port])
    sess = monitored_session._HookedSession(sess, [grpc_debug_hook])
    # Activate watch point on a tensor before calling sess.run().
    self._server.request_watch("u/read", 0, "DebugIdentity")
    self.assertAllClose(42.0, sess.run(w))
    # self.assertAllClose(42.0, sess.run(w))
    dump = debug_data.DebugDumpDir(self._dump_root)
    self.assertAllClose([2.1], dump.get_tensors("u/read", 0, "DebugIdentity"))
    # Check that the server has received the stack trace.
    self.assertTrue(self._server.query_op_traceback("u"))
    self.assertTrue(self._server.query_op_traceback("u/read"))
    self.assertTrue(self._server.query_op_traceback("v"))
    self.assertTrue(self._server.query_op_traceback("v/read"))
    self.assertTrue(self._server.query_op_traceback("w"))
    # Check that the server has received the python file content.
    # Query an arbitrary line to make sure that is the case.
    with open(__file__, "rt") as this_source_file:
      first_line = this_source_file.readline().strip()
    self.assertEqual(
        first_line, self._server.query_source_file_line(__file__, 1))
    self._server.clear_data()
    # Call sess.run() again, and verify that this time the traceback and source
    # code is not sent, because the graph version is not newer.
    self.assertAllClose(42.0, sess.run(w))
    with self.assertRaises(ValueError):
      self._server.query_op_traceback("delta_1")
    with self.assertRaises(ValueError):
      self._server.query_source_file_line(__file__, 1)

  def testTensorBoardDebugHookDisablingTracebackSourceCodeSendingWorks(self):
    """send_traceback_and_source_code=False suppresses traceback/source."""
    u = variables.VariableV1(2.1, name="u")
    v = variables.VariableV1(20.0, name="v")
    w = math_ops.multiply(u, v, name="w")
    sess = session.Session(
        config=session_debug_testlib.no_rewrite_session_config())
    sess.run(variables.global_variables_initializer())
    grpc_debug_hook = hooks.TensorBoardDebugHook(
        ["localhost:%d" % self._server_port],
        send_traceback_and_source_code=False)
    sess = monitored_session._HookedSession(sess, [grpc_debug_hook])
    # Activate watch point on a tensor before calling sess.run().
    self._server.request_watch("u/read", 0, "DebugIdentity")
    self.assertAllClose(42.0, sess.run(w))
    # Check that the server has _not_ received any tracebacks, as a result of
    # the disabling above.
    with self.assertRaisesRegex(ValueError, r"Op .*u/read.* does not exist"):
      self.assertTrue(self._server.query_op_traceback("u/read"))
    with self.assertRaisesRegex(ValueError,
                                r".* has not received any source file"):
      self._server.query_source_file_line(__file__, 1)

  def testConstructGrpcDebugHookWithOrWithouGrpcInUrlWorks(self):
    """The grpc:// scheme prefix is optional in hook addresses."""
    # NOTE(review): method name has a typo ("Withou"); renaming would change
    # the test id, so it is left as-is.
    hooks.GrpcDebugHook(["grpc://foo:42424"])
    hooks.GrpcDebugHook(["foo:42424"])
class SessionDebugConcurrentTest(
    session_debug_testlib.DebugConcurrentRunCallsTest):
  """Concurrent-run debug tests that stream dumps to a shared gRPC server."""

  @classmethod
  def setUpClass(cls):
    session_debug_testlib.SessionDebugTestBase.setUpClass()
    (cls._server_port, cls._debug_server_url, cls._server_dump_dir,
     cls._server_thread,
     cls._server) = grpc_debug_test_server.start_server_on_separate_thread()

  @classmethod
  def tearDownClass(cls):
    # Shut the shared test server down and wait for its thread to finish.
    cls._server.stop_server().wait()
    cls._server_thread.join()
    session_debug_testlib.SessionDebugTestBase.tearDownClass()

  def setUp(self):
    self._num_concurrent_runs = 3
    # One dump sub-directory per concurrent run thread.
    self._dump_roots = [
        os.path.join(self._server_dump_dir, "thread%d" % index)
        for index in range(self._num_concurrent_runs)]

  def tearDown(self):
    ops.reset_default_graph()
    if os.path.isdir(self._server_dump_dir):
      file_io.delete_recursively(self._server_dump_dir)

  def _get_concurrent_debug_urls(self):
    # Each run thread streams to its own path suffix on the shared server.
    return [self._debug_server_url + "/thread%d" % index
            for index in range(self._num_concurrent_runs)]
@test_util.run_v1_only("GrpcDebugWrapperSession is for tf.Session only")
class SessionDebugGrpcGatingTest(test_util.TensorFlowTestCase):
"""Test server gating of debug ops."""
  @classmethod
  def setUpClass(cls):
    # Two independent debug servers are started, so the tests can verify that
    # watch state does not leak between servers. dump_to_filesystem=False
    # keeps received tensors in memory instead of writing dump files.
    (cls._server_port_1, cls._debug_server_url_1, _, cls._server_thread_1,
     cls._server_1) = grpc_debug_test_server.start_server_on_separate_thread(
         dump_to_filesystem=False)
    (cls._server_port_2, cls._debug_server_url_2, _, cls._server_thread_2,
     cls._server_2) = grpc_debug_test_server.start_server_on_separate_thread(
         dump_to_filesystem=False)
    # Collected here so tearDownClass can stop every server, including any
    # extra ones individual tests append.
    cls._servers_and_threads = [(cls._server_1, cls._server_thread_1),
                                (cls._server_2, cls._server_thread_2)]
  @classmethod
  def tearDownClass(cls):
    # Stop all servers registered in _servers_and_threads and join their
    # threads.
    for server, thread in cls._servers_and_threads:
      server.stop_server().wait()
      thread.join()
  def tearDown(self):
    # Reset the graph and clear per-test tensor data on both servers so each
    # test starts from a clean state.
    ops.reset_default_graph()
    self._server_1.clear_data()
    self._server_2.clear_data()
  def testToggleEnableTwoDebugWatchesNoCrosstalkBetweenDebugNodes(self):
    """Toggling gated watches between runs switches which debug op streams."""
    with session.Session(
        config=session_debug_testlib.no_rewrite_session_config()) as sess:
      v_1 = variables.VariableV1(50.0, name="v_1")
      # NOTE(review): name="v_1" is repeated here — presumably intended to be
      # "v_2"; confirm. (TF uniquifies the op name, so the test still runs.)
      v_2 = variables.VariableV1(-50.0, name="v_1")
      delta_1 = constant_op.constant(5.0, name="delta_1")
      delta_2 = constant_op.constant(-5.0, name="delta_2")
      inc_v_1 = state_ops.assign_add(v_1, delta_1, name="inc_v_1")
      inc_v_2 = state_ops.assign_add(v_2, delta_2, name="inc_v_2")
      sess.run([v_1.initializer, v_2.initializer])
      run_metadata = config_pb2.RunMetadata()
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      # Both debug ops are registered as gated so the server can switch them
      # on and off between runs without rebuilding the graph.
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugIdentity(gated_grpc=true)",
                     "DebugNumericSummary(gated_grpc=true)"],
          debug_urls=[self._debug_server_url_1])
      for i in xrange(4):
        self._server_1.clear_data()
        if i % 2 == 0:
          # Even runs: stream DebugIdentity only.
          self._server_1.request_watch("delta_1", 0, "DebugIdentity")
          self._server_1.request_watch("delta_2", 0, "DebugIdentity")
          self._server_1.request_unwatch("delta_1", 0, "DebugNumericSummary")
          self._server_1.request_unwatch("delta_2", 0, "DebugNumericSummary")
        else:
          # Odd runs: stream DebugNumericSummary only.
          self._server_1.request_unwatch("delta_1", 0, "DebugIdentity")
          self._server_1.request_unwatch("delta_2", 0, "DebugIdentity")
          self._server_1.request_watch("delta_1", 0, "DebugNumericSummary")
          self._server_1.request_watch("delta_2", 0, "DebugNumericSummary")
        sess.run([inc_v_1, inc_v_2],
                 options=run_options, run_metadata=run_metadata)
        # Watched debug tensors are:
        #   Run 0: delta_[1,2]:0:DebugIdentity
        #   Run 1: delta_[1,2]:0:DebugNumericSummary
        #   Run 2: delta_[1,2]:0:DebugIdentity
        #   Run 3: delta_[1,2]:0:DebugNumericSummary
        self.assertEqual(2, len(self._server_1.debug_tensor_values))
        if i % 2 == 0:
          self.assertAllClose(
              [5.0],
              self._server_1.debug_tensor_values["delta_1:0:DebugIdentity"])
          self.assertAllClose(
              [-5.0],
              self._server_1.debug_tensor_values["delta_2:0:DebugIdentity"])
        else:
          # The 14-element DebugNumericSummary vector for each constant.
          self.assertAllClose(
              [[1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 5.0, 5.0, 5.0,
                0.0, 1.0, 0.0]],
              self._server_1.debug_tensor_values[
                  "delta_1:0:DebugNumericSummary"])
          self.assertAllClose(
              [[1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, -5.0, -5.0, -5.0,
                0.0, 1.0, 0.0]],
              self._server_1.debug_tensor_values[
                  "delta_2:0:DebugNumericSummary"])
  def testToggleWatchesOnCoreMetadata(self):
    """A server configured to auto-toggle watches alternates runs on/off."""
    # This test starts its own server (appended to _servers_and_threads so
    # tearDownClass stops it) configured to flip the two watches on every
    # core-metadata event.
    (_, debug_server_url, _, server_thread,
     server) = grpc_debug_test_server.start_server_on_separate_thread(
         dump_to_filesystem=False,
         toggle_watch_on_core_metadata=[("toggled_1", 0, "DebugIdentity"),
                                        ("toggled_2", 0, "DebugIdentity")])
    self._servers_and_threads.append((server, server_thread))
    with session.Session(
        config=session_debug_testlib.no_rewrite_session_config()) as sess:
      v_1 = variables.VariableV1(50.0, name="v_1")
      # NOTE(review): name="v_1" is repeated here — presumably intended to be
      # "v_2"; confirm.
      v_2 = variables.VariableV1(-50.0, name="v_1")
      # These two nodes have names that match those in the
      # toggle_watch_on_core_metadata argument used when calling
      # start_server_on_separate_thread().
      toggled_1 = constant_op.constant(5.0, name="toggled_1")
      toggled_2 = constant_op.constant(-5.0, name="toggled_2")
      inc_v_1 = state_ops.assign_add(v_1, toggled_1, name="inc_v_1")
      inc_v_2 = state_ops.assign_add(v_2, toggled_2, name="inc_v_2")
      sess.run([v_1.initializer, v_2.initializer])
      run_metadata = config_pb2.RunMetadata()
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugIdentity(gated_grpc=true)"],
          debug_urls=[debug_server_url])
      for i in xrange(4):
        server.clear_data()
        sess.run([inc_v_1, inc_v_2],
                 options=run_options, run_metadata=run_metadata)
        if i % 2 == 0:
          # Even runs: the server toggled the watches on.
          self.assertEqual(2, len(server.debug_tensor_values))
          self.assertAllClose(
              [5.0],
              server.debug_tensor_values["toggled_1:0:DebugIdentity"])
          self.assertAllClose(
              [-5.0],
              server.debug_tensor_values["toggled_2:0:DebugIdentity"])
        else:
          # Odd runs: the watches are toggled off, so nothing is streamed.
          self.assertEqual(0, len(server.debug_tensor_values))
  def testToggleEnableTwoDebugWatchesNoCrosstalkBetweenServers(self):
    """Toggling a gated watch on one debug server must not affect another.

    Two debug servers watch different tensors of the same graph; each server
    should receive only the tensor it requested, and only on the runs where
    its watch is enabled.
    """
    with session.Session(
        config=session_debug_testlib.no_rewrite_session_config()) as sess:
      v = variables.VariableV1(50.0, name="v")
      delta = constant_op.constant(5.0, name="delta")
      inc_v = state_ops.assign_add(v, delta, name="inc_v")
      sess.run(v.initializer)
      run_metadata = config_pb2.RunMetadata()
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      # All tensors are watched with gated_grpc=true and published to both
      # servers; which values actually flow is controlled per server below.
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugIdentity(gated_grpc=true)"],
          debug_urls=[self._debug_server_url_1, self._debug_server_url_2])
      for i in xrange(4):
        self._server_1.clear_data()
        self._server_2.clear_data()
        if i % 2 == 0:
          # Even runs: server 1 watches "delta"; server 2 watches "v".
          self._server_1.request_watch("delta", 0, "DebugIdentity")
          self._server_2.request_watch("v", 0, "DebugIdentity")
        else:
          # Odd runs: both watches disabled.
          self._server_1.request_unwatch("delta", 0, "DebugIdentity")
          self._server_2.request_unwatch("v", 0, "DebugIdentity")
        sess.run(inc_v, options=run_options, run_metadata=run_metadata)
        if i % 2 == 0:
          # Each server received exactly its own watched tensor, no more.
          self.assertEqual(1, len(self._server_1.debug_tensor_values))
          self.assertEqual(1, len(self._server_2.debug_tensor_values))
          self.assertAllClose(
              [5.0],
              self._server_1.debug_tensor_values["delta:0:DebugIdentity"])
          self.assertAllClose(
              [50 + 5.0 * i],
              self._server_2.debug_tensor_values["v:0:DebugIdentity"])
        else:
          # With watches disabled, neither server received any values.
          self.assertEqual(0, len(self._server_1.debug_tensor_values))
          self.assertEqual(0, len(self._server_2.debug_tensor_values))
  def testToggleBreakpointsWorks(self):
    """Breakpoints can be enabled and disabled between Session.run() calls.

    Breakpoints are gated watches requested with breakpoint=True; runs with
    an active breakpoint are unblocked by EventReply responses from the
    server, so the computed outputs must be unaffected either way.
    """
    with session.Session(
        config=session_debug_testlib.no_rewrite_session_config()) as sess:
      v_1 = variables.VariableV1(50.0, name="v_1")
      v_2 = variables.VariableV1(-50.0, name="v_2")
      delta_1 = constant_op.constant(5.0, name="delta_1")
      delta_2 = constant_op.constant(-5.0, name="delta_2")
      inc_v_1 = state_ops.assign_add(v_1, delta_1, name="inc_v_1")
      inc_v_2 = state_ops.assign_add(v_2, delta_2, name="inc_v_2")
      sess.run([v_1.initializer, v_2.initializer])
      run_metadata = config_pb2.RunMetadata()
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      debug_utils.watch_graph(
          run_options,
          sess.graph,
          debug_ops=["DebugIdentity(gated_grpc=true)"],
          debug_urls=[self._debug_server_url_1])
      for i in xrange(4):
        self._server_1.clear_data()
        if i in (0, 2):
          # Enable breakpoint at delta_[1,2]:0:DebugIdentity in runs 0 and 2.
          self._server_1.request_watch(
              "delta_1", 0, "DebugIdentity", breakpoint=True)
          self._server_1.request_watch(
              "delta_2", 0, "DebugIdentity", breakpoint=True)
        else:
          # Disable the breakpoint in runs 1 and 3.
          self._server_1.request_unwatch("delta_1", 0, "DebugIdentity")
          self._server_1.request_unwatch("delta_2", 0, "DebugIdentity")
        output = sess.run([inc_v_1, inc_v_2],
                          options=run_options, run_metadata=run_metadata)
        # Outputs accumulate normally regardless of breakpoint state.
        self.assertAllClose([50.0 + 5.0 * (i + 1), -50 - 5.0 * (i + 1)], output)
        if i in (0, 2):
          # During runs 0 and 2, the server should have received the published
          # debug tensor delta:0:DebugIdentity. The breakpoint should have been
          # unblocked by EventReply responses from the server.
          self.assertAllClose(
              [5.0],
              self._server_1.debug_tensor_values["delta_1:0:DebugIdentity"])
          self.assertAllClose(
              [-5.0],
              self._server_1.debug_tensor_values["delta_2:0:DebugIdentity"])
          # After the runs, the server should have properly registered the
          # breakpoints due to the request_watch(..., breakpoint=True) calls.
          self.assertSetEqual({("delta_1", 0, "DebugIdentity"),
                               ("delta_2", 0, "DebugIdentity")},
                              self._server_1.breakpoints)
        else:
          # After the end of runs 1 and 3, the server has received the requests
          # to disable the breakpoint at delta:0:DebugIdentity.
          self.assertSetEqual(set(), self._server_1.breakpoints)
  def testTensorBoardDebuggerWrapperToggleBreakpointsWorks(self):
    """Breakpoint toggling works through TensorBoardDebugWrapperSession.

    Also verifies that op tracebacks and source file content are sent to the
    debug server on the first run only.
    """
    with session.Session(
        config=session_debug_testlib.no_rewrite_session_config()) as sess:
      v_1 = variables.VariableV1(50.0, name="v_1")
      v_2 = variables.VariableV1(-50.0, name="v_2")
      delta_1 = constant_op.constant(5.0, name="delta_1")
      delta_2 = constant_op.constant(-5.0, name="delta_2")
      inc_v_1 = state_ops.assign_add(v_1, delta_1, name="inc_v_1")
      inc_v_2 = state_ops.assign_add(v_2, delta_2, name="inc_v_2")
      sess.run([v_1.initializer, v_2.initializer])
      # The TensorBoardDebugWrapperSession should add a DebugIdentity debug op
      # with attribute gated_grpc=True for every tensor in the graph.
      sess = grpc_wrapper.TensorBoardDebugWrapperSession(
          sess, self._debug_server_url_1)
      for i in xrange(4):
        self._server_1.clear_data()
        if i in (0, 2):
          # Enable breakpoint at delta_[1,2]:0:DebugIdentity in runs 0 and 2.
          self._server_1.request_watch(
              "delta_1", 0, "DebugIdentity", breakpoint=True)
          self._server_1.request_watch(
              "delta_2", 0, "DebugIdentity", breakpoint=True)
        else:
          # Disable the breakpoint in runs 1 and 3.
          self._server_1.request_unwatch("delta_1", 0, "DebugIdentity")
          self._server_1.request_unwatch("delta_2", 0, "DebugIdentity")
        output = sess.run([inc_v_1, inc_v_2])
        self.assertAllClose([50.0 + 5.0 * (i + 1), -50 - 5.0 * (i + 1)], output)
        if i in (0, 2):
          # During runs 0 and 2, the server should have received the published
          # debug tensor delta:0:DebugIdentity. The breakpoint should have been
          # unblocked by EventReply responses from the server.
          self.assertAllClose(
              [5.0],
              self._server_1.debug_tensor_values["delta_1:0:DebugIdentity"])
          self.assertAllClose(
              [-5.0],
              self._server_1.debug_tensor_values["delta_2:0:DebugIdentity"])
          # After the runs, the server should have properly registered the
          # breakpoints.
        else:
          # After the end of runs 1 and 3, the server has received the requests
          # to disable the breakpoint at delta:0:DebugIdentity.
          self.assertSetEqual(set(), self._server_1.breakpoints)
        if i == 0:
          # Check that the server has received the stack trace.
          self.assertTrue(self._server_1.query_op_traceback("delta_1"))
          self.assertTrue(self._server_1.query_op_traceback("delta_2"))
          self.assertTrue(self._server_1.query_op_traceback("inc_v_1"))
          self.assertTrue(self._server_1.query_op_traceback("inc_v_2"))
          # Check that the server has received the python file content.
          # Query an arbitrary line to make sure that is the case.
          with open(__file__, "rt") as this_source_file:
            first_line = this_source_file.readline().strip()
          self.assertEqual(
              first_line, self._server_1.query_source_file_line(__file__, 1))
        else:
          # In later Session.run() calls, the traceback shouldn't have been sent
          # because it is already sent in the 1st call. So calling
          # query_op_traceback() should lead to an exception, because the test
          # debug server clears the data at the beginning of every iteration.
          with self.assertRaises(ValueError):
            self._server_1.query_op_traceback("delta_1")
          with self.assertRaises(ValueError):
            self._server_1.query_source_file_line(__file__, 1)
  def testTensorBoardDebuggerWrapperDisablingTracebackSourceSendingWorks(self):
    """send_traceback_and_source_code=False suppresses traceback/source upload.

    With the flag disabled, the debug server must never learn op tracebacks
    or source file content, so the corresponding queries raise ValueError.
    """
    with session.Session(
        config=session_debug_testlib.no_rewrite_session_config()) as sess:
      v_1 = variables.VariableV1(50.0, name="v_1")
      v_2 = variables.VariableV1(-50.0, name="v_2")
      delta_1 = constant_op.constant(5.0, name="delta_1")
      delta_2 = constant_op.constant(-5.0, name="delta_2")
      inc_v_1 = state_ops.assign_add(v_1, delta_1, name="inc_v_1")
      inc_v_2 = state_ops.assign_add(v_2, delta_2, name="inc_v_2")
      sess.run(variables.global_variables_initializer())
      # Disable the sending of traceback and source code.
      sess = grpc_wrapper.TensorBoardDebugWrapperSession(
          sess, self._debug_server_url_1, send_traceback_and_source_code=False)
      for i in xrange(4):
        self._server_1.clear_data()
        if i == 0:
          self._server_1.request_watch(
              "delta_1", 0, "DebugIdentity", breakpoint=True)
        output = sess.run([inc_v_1, inc_v_2])
        self.assertAllClose([50.0 + 5.0 * (i + 1), -50 - 5.0 * (i + 1)], output)
        # No op traceback or source code should have been received by the debug
        # server due to the disabling above.
        with self.assertRaisesRegex(ValueError,
                                    r"Op .*delta_1.* does not exist"):
          self.assertTrue(self._server_1.query_op_traceback("delta_1"))
        with self.assertRaisesRegex(ValueError,
                                    r".* has not received any source file"):
          self._server_1.query_source_file_line(__file__, 1)
  def testGetGrpcDebugWatchesReturnsCorrectAnswer(self):
    """The server reports only watches registered with gated_grpc=true."""
    with session.Session() as sess:
      v = variables.VariableV1(50.0, name="v")
      delta = constant_op.constant(5.0, name="delta")
      inc_v = state_ops.assign_add(v, delta, name="inc_v")
      sess.run(v.initializer)
      # Before any debugged runs, the server should be aware of no debug
      # watches.
      self.assertEqual([], self._server_1.gated_grpc_debug_watches())
      run_metadata = config_pb2.RunMetadata()
      run_options = config_pb2.RunOptions(output_partition_graphs=True)
      # One gated watch ("delta") and one ungated watch ("v").
      debug_utils.add_debug_tensor_watch(
          run_options, "delta", output_slot=0,
          debug_ops=["DebugNumericSummary(gated_grpc=true)"],
          debug_urls=[self._debug_server_url_1])
      debug_utils.add_debug_tensor_watch(
          run_options, "v", output_slot=0,
          debug_ops=["DebugIdentity"],
          debug_urls=[self._debug_server_url_1])
      sess.run(inc_v, options=run_options, run_metadata=run_metadata)
      # After the first run, the server should have noted the debug watches
      # for which gated_grpc == True, but not the ones with gated_grpc == False.
      self.assertEqual(1, len(self._server_1.gated_grpc_debug_watches()))
      debug_watch = self._server_1.gated_grpc_debug_watches()[0]
      self.assertEqual("delta", debug_watch.node_name)
      self.assertEqual(0, debug_watch.output_slot)
      self.assertEqual("DebugNumericSummary", debug_watch.debug_op)
@test_util.run_v1_only("GrpcDebugWrapperSession is for tf.Session only")
class DelayedDebugServerTest(test_util.TensorFlowTestCase):
  """Tests that debugged sessions tolerate a slow-starting debug server."""
  def testDebuggedSessionRunWorksWithDelayedDebugServerStartup(self):
    """Test debugged Session.run() tolerates delayed debug server startup."""
    ops.reset_default_graph()
    # Start a debug server asynchronously, with a certain amount of delay.
    (debug_server_port, _, _, server_thread,
     debug_server) = grpc_debug_test_server.start_server_on_separate_thread(
         server_start_delay_sec=2.0, dump_to_filesystem=False)
    with self.cached_session() as sess:
      a_init = constant_op.constant(42.0, name="a_init")
      a = variables.VariableV1(a_init, name="a")
      # Watch every fetched tensor with DebugIdentity.
      def watch_fn(fetches, feeds):
        del fetches, feeds
        return framework.WatchOptions(debug_ops=["DebugIdentity"])
      sess = grpc_wrapper.GrpcDebugWrapperSession(
          sess, "localhost:%d" % debug_server_port, watch_fn=watch_fn)
      # This run happens before the server finishes starting; it must still
      # succeed and its debug tensor must eventually reach the server.
      sess.run(a.initializer)
      self.assertAllClose(
          [42.0], debug_server.debug_tensor_values["a_init:0:DebugIdentity"])
    debug_server.stop_server().wait()
    server_thread.join()
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  googletest.main()
|
|
#!/usr/bin/python2
# import inspect
import os
import re
from optparse import OptionParser
import sys
sys.path.append("/home/01934/srinath/pkg/cheetah/lib/python")
from Cheetah.Template import Template
import zcc_parser
import merkle
# Output-file name suffixes (appended to the computation class name) and
# template paths used by CodeGenerator below.
# Go in apps_sfdl_gen/
PROVER_H = "_p.h"
VERIFIER_H = "_v.h" #Output in ginger
PROVER_IMPL = "_p.cpp"
VERIFIER_IMPL = "_v.cpp" #Output in ginger
CONSTANTS_H = "_cons.h"
CONSTANTS_IMPL = "_cons.cpp"
VERIFIER_INP_GEN_H = "_v_inp_gen.h"
VERIFIER_INP_GEN_IMPL = "_v_inp_gen.cpp"
# Go in bin/
F1_INDEX = ".f1index"
GAMMA12 = ".gamma12"
GAMMA0 = ".gamma0"
QAP = ".qap" #Output in zaatar
PROVER_WORKSHEET = ".pws" #Prover worksheet (output if in worksheet mode)
# Go in apps_sfdl_hw/ (hand-editable stubs; see CodeGenerator.protect_files)
VERIFIER_INP_GEN_HW_H = "_v_inp_gen_hw.h"
VERIFIER_INP_GEN_HW_IMPL = "_v_inp_gen_hw.cpp"
PROVER_EXO_HW_H = "_p_exo.h"
PROVER_EXO_HW_IMPL = "_p_exo.cpp"
#Directory that stores templates
DIR_TMPL = "templates/"
#Templates (Cheetah .tmpl files rendered by CodeGenerator)
CONSTANTS_H_TMPL = DIR_TMPL + "cons.h.tmpl"
CONSTANTS_IMPL_TMPL = DIR_TMPL + "cons.cc.tmpl"
PROVER_GINGER_H_TMPL = DIR_TMPL + "prover.ginger.h.tmpl"
PROVER_GINGER_CC_TMPL = DIR_TMPL + "prover.ginger.cc.tmpl"
PROVER_ZAATAR_H_TMPL = DIR_TMPL + "prover.zaatar.h.tmpl"
PROVER_ZAATAR_CC_TMPL = DIR_TMPL + "prover.zaatar.cc.tmpl"
VERIFIER_INP_GEN_HW_H_TMPL = DIR_TMPL + "verifier_inp_gen_hw.h.tmpl"
VERIFIER_INP_GEN_HW_CC_TMPL = DIR_TMPL + "verifier_inp_gen_hw.cc.tmpl"
PROVER_EXO_HW_H_TMPL = DIR_TMPL + "prover_exo.h.tmpl"
PROVER_EXO_HW_CC_TMPL = DIR_TMPL + "prover_exo.cc.tmpl"
VERIFIER_GINGER_H_TMPL = DIR_TMPL + "verifier.ginger.h.tmpl"
VERIFIER_GINGER_CC_TMPL = DIR_TMPL + "verifier.ginger.cc.tmpl"
VERIFIER_ZAATAR_H_TMPL = DIR_TMPL + "verifier.zaatar.h.tmpl"
VERIFIER_ZAATAR_CC_TMPL = DIR_TMPL + "verifier.zaatar.cc.tmpl"
VERIFIER_INP_GEN_H_TMPL = DIR_TMPL + "verifier_inp_gen.h.tmpl"
VERIFIER_INP_GEN_CC_TMPL = DIR_TMPL + "verifier_inp_gen.cc.tmpl"
MAIN_GINGER_TMPL = DIR_TMPL + "main.ginger.cc.tmpl"
MAIN_ZAATAR_TMPL = DIR_TMPL + "main.zaatar.cc.tmpl"
class CodeGenerator():
  """Renders Cheetah templates into prover/verifier C++ source files.

  The generator parses a compiled .spec file via zcc_parser and then emits,
  under output_dir:
    - apps_sfdl_gen/: prover/verifier headers and implementations,
    - apps_sfdl_hw/: hand-editable stubs (created only when absent),
    - bin/: F1 index, gamma, QAP and prover-worksheet data files.
  """

  def __init__(self, output_dir, output_prefix, class_name, framework, worksheetMode, language):
    """Records generation settings and mirrors them into zcc_parser globals."""
    self.output_dir = output_dir
    self.output_prefix = output_prefix
    self.class_name = class_name
    zcc_parser.class_name = class_name
    zcc_parser.output_dir = output_dir
    self.framework = framework
    zcc_parser.framework = framework
    self.worksheetMode = worksheetMode
    self.language = language

  def write_to_file(self, name, contents):
    """Writes contents to output_dir/name, replacing any existing file."""
    with open(os.path.join(self.output_dir, name), "w") as f:
      f.write(contents)

  def open_spec_file(self, spec_path):
    """Parses spec_path and returns the expanded spec file object."""
    spec_file = open(spec_path, "r")
    zcc_parser.parse_spec_file(spec_file)
    if (zcc_parser.verbose):
      print("Expanding database operations in " + spec_path)
    # Expansion replaces DB ops and adds memory-consistency constraints;
    # each call returns a (possibly new) file object.
    spec_file = zcc_parser.expand_db_ops_in_spec(spec_file)
    spec_file = zcc_parser.generate_memory_consistency_in_spec(spec_file)
    return spec_file

  def generate_pws(self, spec_file, defs):
    """Writes the prover worksheet and fills computation-related defs."""
    pws_loc = "bin/" + self.class_name + PROVER_WORKSHEET
    pws_abs_loc = os.path.join(self.output_dir, pws_loc)
    if (zcc_parser.verbose):
      print("Creating prover worksheet, result will appear at " + pws_loc)
    # NOTE: this call also creates variables as a side effect.
    (defs['num_io_vars'], defs['num_z_vars']) = zcc_parser.generate_computation_worksheet(spec_file, pws_abs_loc)
    if (self.worksheetMode):
      defs['computation'] = zcc_parser.generate_computation_dynamic("bin/" + self.class_name + PROVER_WORKSHEET)
    else:
      defs['computation'] = zcc_parser.generate_computation_static(spec_file)
    spec_file.seek(0)

  def generate_matrices(self, spec_file, defs):
    """Generates the variable shuffle (F1 index) and, for Zaatar, the QAP."""
    (f1_index, shuffledIndices) = zcc_parser.generate_F1_index()
    self.write_to_file("bin/" + self.class_name + F1_INDEX, f1_index)
    if (self.framework == "GINGER"):
      # Ginger has no A/B/C matrices; keep placeholders for the templates.
      defs['NzA'] = "n/a"
      defs['NzB'] = "n/a"
      defs['NzC'] = "n/a"
    else:
      qap_file_name = os.path.join(self.output_dir, "bin/" + self.class_name + QAP)
      (defs['NzA'], defs['NzB'], defs['NzC'], defs['num_constraints']) = zcc_parser.generate_zaatar_matrices(spec_file, shuffledIndices, qap_file_name)
    spec_file.seek(0)

  def generate_prover(self, defs):
    """Renders the prover header and implementation for the framework."""
    if (self.framework == "GINGER"):
      t = Template(file=PROVER_GINGER_H_TMPL, searchList=[defs])
    else:
      t = Template(file=PROVER_ZAATAR_H_TMPL, searchList=[defs])
    self.write_to_file(self.output_prefix + PROVER_H, t.__str__())
    if (self.framework == "GINGER"):
      defs['gamma12_file_name'] = "bin/" + self.class_name + GAMMA12
      t = Template(file=PROVER_GINGER_CC_TMPL, searchList=[defs])
    else:
      t = Template(file=PROVER_ZAATAR_CC_TMPL, searchList=[defs])
    self.write_to_file(self.output_prefix + PROVER_IMPL, t.__str__())

  def generate_input_generator(self, defs):
    """Renders the verifier input-generator header and implementation."""
    defs['create_input'] = zcc_parser.generate_create_input()
    t = Template(file=VERIFIER_INP_GEN_H_TMPL, searchList=[defs])
    self.write_to_file(self.output_prefix + VERIFIER_INP_GEN_H, t.__str__())
    t = Template(file=VERIFIER_INP_GEN_CC_TMPL, searchList=[defs])
    self.write_to_file(self.output_prefix + VERIFIER_INP_GEN_IMPL, t.__str__())

  def generate_constants_file(self, spec_path, defs):
    """Renders the constants header/implementation from spec_path + '.cons'."""
    defs['constants'] = zcc_parser.generate_constants(spec_path + ".cons")
    defs['is_mapred_comp'] = zcc_parser.generate_mapred_header(self.class_name)
    t = Template(file=CONSTANTS_H_TMPL, searchList=[defs])
    self.write_to_file(self.output_prefix + CONSTANTS_H, t.__str__())
    t = Template(file=CONSTANTS_IMPL_TMPL, searchList=[defs])
    self.write_to_file(self.output_prefix + CONSTANTS_IMPL, t.__str__())

  def protect_files(self, defs):
    """Creates hand-editable hardware stubs from templates only when absent.

    Existing files are merely touched (mtime updated) so that user edits
    survive re-generation.
    """
    protectedFiles = {(PROVER_EXO_HW_H, PROVER_EXO_HW_H_TMPL),
                      (PROVER_EXO_HW_IMPL, PROVER_EXO_HW_CC_TMPL),
                      (VERIFIER_INP_GEN_HW_H, VERIFIER_INP_GEN_HW_H_TMPL),
                      (VERIFIER_INP_GEN_HW_IMPL, VERIFIER_INP_GEN_HW_CC_TMPL)}
    for (targetfile, tmplfile) in protectedFiles:
      filename = "apps_sfdl_hw/" + self.class_name + targetfile
      try:
        filename_ = os.path.join(self.output_dir, filename)
        # BUG FIX: file() is a Python-2-only builtin; open() behaves the
        # same here and also works under Python 3.
        with open(filename_, 'r'):
          os.utime(filename_, None)  # Touch it if it exists.
      except IOError:
        # The file doesn't exist; create it from the template.
        t = Template(file=tmplfile, searchList=[defs])
        self.write_to_file(filename, t.__str__())

  def generate_verifier(self, spec_file, defs):
    """Renders the verifier for the selected framework."""
    if (self.framework == "GINGER"):
      self.write_ginger(defs, spec_file)
    else:
      self.write_zaatar(defs)
    spec_file.seek(0)

  def generate_code_from_template(self, spec_path):
    """Top-level driver: generates every output file for spec_path."""
    spec_file = self.open_spec_file(spec_path)
    defs = {}
    defs['computation_name'] = os.path.splitext(os.path.split(spec_path)[1])[0]
    defs['computation_classname'] = self.class_name
    defs['OUTPUT_PREFIX'] = re.sub(r'/', r'_', self.output_prefix).upper()
    defs['output_prefix'] = self.output_prefix
    self.generate_pws(spec_file, defs)
    # Number of variables and basic constraints (chi) are fixed from here on.
    self.generate_matrices(spec_file, defs)
    # Write the prover.
    self.generate_prover(defs)
    # Write the constants file and the protected hand-editable stubs.
    self.generate_constants_file(spec_path, defs)
    self.protect_files(defs)
    # Create the input generator.
    self.generate_input_generator(defs)
    # Produce the verifier code (ginger) or A,B,C matrices (Zaatar) and drivers.
    self.generate_verifier(spec_file, defs)
    spec_file.close()

  def extract_clean_compute_function(self, class_name):
    """Splits ../<class_name>.c into (typedef text, compute() text).

    NOTE(review): assumes output_dir equals the input dir (acknowledged
    hack in the original); heuristics key off '#include', '//=====' and
    'void compute(' line prefixes.
    """
    filename = os.path.join("../", class_name + ".c")
    # Use a context manager so the file is closed even on error (the
    # original leaked the handle on exceptions).
    with open(filename, 'r') as f:
      content = f.readlines()
    typedefFlag = 1
    typedef = ""
    compute = ""
    # Everything before the compute() definition (minus #includes) counts
    # as typedefs; everything from compute() onwards is the function body.
    for line in content:
      if not line.startswith("#include"):
        if typedefFlag == 1:
          if line.startswith("//==========") or line.startswith("void compute("):
            typedefFlag = 0
          if line.startswith("void compute("):
            compute += line
          else:
            typedef += line
        else:
          compute += line
    return (typedef, compute)

  def write_ginger(self, defs, spec_file):
    """Renders the Ginger verifier, gamma data files, and main driver."""
    # Write verifier's header.
    t = Template(file=VERIFIER_GINGER_H_TMPL, searchList=[defs])
    self.write_to_file(self.output_prefix + VERIFIER_H, t.__str__())
    # Write verifier's code; these routines also generate chi.
    gamma0 = zcc_parser.generate_gamma0(spec_file)
    gamma12 = zcc_parser.generate_gamma12(spec_file)
    defs['gamma12_file_name'] = "bin/" + self.class_name + GAMMA12
    defs['gamma0_file_name'] = "bin/" + self.class_name + GAMMA0
    t = Template(file=VERIFIER_GINGER_CC_TMPL, searchList=[defs])
    self.write_to_file(self.output_prefix + VERIFIER_IMPL, t.__str__())
    self.write_to_file("bin/" + self.class_name + GAMMA12, gamma12)
    self.write_to_file("bin/" + self.class_name + GAMMA0, gamma0)
    # Write the driver.
    defs['comp_parameters'] = zcc_parser.generate_ginger_comp_params()
    t = Template(file=MAIN_GINGER_TMPL, searchList=[defs])
    self.write_to_file(self.output_prefix + ".cpp", t.__str__())

  def write_zaatar(self, defs):
    """Renders the Zaatar verifier and main driver (QAP written earlier)."""
    # Write verifier's header.
    t = Template(file=VERIFIER_ZAATAR_H_TMPL, searchList=[defs])
    self.write_to_file(self.output_prefix + VERIFIER_H, t.__str__())
    # Write verifier's code.
    defs['load_qap'] = zcc_parser.generate_load_qap("bin/" + self.class_name + QAP)
    t = Template(file=VERIFIER_ZAATAR_CC_TMPL, searchList=[defs])
    self.write_to_file(self.output_prefix + VERIFIER_IMPL, t.__str__())
    # Write the driver.
    defs['comp_parameters'] = zcc_parser.generate_zaatar_comp_params()
    t = Template(file=MAIN_ZAATAR_TMPL, searchList=[defs])
    self.write_to_file(self.output_prefix + ".cpp", t.__str__())
def main():
  """Command-line entry point: parse options and run the code generator."""
  parser = OptionParser()
  parser.add_option("-c", "--classname", dest="classname")
  parser.add_option("-s", "--spec_file", dest="spec")
  parser.add_option("-o", "--output_prefix", dest="output_prefix")
  parser.add_option("-d", "--output_dir", dest="output_dir", default=".")
  parser.add_option("-b", "--bugginess", dest="bugginess", default=0)
  parser.add_option("-t", "--framework", dest="framework", default="GINGER")
  parser.add_option("-m", "--metrics", dest="metrics", default=0)
  parser.add_option("-w", "--worksheetMode", dest="worksheetMode", default=1)
  parser.add_option("--db-hash-func", dest="dbHashFunc", default="ggh")
  parser.add_option("--db-num-addresses", dest="dbNumAddresses", default="16")
  parser.add_option("--ram-cell-num-bits", dest="ramCellNumBits", default="1024")
  parser.add_option("--fast-ram-word-width", dest="fastRAMWordWidth", default="64")
  parser.add_option("--fast-ram-address-width", dest="fastRAMAddressWidth", default="32")
  parser.add_option("--language", dest="language", default="c")
  (options, _) = parser.parse_args()
  # Bail out with usage information if any required option is missing.
  for required in ['output_prefix', 'classname', 'spec', 'framework']:
    if not getattr(options, required):
      parser.print_help()
      exit(-1)
  # Push global configuration into the parser module before generating.
  zcc_parser.printMetrics = int(options.metrics)
  zcc_parser.proverBugginess = float(options.bugginess)
  zcc_parser.merkle_gen = merkle.MerkleConsGen(options.dbHashFunc, zcc_parser.verbose)
  zcc_parser.db_size = int(options.dbNumAddresses)
  zcc_parser.ram_cell_num_bits = int(options.ramCellNumBits)
  zcc_parser.word_width = int(options.fastRAMWordWidth)
  zcc_parser.address_width = int(options.fastRAMAddressWidth)
  generator = CodeGenerator(options.output_dir, options.output_prefix,
                            options.classname, options.framework,
                            int(options.worksheetMode), options.language)
  generator.generate_code_from_template(options.spec)
# Run the generator only when executed as a script (not when imported).
if __name__ == "__main__":
    main()
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""SDK harness for executing Python Fns via the Fn API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import collections
import json
import logging
from google.protobuf import wrappers_pb2
import apache_beam as beam
from apache_beam.coders import WindowedValueCoder
from apache_beam.coders import coder_impl
from apache_beam.internal import pickler
from apache_beam.io import iobase
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.runners import pipeline_context
from apache_beam.runners.dataflow.native_io import iobase as native_iobase
from apache_beam.runners.worker import operation_specs
from apache_beam.runners.worker import operations
from apache_beam.utils import counters
from apache_beam.utils import proto_utils
from apache_beam.utils import urns
# This module is experimental. No backwards-compatibility guarantees.
try:
from apache_beam.runners.worker import statesampler
except ImportError:
from apache_beam.runners.worker import statesampler_fake as statesampler
# URNs used to register/dispatch operations in BeamTransformFactory below.
DATA_INPUT_URN = 'urn:org.apache.beam:source:runner:0.1'
DATA_OUTPUT_URN = 'urn:org.apache.beam:sink:runner:0.1'
IDENTITY_DOFN_URN = 'urn:org.apache.beam:dofn:identity:0.1'
PYTHON_ITERABLE_VIEWFN_URN = 'urn:org.apache.beam:viewfn:iterable:python:0.1'
PYTHON_CODER_URN = 'urn:org.apache.beam:coder:python:0.1'
# TODO(vikasrk): Fix this once runner sends appropriate python urns.
PYTHON_DOFN_URN = 'urn:org.apache.beam:dofn:java:0.1'
PYTHON_SOURCE_URN = 'urn:org.apache.beam:source:java:0.1'
def side_input_tag(transform_id, tag):
  """Builds an unambiguous tag string for a side input of a transform.

  The transform id's length is prefixed so distinct (transform_id, tag)
  pairs can never collide after concatenation.
  """
  id_length = len(transform_id)
  return str("%d[%s][%s]" % (id_length, transform_id, tag))
class RunnerIOOperation(operations.Operation):
  """Common baseclass for runner harness IO operations."""
  def __init__(self, operation_name, step_name, consumers, counter_factory,
               state_sampler, windowed_coder, target, data_channel):
    # No spec is available for these synthetic operations, hence None.
    super(RunnerIOOperation, self).__init__(
        operation_name, None, counter_factory, state_sampler)
    # Coder for the windowed values crossing the data plane.
    self.windowed_coder = windowed_coder
    self.step_name = step_name
    # target represents the consumer for the bytes in the data plane for a
    # DataInputOperation or a producer of these bytes for a DataOutputOperation.
    self.target = target
    self.data_channel = data_channel
    # Wire every downstream consumer to receiver slot 0.
    for _, consumer_ops in consumers.items():
      for consumer in consumer_ops:
        self.add_receiver(consumer, 0)
class DataOutputOperation(RunnerIOOperation):
  """A sink-like operation that gathers outputs to be sent back to the runner.
  """
  def set_output_stream(self, output_stream):
    # Stream is bound per bundle (it is keyed by instruction id), so it is
    # injected here rather than in the constructor.
    self.output_stream = output_stream
  def process(self, windowed_value):
    # Encode each windowed value (nested=True) onto the data plane stream.
    self.windowed_coder.get_impl().encode_to_stream(
        windowed_value, self.output_stream, True)
  def finish(self):
    # Closing the stream signals end-of-data to the runner for this bundle.
    self.output_stream.close()
    super(DataOutputOperation, self).finish()
class DataInputOperation(RunnerIOOperation):
  """A source-like operation that gathers input from the runner.

  Encoded windowed values arrive over the data plane; they are decoded with
  windowed_coder and pushed to this operation's consumers.
  """

  def __init__(self, operation_name, step_name, consumers, counter_factory,
               state_sampler, windowed_coder, input_target, data_channel):
    super(DataInputOperation, self).__init__(
        operation_name, step_name, consumers, counter_factory, state_sampler,
        windowed_coder, target=input_target, data_channel=data_channel)
    # We must do this manually as we don't have a spec or spec.output_coders.
    # BUG FIX: dict.itervalues().next() is Python-2-only; next(iter(...)) is
    # equivalent and works on both Python 2 and 3, matching this module's
    # __future__ imports aimed at 2/3 compatibility.
    self.receivers = [
        operations.ConsumerSet(self.counter_factory, self.step_name, 0,
                               next(iter(consumers.values())),
                               self.windowed_coder)]

  def process(self, windowed_value):
    # Forward an already-decoded windowed value to the receivers.
    self.output(windowed_value)

  def process_encoded(self, encoded_windowed_values):
    """Decodes a chunk of encoded windowed values and emits each element."""
    input_stream = coder_impl.create_InputStream(encoded_windowed_values)
    while input_stream.size() > 0:
      decoded_value = self.windowed_coder.get_impl().decode_from_stream(
          input_stream, True)
      self.output(decoded_value)
# TODO(robertwb): Revise side input API to not be in terms of native sources.
# This will enable lookups, but there's an open question as to how to handle
# custom sources without forcing intermediate materialization. This seems very
# related to the desire to inject key and window preserving [Splittable]DoFns
# into the view computation.
class SideInputSource(native_iobase.NativeSource,
                      native_iobase.NativeSourceReader):
  """A 'source' for reading side inputs via state API calls.
  """
  def __init__(self, state_handler, state_key, coder):
    # state_handler: client used to issue Get() requests against the state API.
    # state_key: key identifying this side input's data.
    # coder: element coder used to decode the fetched bytes.
    self._state_handler = state_handler
    self._state_key = state_key
    self._coder = coder
  def reader(self):
    # NativeSource interface: this object doubles as its own reader.
    return self
  @property
  def returns_windowed_values(self):
    # Decoded elements are windowed values, not bare elements.
    return True
  def __enter__(self):
    return self
  def __exit__(self, *exn_info):
    # Nothing to release; state reads are stateless from our side.
    pass
  def __iter__(self):
    # TODO(robertwb): Support pagination.
    input_stream = coder_impl.create_InputStream(
        self._state_handler.Get(self._state_key).data)
    while input_stream.size() > 0:
      yield self._coder.get_impl().decode_from_stream(input_stream, True)
def memoize(func):
  """Caches func's results keyed by its positional arguments.

  Arguments must be hashable; each distinct argument tuple invokes func
  exactly once.
  """
  results = {}
  def wrapper(*args):
    try:
      return results[args]
    except KeyError:
      value = func(*args)
      results[args] = value
      return value
  return wrapper
def only_element(iterable):
  """Returns the sole element of iterable; raises ValueError otherwise."""
  [sole] = iterable
  return sole
class BundleProcessor(object):
  """A class for processing bundles of elements.
  """
  def __init__(
      self, process_bundle_descriptor, state_handler, data_channel_factory):
    # process_bundle_descriptor: proto describing the transforms to execute.
    # state_handler: client for side-input state reads.
    # data_channel_factory: creates data-plane channels for IO operations.
    self.process_bundle_descriptor = process_bundle_descriptor
    self.state_handler = state_handler
    self.data_channel_factory = data_channel_factory
  def create_execution_tree(self, descriptor):
    """Builds the operations for descriptor, ordered for starting."""
    # TODO(robertwb): Figure out the correct prefix to use for output counters
    # from StateSampler.
    counter_factory = counters.CounterFactory()
    state_sampler = statesampler.StateSampler(
        'fnapi-step%s-' % descriptor.id, counter_factory)
    transform_factory = BeamTransformFactory(
        descriptor, self.data_channel_factory, counter_factory, state_sampler,
        self.state_handler)
    # Map each PCollection id to the ids of the transforms consuming it.
    pcoll_consumers = collections.defaultdict(list)
    for transform_id, transform_proto in descriptor.transforms.items():
      for pcoll_id in transform_proto.inputs.values():
        pcoll_consumers[pcoll_id].append(transform_id)
    # Memoized so each transform becomes exactly one operation even when it
    # is reached via multiple producers.
    @memoize
    def get_operation(transform_id):
      transform_consumers = {
          tag: [get_operation(op) for op in pcoll_consumers[pcoll_id]]
          for tag, pcoll_id
          in descriptor.transforms[transform_id].outputs.items()
      }
      return transform_factory.create_operation(
          transform_id, transform_consumers)
    # Operations must be started (hence returned) in order.
    @memoize
    def topological_height(transform_id):
      # Height of a transform in the consumer DAG; leaves have height 1.
      return 1 + max(
          [0] +
          [topological_height(consumer)
           for pcoll in descriptor.transforms[transform_id].outputs.values()
           for consumer in pcoll_consumers[pcoll]])
    # Tallest (most-upstream) transforms first.
    return [get_operation(transform_id)
            for transform_id in sorted(
                descriptor.transforms, key=topological_height, reverse=True)]
  def process_bundle(self, instruction_id):
    """Executes one bundle: start ops, feed inputs, finish ops."""
    ops = self.create_execution_tree(self.process_bundle_descriptor)
    expected_inputs = []
    for op in ops:
      if isinstance(op, DataOutputOperation):
        # TODO(robertwb): Is there a better way to pass the instruction id to
        # the operation?
        op.set_output_stream(op.data_channel.output_stream(
            instruction_id, op.target))
      elif isinstance(op, DataInputOperation):
        # We must wait until we receive "end of stream" for each of these ops.
        expected_inputs.append(op)
    # Start all operations.
    for op in reversed(ops):
      logging.info('start %s', op)
      op.start()
    # Inject inputs from data plane.
    for input_op in expected_inputs:
      for data in input_op.data_channel.input_elements(
          instruction_id, [input_op.target]):
        # ignores input name
        input_op.process_encoded(data.data)
    # Finish all operations.
    for op in ops:
      logging.info('finish %s', op)
      op.finish()
class BeamTransformFactory(object):
  """Factory for turning transform_protos into executable operations."""
  def __init__(self, descriptor, data_channel_factory, counter_factory,
               state_sampler, state_handler):
    self.descriptor = descriptor
    self.data_channel_factory = data_channel_factory
    self.counter_factory = counter_factory
    self.state_sampler = state_sampler
    self.state_handler = state_handler
    self.context = pipeline_context.PipelineContext(descriptor)
  # Registry mapping a URN to (creator function, payload proto type);
  # populated via the register_urn decorator below.
  _known_urns = {}
  @classmethod
  def register_urn(cls, urn, parameter_type):
    # Decorator: registers func as the creator for transforms with this urn.
    def wrapper(func):
      cls._known_urns[urn] = func, parameter_type
      return func
    return wrapper
  def create_operation(self, transform_id, consumers):
    # Dispatches on the transform's spec URN to a registered creator.
    transform_proto = self.descriptor.transforms[transform_id]
    creator, parameter_type = self._known_urns[transform_proto.spec.urn]
    payload = proto_utils.parse_Bytes(
        transform_proto.spec.payload, parameter_type)
    return creator(self, transform_id, transform_proto, payload, consumers)
  def get_coder(self, coder_id):
    # Resolves a coder id either via the standard registry (when a URN is
    # present) or via legacy cloud-object JSON encoding.
    coder_proto = self.descriptor.coders[coder_id]
    if coder_proto.spec.spec.urn:
      return self.context.coders.get_by_id(coder_id)
    else:
      # No URN, assume cloud object encoding json bytes.
      return operation_specs.get_coder_from_spec(
          json.loads(coder_proto.spec.spec.payload))
  def get_output_coders(self, transform_proto):
    # Maps each output tag to the coder of the produced PCollection.
    return {
        tag: self.get_coder(self.descriptor.pcollections[pcoll_id].coder_id)
        for tag, pcoll_id in transform_proto.outputs.items()
    }
  def get_only_output_coder(self, transform_proto):
    # Convenience for single-output transforms; raises if not exactly one.
    return only_element(self.get_output_coders(transform_proto).values())
  def get_input_coders(self, transform_proto):
    # Maps each input tag to the coder of the consumed PCollection.
    return {
        tag: self.get_coder(self.descriptor.pcollections[pcoll_id].coder_id)
        for tag, pcoll_id in transform_proto.inputs.items()
    }
  def get_only_input_coder(self, transform_proto):
    # Convenience for single-input transforms; raises if not exactly one.
    return only_element(self.get_input_coders(transform_proto).values())
  # TODO(robertwb): Update all operations to take these in the constructor.
  @staticmethod
  def augment_oldstyle_op(op, step_name, consumers, tag_list=None):
    # Wires consumers onto an old-style operation after construction; the
    # receiver index comes from the tag's position in tag_list (0 if absent).
    op.step_name = step_name
    for tag, op_consumers in consumers.items():
      for consumer in op_consumers:
        op.add_receiver(consumer, tag_list.index(tag) if tag_list else 0)
    return op
@BeamTransformFactory.register_urn(
    DATA_INPUT_URN, beam_fn_api_pb2.RemoteGrpcPort)
def create(factory, transform_id, transform_proto, grpc_port, consumers):
    """Create a DataInputOperation fed from a remote gRPC data port."""
    output_tag = only_element(transform_proto.outputs.keys())
    target = beam_fn_api_pb2.Target(
        primitive_transform_reference=transform_id,
        name=output_tag)
    data_channel = factory.data_channel_factory.create_data_channel(grpc_port)
    return DataInputOperation(
        transform_proto.unique_name,
        transform_proto.unique_name,
        consumers,
        factory.counter_factory,
        factory.state_sampler,
        factory.get_only_output_coder(transform_proto),
        input_target=target,
        data_channel=data_channel)
@BeamTransformFactory.register_urn(
    DATA_OUTPUT_URN, beam_fn_api_pb2.RemoteGrpcPort)
def create(factory, transform_id, transform_proto, grpc_port, consumers):
    """Create a DataOutputOperation writing to a remote gRPC data port."""
    input_tag = only_element(transform_proto.inputs.keys())
    target = beam_fn_api_pb2.Target(
        primitive_transform_reference=transform_id,
        name=input_tag)
    data_channel = factory.data_channel_factory.create_data_channel(grpc_port)
    # TODO(robertwb): Perhaps this could be distinct from the input coder?
    coder = factory.get_only_input_coder(transform_proto)
    return DataOutputOperation(
        transform_proto.unique_name,
        transform_proto.unique_name,
        consumers,
        factory.counter_factory,
        factory.state_sampler,
        coder,
        target=target,
        data_channel=data_channel)
@BeamTransformFactory.register_urn(PYTHON_SOURCE_URN, None)
def create(factory, transform_id, transform_proto, parameter, consumers):
    """Create a ReadOperation from a pickled Python source."""
    # The Dataflow runner harness strips the base64 encoding, so it must
    # be re-applied before unpickling.
    source = pickler.loads(base64.b64encode(parameter))
    output_coder = WindowedValueCoder(source.default_output_coder())
    spec = operation_specs.WorkerRead(
        iobase.SourceBundle(1.0, source, None, None),
        [output_coder])
    read_op = operations.ReadOperation(
        transform_proto.unique_name,
        spec,
        factory.counter_factory,
        factory.state_sampler)
    return factory.augment_oldstyle_op(
        read_op, transform_proto.unique_name, consumers)
@BeamTransformFactory.register_urn(
    urns.READ_TRANSFORM, beam_runner_api_pb2.ReadPayload)
def create(factory, transform_id, transform_proto, parameter, consumers):
    """Create a ReadOperation from a runner-API ReadPayload proto."""
    source = iobase.SourceBase.from_runner_api(parameter.source, factory.context)
    output_coder = WindowedValueCoder(source.default_output_coder())
    spec = operation_specs.WorkerRead(
        iobase.SourceBundle(1.0, source, None, None),
        [output_coder])
    read_op = operations.ReadOperation(
        transform_proto.unique_name,
        spec,
        factory.counter_factory,
        factory.state_sampler)
    return factory.augment_oldstyle_op(
        read_op, transform_proto.unique_name, consumers)
@BeamTransformFactory.register_urn(PYTHON_DOFN_URN, None)
def create(factory, transform_id, transform_proto, parameter, consumers):
    """Create a DoOperation from a pickled DoFn payload.

    This URN registers no proto parameter type, so `parameter` is the raw
    pickled payload bytes.  The unpickled payload is either a 2-tuple
    (serialized_fn, side_input_data) or a bare serialized fn.
    """
    dofn_data = pickler.loads(parameter)
    if len(dofn_data) == 2:
        # Has side input data.
        serialized_fn, side_input_data = dofn_data
    else:
        # No side input data: the payload itself is the serialized fn.
        # BUGFIX: `parameter` is raw bytes (parameter_type None), so the
        # previous `parameter.value` would raise AttributeError; the
        # proto-typed PARDO_TRANSFORM creator below keeps serialized_fn
        # unchanged in the same situation.
        serialized_fn, side_input_data = parameter, []
    return _create_pardo_operation(
        factory, transform_id, transform_proto, consumers,
        serialized_fn, side_input_data)
@BeamTransformFactory.register_urn(
    urns.PARDO_TRANSFORM, beam_runner_api_pb2.ParDoPayload)
def create(factory, transform_id, transform_proto, parameter, consumers):
    """Create a DoOperation from a runner-API ParDoPayload proto."""
    assert parameter.do_fn.spec.urn == urns.PICKLED_DO_FN_INFO
    serialized_fn = parameter.do_fn.spec.payload
    dofn_data = pickler.loads(serialized_fn)
    # Default: no side input data; a 2-tuple payload overrides both.
    side_input_data = []
    if len(dofn_data) == 2:
        serialized_fn, side_input_data = dofn_data
    return _create_pardo_operation(
        factory, transform_id, transform_proto, consumers,
        serialized_fn, side_input_data)
def _create_pardo_operation(
        factory, transform_id, transform_proto, consumers,
        serialized_fn, side_input_data):
    """Build a DoOperation from an already-serialized DoFn.

    Shared by the various ParDo creators above.  Handles side-input
    sources, the Dataflow 'out'-prefix tag convention, and injecting a
    windowing strategy into the serialized fn when it has none.
    """

    def create_side_input(tag, coder):
        # TODO(robertwb): Extract windows (and keys) out of element data.
        # TODO(robertwb): Extract state key from ParDoPayload.
        return operation_specs.WorkerSideInputSource(
            tag=tag,
            source=SideInputSource(
                factory.state_handler,
                beam_fn_api_pb2.StateKey.MultimapSideInput(
                    key=side_input_tag(transform_id, tag)),
                coder=coder))

    output_tags = list(transform_proto.outputs.keys())

    # Hack to match out prefix injected by dataflow runner: the 'None'
    # (main output) tag becomes 'out', other tags become 'out_<tag>'.
    def mutate_tag(tag):
        if 'None' in output_tags:
            if tag == 'None':
                return 'out'
            else:
                return 'out_' + tag
        else:
            return tag

    dofn_data = pickler.loads(serialized_fn)
    if not dofn_data[-1]:
        # Windowing not set.  Look it up from the (single) input
        # PCollection and re-serialize the fn with it attached.
        pcoll_id, = transform_proto.inputs.values()
        windowing = factory.context.windowing_strategies.get_by_id(
            factory.descriptor.pcollections[pcoll_id].windowing_strategy_id)
        serialized_fn = pickler.dumps(dofn_data[:-1] + (windowing,))
    output_coders = factory.get_output_coders(transform_proto)
    spec = operation_specs.WorkerDoFn(
        serialized_fn=serialized_fn,
        output_tags=[mutate_tag(tag) for tag in output_tags],
        input=None,
        side_inputs=[
            create_side_input(tag, coder) for tag, coder in side_input_data],
        output_coders=[output_coders[tag] for tag in output_tags])
    return factory.augment_oldstyle_op(
        operations.DoOperation(
            transform_proto.unique_name,
            spec,
            factory.counter_factory,
            factory.state_sampler),
        transform_proto.unique_name,
        consumers,
        output_tags)
def _create_simple_pardo_operation(
        factory, transform_id, transform_proto, consumers, dofn):
    """Wrap a bare DoFn (no args, kwargs, side inputs, or windowing)
    into a ParDo operation."""
    # Payload shape: (fn, args, kwargs, side_inputs, windowing).
    serialized_fn = pickler.dumps((dofn, (), {}, [], None))
    return _create_pardo_operation(
        factory, transform_id, transform_proto, consumers,
        serialized_fn, [])
@BeamTransformFactory.register_urn(
    urns.GROUP_ALSO_BY_WINDOW_TRANSFORM, wrappers_pb2.BytesValue)
def create(factory, transform_id, transform_proto, parameter, consumers):
    """Create a GroupAlsoByWindow ParDo; parameter.value is the id of the
    windowing strategy to group under."""
    # Perhaps this hack can go away once all apply overloads are gone.
    from apache_beam.transforms.core import _GroupAlsoByWindowDoFn
    windowing = factory.context.windowing_strategies.get_by_id(parameter.value)
    return _create_simple_pardo_operation(
        factory, transform_id, transform_proto, consumers,
        _GroupAlsoByWindowDoFn(windowing))
@BeamTransformFactory.register_urn(
    urns.WINDOW_INTO_TRANSFORM, beam_runner_api_pb2.WindowingStrategy)
def create(factory, transform_id, transform_proto, parameter, consumers):
    """Create a ParDo that reassigns each element's windows according to
    the given windowing strategy proto."""
    from apache_beam.transforms.core import Windowing
    from apache_beam.transforms.window import WindowFn, WindowedValue

    class WindowIntoDoFn(beam.DoFn):
        def __init__(self, windowing):
            self.windowing = windowing

        def process(self, element, timestamp=beam.DoFn.TimestampParam):
            # Ask the window fn for this element's new window assignment.
            new_windows = self.windowing.windowfn.assign(
                WindowFn.AssignContext(timestamp, element=element))
            yield WindowedValue(element, timestamp, new_windows)

    windowing = Windowing.from_runner_api(parameter, factory.context)
    return _create_simple_pardo_operation(
        factory, transform_id, transform_proto, consumers,
        WindowIntoDoFn(windowing))
@BeamTransformFactory.register_urn(IDENTITY_DOFN_URN, None)
def create(factory, transform_id, transform_proto, unused_parameter, consumers):
    """Create a pass-through operation (implemented as a flatten)."""
    flatten_spec = operation_specs.WorkerFlatten(
        None, [factory.get_only_output_coder(transform_proto)])
    flatten_op = operations.FlattenOperation(
        transform_proto.unique_name,
        flatten_spec,
        factory.counter_factory,
        factory.state_sampler)
    return factory.augment_oldstyle_op(
        flatten_op, transform_proto.unique_name, consumers)
|
|
#!/usr/bin/env python2
'''
Fetch and combine multiple inventory account settings into a single
json hash.
'''
# vim: expandtab:tabstop=4:shiftwidth=4
from string import Template
from time import time
import argparse
import atexit
import copy
import errno
import fcntl
import json
import os
import re
import shutil
import subprocess
import sys
import tempfile
import yaml
CONFIG_FILE_NAME = 'multi_inventory.yaml'
DEFAULT_CACHE_PATH = os.path.expanduser('~/.ansible/tmp/multi_inventory.cache')
FILE_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)))
# FIXME Move this list of boolean variable names into the
# config file, maybe as a "boolean_clone_groups" key?
BOOLEAN_CLONE_GROUPS = ('oo_config', 'oo_provision', 'oo_scalegroup')
class MultiInventoryAccount(object):
    """Object to represent an account.

    Wraps one provider account from the config: owns its cache settings,
    runs the provider script as a subprocess, and post-processes the raw
    provider inventory (cluster vars, clone vars/groups, extra groups,
    group selectors, group whitelist).
    """
    #pylint: disable=too-many-arguments
    def __init__(self, name, config, cache_path, cache_max_age=1800, group_whitelist=None):
        self.name = name
        self.config = config
        self.group_whitelist = group_whitelist
        self._inventory = None
        # set to global cache_max_age
        self.cache_max_age = cache_max_age
        # if an account setting exists, set to account cache_max_age
        if self.config.has_key('cache_max_age'):
            self.cache_max_age = self.config['cache_max_age']
        # set to global account cache_path
        self.cache_path = os.path.expanduser(cache_path)
        # if an account setting exists, set to account cache_path
        if self.config.has_key('cache_path'):
            self.cache_path = os.path.expanduser(self.config['cache_path'])
        # collect a list of clusters for this account
        self.clusters = []
        if self.config.has_key('cluster_vars') and \
           self.config['cluster_vars'].has_key('clusters'):
            self.clusters = self.config['cluster_vars']['clusters'].keys()

    @property
    def inventory(self):
        """property for inventory; lazily loaded from the per-account
        on-disk cache.  On a cache hit the account config is applied to
        the loaded data; on a miss an empty dict is returned unmodified.
        """
        if self._inventory is None:
            self._inventory = MultiInventoryUtils.get_inventory_from_cache(os.path.join(self.cache_path, self.name))
            if self._inventory is None:
                self._inventory = {}
            else:
                self.apply_account_config()
        return self._inventory

    @inventory.setter
    def inventory(self, data):
        """setter for inventory; re-applies the account config to data"""
        self._inventory = data
        self.apply_account_config()

    @staticmethod
    def run_account_providers(accounts, force=False, validate_cache=True, account=None, debug=False):
        """Setup the provider call with proper variables and call the account's run_provider"""
        try:
            updated = False
            # if account parameter is present we only want refresh that specific account
            tmp_accounts = accounts
            if account:
                tmp_accounts = [acc for acc in accounts if acc.name == account]
            # launch every provider subprocess first (futures pattern),
            # then collect their results below
            processes = {}
            for acc in tmp_accounts:
                if debug:
                    print "Calling run_provider for account: %s" % acc.name
                processes[acc.name] = acc.run_provider(force=force, validate_cache=validate_cache)
            # for each process collect stdout when its available
            for acc in tmp_accounts:
                if not processes[acc.name]['cached']:
                    proc = processes[acc.name]['proc']
                    out, err = proc.communicate()
                    # For any non-zero, raise an error on it
                    # NOTE(review): despite the comment above, failures are
                    # only written to stderr -- remaining accounts still run.
                    if proc.returncode != 0:
                        err_msg = 'Account: %s Error_Code: %s StdErr: [%s] Stdout: [%s]\n' % \
                                  (acc.name, proc.returncode, err.replace('\n', '\\n'), out.replace('\n', '\\n'))
                        #raise RuntimeError('\n'.join(err_msg))
                        sys.stderr.write(err_msg)
                    else:
                        updated = True
                        # The reason this exists here is that we
                        # are using the futures pattern. The account object
                        # has run a subprocess and we are collecting the results.
                        data = json.loads(out)
                        acc.write_to_cache(data)
                        acc.inventory = data
        except ValueError as exc:
            print exc.message
            sys.exit(1)
        return updated

    def empty_creds(self):
        '''test whether credentials are empty'''
        env = self.config.get('env_vars', {})
        if 'ec2' in self.config['provider'] and (\
           not env.has_key('AWS_ACCESS_KEY_ID') or not env['AWS_ACCESS_KEY_ID'] or \
           not env.has_key('AWS_SECRET_ACCESS_KEY') or not env['AWS_SECRET_ACCESS_KEY']):
            return True
        return False

    def run_provider(self, force=False, validate_cache=False):
        """Setup the provider call with proper variables and call self.get_provider_tags
        force: Whether to force an update on the account
        validate_cache: Whether or not to validate the cache.
                        This happens in case of an account update
        """
        # verify creds are in place. Currently only checks aws accounts.
        # on new accounts, the time between account setup and install
        # can vary and we'd like to ignore accounts that are not provisioned.
        # In the above case, return:
        # cached: True and 'data': {}
        if self.empty_creds():
            return {'cached': True, 'data': self.inventory}
        # force an update??
        if not force:
            # check if we want to validate the cache or if the cache is valid
            if not validate_cache or \
               MultiInventoryUtils.is_cache_valid(os.path.join(self.cache_path, self.name), self.cache_max_age):
                return {'cached': True, 'data': self.inventory}
        # NOTE(review): tmp_dir is unbound when 'provider_files' is absent
        # but env_vars are set -- potential NameError below; presumably
        # configs always pair them.  Confirm before relying on it.
        if self.config.has_key('provider_files'):
            tmp_dir = self.generate_config()
        # Update env vars after creating provider_config_files
        # so that we can grab the tmp_dir if it exists
        env = self.config.get('env_vars', {})
        if env and tmp_dir:
            for key, value in env.items():
                if value is None:
                    value = ''
                env[key] = Template(value).substitute(tmpdir=tmp_dir)
        if not env:
            env = os.environ
        provider = self.config['provider']
        # Allow relatively path'd providers in config file
        if os.path.isfile(os.path.join(FILE_PATH, self.config['provider'])):
            provider = os.path.join(FILE_PATH, self.config['provider'])
        # check to see if provider exists
        if not os.path.isfile(provider) or not os.access(provider, os.X_OK):
            raise RuntimeError("Problem with the provider. Please check path " \
                               "and that it is executable. (%s)" % provider)
        cmds = [provider, '--list']
        if 'aws' in provider.lower():
            cmds.append('--refresh-cache')
        return {'cached': False,
                'proc': subprocess.Popen(cmds, stderr=subprocess.PIPE, stdout=subprocess.PIPE, env=env)}

    def apply_cluster_vars(self):
        """Apply the account config cluster vars"""
        # cluster vars go here
        # do nothing for accounts that do not have cluster vars
        if not self.config.get('cluster_vars', None):
            return
        cluster_tag = self.config['cluster_vars']['cluster_tag']
        synthetic_hosts = self.config['cluster_vars'].get('synthetic_hosts', False)
        for cluster_name, cluster in self.config['cluster_vars']['clusters'].items():
            for host in self.inventory['_meta']['hostvars'].values():
                clusterid = MultiInventoryUtils.get_entry(host, cluster_tag)
                if clusterid == cluster_name:
                    MultiInventoryUtils.add_entry(host, 'oo_clusterid', clusterid)
                    for new_var, value in cluster.items():
                        MultiInventoryUtils.add_entry(host, new_var, value)
            # Apply synthetic host groups for boot strapping purposes
            if synthetic_hosts:
                synth_host = 'synthetic_%s' % cluster_name
                self.inventory['_meta']['hostvars'][synth_host] = {'oo_clusterid': cluster_name,
                                                                   'synthetic' : True}
                for new_var, value in cluster.items():
                    self.inventory['_meta']['hostvars'][synth_host][new_var] = value
                if not self.inventory.has_key('synthetic_hosts'):
                    self.inventory['synthetic_hosts'] = []
                self.inventory['synthetic_hosts'].append(synth_host)

    def apply_extra_vars(self):
        """Apply the account config extra vars """
        # Extra vars go here
        for new_var, value in self.config.get('extra_vars', {}).items():
            for data in self.inventory['_meta']['hostvars'].values():
                MultiInventoryUtils.add_entry(data, new_var, value)

    # pylint: disable=too-many-branches,too-many-nested-blocks
    def apply_clone_vars(self):
        """Apply the account config clone vars """
        # Clone vars go here
        for to_name, from_name in self.config.get('clone_vars', {}).items():
            for name, data in self.inventory['_meta']['hostvars'].items():
                # we need to handle oo_name
                # if gce_name or ec2_tag_Name is defined, use it for legacy mode
                # Next, we process the from_name if its a list and build the names
                if to_name == 'oo_name':
                    if 'synthetic_' in name:
                        MultiInventoryUtils.add_entry(data, to_name, name)
                        # REMOVE when v2 goes extinct
                        if 'oo_version' in data and data['oo_version'] == 2:
                            MultiInventoryUtils.add_entry(data, to_name, MultiInventoryUtils.get_entry(data, name))
                        continue
                    # if name tag exists give it!
                    if 'ec2_tag_Name' in data:
                        # pylint: disable=line-too-long
                        MultiInventoryUtils.add_entry(data, to_name, MultiInventoryUtils.get_entry(data, 'ec2_tag_Name'))
                    # if name tag exists give it!
                    elif 'gce_name' in data:
                        # pylint: disable=line-too-long
                        MultiInventoryUtils.add_entry(data, to_name, MultiInventoryUtils.get_entry(data, 'gce_name'))
                    # process the format of {['host-type': 'master', 'name': [attr1, attr2]]}
                    elif isinstance(from_name, list):
                        name_vars = []
                        for name_info in from_name:
                            # pylint: disable=line-too-long
                            if (('ec2_tag_host-type' in data and data['ec2_tag_host-type'] == name_info['host-type']) or
                                    ('gce_metadata' in data and data['gce_metadata']['host-type'] == name_info['host-type'])):
                                name_vars = [data.get(tag, 'nil') for tag in name_info['name']]
                                value = '-'.join(name_vars)
                                MultiInventoryUtils.add_entry(data, to_name, value)
                                continue
                    # user wants something else for from_name
                    elif from_name in data:
                        MultiInventoryUtils.add_entry(data, to_name, MultiInventoryUtils.get_entry(data, from_name))
                    # We didn't match a host-type so assign the current name to the oo_name (best attempt)
                    if 'oo_name' not in data:
                        MultiInventoryUtils.add_entry(data, to_name, name)
                    continue
                MultiInventoryUtils.add_entry(data, to_name, MultiInventoryUtils.get_entry(data, from_name))

    def apply_extra_groups(self):
        """Apply the account config for extra groups """
        for new_var, value in self.config.get('extra_groups', {}).items():
            for name, _ in self.inventory['_meta']['hostvars'].items():
                if 'synthetic_' in name:
                    continue
                self.inventory["%s_%s" % (new_var, value)] = copy.copy(self.inventory['all_hosts'])

    def apply_clone_groups(self):
        """Apply the account config for clone groups """
        for to_name, from_name in self.config.get('clone_groups', {}).items():
            for name, data in self.inventory['_meta']['hostvars'].items():
                if 'synthetic_' in name:
                    continue
                val = MultiInventoryUtils.get_entry(data, from_name)
                # Convert boolean variables to 'True' or 'False'.
                if to_name in BOOLEAN_CLONE_GROUPS:
                    if isinstance(val, basestring):
                        val = str(val.lower() == 'true')
                    else:  # Mainly for None -> False
                        val = str(bool(val))
                key = '%s_%s' % (to_name, val)
                self.inventory.setdefault(key, []).append(name)

    def apply_group_selectors(self):
        """Apply the account config for group selectors """
        # There could be multiple clusters per account. We need to process these selectors
        # based upon the oo_clusterid_ variable.
        clusterids = [group for group in self.inventory if "oo_clusterid_" in group]
        for clusterid in clusterids:
            for selector in self.config.get('group_selectors', {}):
                if self.inventory.has_key(selector['from_group']):
                    hosts = list(set(self.inventory[clusterid]) & set(self.inventory[selector['from_group']]))
                    hosts.sort()
                    # Multiple clusters in an account
                    if self.inventory.has_key(selector['name']):
                        self.inventory[selector['name']].extend(hosts[0:selector['count']])
                    else:
                        self.inventory[selector['name']] = hosts[0:selector['count']]
                    for host in hosts:
                        # pylint: disable=line-too-long
                        self.inventory['_meta']['hostvars'][host][selector['name']] = host in self.inventory[selector['name']]

    def apply_account_config(self):
        """ Apply account config settings """
        self.apply_cluster_vars()
        self.apply_extra_vars()
        self.apply_clone_vars()
        # apply_cluster_vars creates oo_name and also synthetic hosts.
        # apply_clone_vars sets oo_name when its not defined already.
        # We need to copy the new hosts into their oo_name and use it for their
        # inventory name. This code copies the hosts and renames them with oo_name
        # skip synthetic_hosts
        hosts = {}
        syn = []
        for name, host in self.inventory['_meta']['hostvars'].items():
            if 'synthetic_' in name:
                syn.append(name)
                hosts[name] = host
            elif 'oo_name' in host and host['oo_name'] is not None:
                hosts[host['oo_name']] = host
                continue
            elif name is None:
                hosts[host['oo_public_ip']] = host
            # last ditch effort, give the host its dns name
            else:
                hosts[name] = host
        self.inventory['_meta']['hostvars'] = hosts
        # all_hosts excludes the synthetic bootstrap hosts collected above
        self.inventory['all_hosts'] = list(set(self.inventory['_meta']['hostvars'].keys()) - set(syn))
        self.apply_extra_groups()
        self.apply_clone_groups()
        self.apply_group_selectors()
        ######################################################################
        # If no groups specified in the whitelist then return all groups
        ######################################################################
        if self.group_whitelist is None or len(self.group_whitelist) == 0:
            return
        ######################################################################
        # remove any extra groups that are not provided in the group_whitelist
        ######################################################################
        for key in self.inventory.keys():
            match = False
            for regex in self.group_whitelist:
                if re.search(regex, key) is not None:
                    match = True
                    break
            if match:
                continue
            del self.inventory[key]

    def generate_config(self):
        """Generate the provider_files in a temporary directory"""
        prefix = 'multi_inventory.'
        tmp_dir_path = tempfile.mkdtemp(prefix=prefix)
        # remove the generated files when the process exits
        atexit.register(MultiInventoryUtils.cleanup, [tmp_dir_path])
        for provider_file in self.config['provider_files']:
            with open(os.path.join(tmp_dir_path, provider_file['name']), 'w+') as filedes:
                content = Template(provider_file['contents']).substitute(tmpdir=tmp_dir_path)
                filedes.write(content)
        return tmp_dir_path

    def write_to_cache(self, data):
        '''account cache writer'''
        MultiInventoryUtils.write_to_cache(os.path.join(self.cache_path, self.name), data)
class MultiInventoryException(Exception):
    '''Exceptions for MultiInventory class'''
# Need to figure out how to reduce the attributes. Most are used for default settings
# and determining cache, cache_max_age, results, and storing account data
class MultiInventory(object):
    '''
    MultiInventory class:
    Opens a yaml config file and reads aws credentials.
    Stores a json hash of resources in result.
    '''
    def __init__(self, args=None):
        # Allow args to be passed when called as a library
        self.args = {}
        if args != None:
            self.args = args
        self.accounts = []
        self.cache_max_age = 1800
        self.cache_path = DEFAULT_CACHE_PATH
        self.config = None
        self.config_file = None
        # Store each individual results by account name in this variable
        self.all_inventory_results = {}

    def run(self):
        '''This method checks to see if the local cache is valid for the inventory.
        if the cache is valid; return cache
        else the credentials are loaded from multi_inventory.yaml or from the env
        and we attempt to get the inventory from the provider specified.
        '''
        # Finish loading configuration files
        self.load_config_settings()
        results = {}
        # --refresh
        # Either force a refresh on the cache or validate it
        if self.args.get('refresh_cache', None):
            results = self.get_inventory()
        #--from-cache was called.
        elif self.args.get('from_cache', False):
            results = MultiInventoryUtils.get_inventory_from_cache(self.cache_path)
        #--list was called.
        elif not MultiInventoryUtils.is_cache_valid(self.cache_path, self.cache_max_age):
            results = self.get_inventory()
        else:
            # get data from disk
            results = MultiInventoryUtils.get_inventory_from_cache(self.cache_path)
        return results

    def load_config_settings(self):
        """Setup the config settings for cache, config file, etc"""
        # Prefer a file in the same directory, fall back to a file in etc
        if os.path.isfile(os.path.join(FILE_PATH, CONFIG_FILE_NAME)):
            self.config_file = os.path.join(FILE_PATH, CONFIG_FILE_NAME)
        elif os.path.isfile(os.path.join(os.path.sep, 'etc', 'ansible', CONFIG_FILE_NAME)):
            self.config_file = os.path.join(os.path.sep, 'etc', 'ansible', CONFIG_FILE_NAME)
        else:
            self.config_file = None  # expect env vars
        # load yaml
        if self.config_file and os.path.isfile(self.config_file):
            self.config = self.load_yaml_config()
            self.cache_max_age = self.config['cache_max_age']
        elif os.environ.has_key("AWS_ACCESS_KEY_ID") and os.environ.has_key("AWS_SECRET_ACCESS_KEY"):
            # Build a default config
            self.config = {}
            # pylint: disable=line-too-long
            self.config['accounts'] = {'default': {'cache_location': DEFAULT_CACHE_PATH,
                                                   'provider': 'aws/hosts/ec2.py',
                                                   'env_vars': {'AWS_ACCESS_KEY_ID': os.environ["AWS_ACCESS_KEY_ID"],
                                                                'AWS_SECRET_ACCESS_KEY': os.environ["AWS_SECRET_ACCESS_KEY"],
                                                               }
                                                  }
                                      }
            self.cache_max_age = 300
        else:
            raise RuntimeError("Could not find valid ec2 credentials in the environment.")
        if self.config.has_key('cache_location'):
            self.cache_path = self.config['cache_location']

    def load_yaml_config(self, conf_file=None):
        """Load a yaml config file with credentials to query the respective cloud for inventory"""
        config = None
        if not conf_file:
            conf_file = self.config_file
        with open(conf_file) as conf:
            config = yaml.safe_load(conf)
        return config

    def build_accounts(self):
        '''create an account array and return it'''
        self.accounts = []
        account_names = []
        for acc_name, account in self.config['accounts'].items():
            if acc_name in account_names:
                raise MultiInventoryException('Multiple accounts exist with the same name. ' \
                                              'This is not permitted. name=[%s]' % acc_name)
            account_names.append(acc_name)
            self.accounts.append(MultiInventoryAccount(acc_name,
                                                       account,
                                                       self.config.get('cache_account_location', '/tmp/account'),
                                                       self.config.get('cache_max_age', 1800),
                                                       self.config.get('group_whitelist', [])))

    def get_account_from_cluster(self, cluster):
        '''return the account if it has the specified cluster'''
        for account in self.accounts:
            if cluster in account.clusters:
                return account.name
        raise MultiInventoryException('Cluster [%s] does not exist. Exiting.' % cluster)

    def get_inventory(self):
        """Create the subprocess to fetch tags from a provider.
        Query all of the different accounts for their tags. Once completed
        store all of their results into one merged updated hash.
        """
        # instantiate all the accounts
        self.build_accounts()
        # Determine if an account was passed from the cli or args
        account = None
        if self.args.get('account', None):
            account = self.args['account']
            # Validate the account exists
            if account not in [acc.name for acc in self.accounts]:
                raise MultiInventoryException('Account [%s] does not exist. Exiting.' % account)
        # find the specified account by cluster
        elif self.args.get('cluster', None):
            account = self.get_account_from_cluster(self.args['cluster'])
        # Run the account providers. If account is set, only update the desired account
        updated = MultiInventoryAccount.run_account_providers(self.accounts,
                                                              force=self.args.get('refresh_cache', False),
                                                              validate_cache=self.args.get('list', True),
                                                              account=account,
                                                              debug=self.args.get('debug', False))
        all_results = {}
        #values = self.accounts[0].inventory.values()
        #values.insert(0, all_results)
        for account in self.accounts:
            # Build results by merging all dictionaries
            MultiInventoryUtils.merge_destructively(all_results, account.inventory)
        # Ensure all boolean groups are defined.
        for name in BOOLEAN_CLONE_GROUPS:
            for bool_val in (True, False):
                key = '%s_%s' % (name, str(bool_val))
                all_results.setdefault(key, [])
        # only write cache if something was updated
        # in the run_account_provider.
        if updated:
            MultiInventoryUtils.write_to_cache(self.cache_path, all_results)
        return all_results

    def parse_cli_args(self):
        """ Command line argument processing """
        parser = argparse.ArgumentParser(
            description='Produce an Ansible Inventory file based on a provider')
        parser.add_argument('--refresh-cache', action='store_true', default=False,
                            help='Fetch cached only instances (default: False)')
        parser.add_argument('--list', action='store_true', default=True,
                            help='List instances (default: True)')
        parser.add_argument('--cluster', action='store', default=False,
                            help='Refresh an account by cluster name')
        parser.add_argument('--account', action='store', default=False,
                            help='Refresh an account')
        parser.add_argument('--debug', action='store_true', default=False,
                            help='Whether to print debug')
        parser.add_argument('--from-cache', action='store_true', default=False,
                            help='Whether to pull from cache')
        self.args = parser.parse_args().__dict__
class MultiInventoryUtils(object):
    """Shared, stateless utilities (cache I/O, dict merge, dotted-key
    access) used by the account and inventory classes."""

    @staticmethod
    def write_to_cache(cache_path, data):
        """Writes data in JSON format to cache_path"""
        # if it does not exist, try and create it.
        if not os.path.isfile(cache_path):
            path = os.path.dirname(cache_path)
            try:
                os.makedirs(path)
            except OSError as exc:
                # tolerate a race where another process created the dir
                if exc.errno != errno.EEXIST or not os.path.isdir(path):
                    raise
        json_data = MultiInventoryUtils.json_format_dict(data, True)
        # hold an exclusive lock while writing so concurrent runs do not
        # interleave partial cache contents
        with open(cache_path, 'w') as cache:
            try:
                fcntl.flock(cache, fcntl.LOCK_EX)
                cache.write(json_data)
            finally:
                fcntl.flock(cache, fcntl.LOCK_UN)

    @staticmethod
    def json_format_dict(data, pretty=False):
        """ Converts a dict to a JSON object and dumps it as a formatted string """
        if pretty:
            return json.dumps(data, sort_keys=True, indent=2)
        else:
            return json.dumps(data)

    @staticmethod
    def get_inventory_from_cache(cache_path):
        """Reads the inventory from the cache file and returns it as a JSON
        object (None when the cache file does not exist)."""
        results = None
        if not os.path.isfile(cache_path):
            return results
        with open(cache_path, 'r') as cache:
            results = json.loads(cache.read())
        return results

    @staticmethod
    def is_cache_valid(cache_path, age=1800):
        """ Determines if the cache files have expired, or if it is still valid """
        if os.path.isfile(cache_path):
            mod_time = os.path.getmtime(cache_path)
            current_time = time()
            if (mod_time + age) > current_time:
                return True
        return False

    @staticmethod
    def cleanup(files):
        """Clean up on exit: remove each listed file or directory tree."""
        for sfile in files:
            if os.path.exists(sfile):
                if os.path.isdir(sfile):
                    shutil.rmtree(sfile)
                elif os.path.isfile(sfile):
                    os.remove(sfile)

    @staticmethod
    def merge_destructively(input_a, input_b):
        """merges input_b into input_a (in place); returns input_a.

        Dicts merge recursively; equal leaves are kept; otherwise values
        are combined into de-duplicated lists.
        """
        for key in input_b:
            if key in input_a:
                if isinstance(input_a[key], dict) and isinstance(input_b[key], dict):
                    MultiInventoryUtils.merge_destructively(input_a[key], input_b[key])
                elif input_a[key] == input_b[key]:
                    pass  # same leaf value
                # both lists so add each element in b to a if it does ! exist
                elif isinstance(input_a[key], list) and isinstance(input_b[key], list):
                    for result in input_b[key]:
                        if result not in input_a[key]:
                            input_a[key].append(result)
                # a is a list and not b
                elif isinstance(input_a[key], list):
                    if input_b[key] not in input_a[key]:
                        input_a[key].append(input_b[key])
                elif isinstance(input_b[key], list):
                    input_a[key] = [input_a[key]] + [k for k in input_b[key] if k != input_a[key]]
                else:
                    input_a[key] = [input_a[key], input_b[key]]
            else:
                input_a[key] = input_b[key]
        return input_a

    @staticmethod
    def add_entry(data, keys, item):
        """Add an item to a dictionary with key notation a.b.c
        d = {'a': {'b': 'c'}}}
        keys = a.b
        item = c
        """
        if "." in keys:
            key, rest = keys.split(".", 1)
            if key not in data:
                data[key] = {}
            MultiInventoryUtils.add_entry(data[key], rest, item)
        else:
            data[keys] = item

    @staticmethod
    def get_entry(data, keys):
        """ Get an item from a dictionary with key notation a.b.c
        d = {'a': {'b': 'c'}}}
        keys = a.b
        return c
        """
        if keys and "." in keys:
            key, rest = keys.split(".", 1)
            # FIX: use `in` instead of dict.has_key -- has_key was removed
            # in python3 and `in` behaves identically on python2.
            if key in data:
                return MultiInventoryUtils.get_entry(data[key], rest)
            return None
        else:
            return data.get(keys, None)
if __name__ == "__main__":
    # Entry point: parse CLI args, run the inventory collection, and dump
    # the merged result as pretty-printed JSON.
    MI2 = MultiInventory()
    MI2.parse_cli_args()
    # print() with a single argument behaves identically under python2's
    # print statement and python3's print function.
    print(MultiInventoryUtils.json_format_dict(MI2.run(), True))
|
|
__author__ = 'maru'
__copyright__ = "Copyright 2013, ML Lab"
__version__ = "0.1"
__status__ = "Development"
import sys
import os
sys.path.append(os.path.abspath("."))
from experiment_utils import *
import argparse
import numpy as np
from sklearn.datasets.base import Bunch
from datautil.load_data import *
from sklearn import linear_model
import time
from sklearn import metrics
from collections import defaultdict
from datautil.textutils import StemTokenizer
from strategy import randomsampling
from expert import baseexpert
from sklearn.feature_extraction.text import CountVectorizer
import pickle
############# COMMAND LINE PARAMETERS ##################
# CLI surface for the active-learning experiment below.  Option order and
# help strings are user-visible; keep them stable.
ap = argparse.ArgumentParser(description=__doc__,
                             formatter_class=argparse.RawTextHelpFormatter)
# --- data selection ---
ap.add_argument('--train',
                metavar='TRAIN',
                default="20news",
                help='training data (libSVM format)')
# --- expert / oracle simulation ---
ap.add_argument('--neutral-threshold',
                metavar='NEUTRAL',
                type=float,
                default=.4,
                help='neutrality threshold of uncertainty')
ap.add_argument('--expert-penalty',
                metavar='EXPERT_PENALTY',
                type=float,
                default=0.3,
                help='Expert penalty value for the classifier simulation')
# --- experiment size / budget ---
ap.add_argument('--trials',
                metavar='TRIALS',
                type=int,
                default=5,
                help='number of trials')
ap.add_argument('--folds',
                metavar='FOLDS',
                type=int,
                default=1,
                help='number of folds')
ap.add_argument('--budget',
                metavar='BUDGET',
                type=int,
                default=20000,
                help='budget')
ap.add_argument('--step-size',
                metavar='STEP_SIZE',
                type=int,
                default=10,
                help='instances to acquire at every iteration')
ap.add_argument('--bootstrap',
                metavar='BOOTSTRAP',
                type=int,
                default=50,
                help='size of the initial labeled dataset')
# --- labeling-cost model ---
ap.add_argument('--cost-function',
                metavar='COST_FUNCTION',
                type=str,
                default="direct",
                help='cost function of the x-axis [uniform|log|linear|direct]')
# Default cost model: list of [document-length, cost] calibration points.
ap.add_argument('--cost-model',
                metavar='COST_MODEL',
                type=str,
                default="[[10.0,5.7], [25.0,8.2], [50.1,10.9], [75,15.9], [100,16.7], [125,17.8], [150,22.7], [175,19.9], [200,17.4]]",
                help='cost function parameters of the cost function')
ap.add_argument('--fixk',
                metavar='FIXK',
                type=int,
                default=10,
                help='fixed k number of words')
ap.add_argument('--maxiter',
                metavar='MAXITER',
                type=int,
                default=5,
                help='Max number of iterations')
# NOTE(review): the --seed help text looks copy-pasted from --maxiter;
# presumably it should describe the RNG seed.  (Help strings are runtime
# output, so left unchanged here.)
ap.add_argument('--seed',
                metavar='SEED',
                type=int,
                default=8765432,
                help='Max number of iterations')
# --- method selection ---
ap.add_argument('--method',
                metavar='METHOD',
                type=str,
                default="unc",
                help='Sampling method [rnd|unc]')
ap.add_argument('--classifier',
                metavar='CLASSIFIER',
                type=str,
                default="lr",
                help='underlying classifier')
args = ap.parse_args()
# Seeded RNG so trials are reproducible across runs.
rand = np.random.mtrand.RandomState(args.seed)
print args
print
####################### MAIN ####################
def main():
    """Run the uncertainty-sampling active-learning experiment.

    Loads a 20-newsgroups subset, fits a simulated "expert" labeler on the
    test split, then repeatedly queries the student learner for instances,
    pays the (simulated) labeling cost, retrains, and records accuracy/AUC
    as a function of cumulative cost, averaged over ``args.trials`` trials.
    Reads configuration from the module-level ``args``.
    """
    # Results accumulated across all trials, keyed by x-axis value
    # (cumulative labeling cost).
    accuracies = defaultdict(lambda: [])
    aucs = defaultdict(lambda: [])
    x_axis = defaultdict(lambda: [])
    # Binary bag-of-words features over 1-3 grams with stemming.
    vct = CountVectorizer(encoding='ISO-8859-1', min_df=5, max_df=1.0, binary=True, ngram_range=(1, 3),
                          token_pattern='\\b\\w+\\b', tokenizer=StemTokenizer())
    vct_analizer = vct.build_tokenizer()
    print("Start loading ...")
    # data fields: data, bow, file_names, target_names, target
    ########## NEWS GROUPS ###############
    # easy to hard. see "Less is More" paper: http://axon.cs.byu.edu/~martinez/classes/678/Presentations/Clawson.pdf
    categories = [['alt.atheism', 'talk.religion.misc'],
                  ['comp.graphics', 'comp.windows.x'],
                  ['comp.os.ms-windows.misc', 'comp.sys.ibm.pc.hardware'],
                  ['rec.sport.baseball', 'sci.crypt']]
    min_size = max(100, args.fixk)
    # A negative --fixk means "do not truncate documents to k words".
    if args.fixk < 0:
        args.fixk = None
    data, vct = load_from_file(args.train, categories, args.fixk, min_size, vct)
    # data = load_dataset(args.train, args.fixk, categories[0], vct, min_size)
    print("Data %s" % args.train)
    print("Data size %s" % len(data.train.data))
    parameters = parse_parameters_mat(args.cost_model)
    print "Cost Parameters %s" % parameters
    cost_model = set_cost_model(args.cost_function, parameters=parameters)
    print "\nCost Model: %s" % cost_model.__class__.__name__
    #### STUDENT CLASSIFIER
    clf = set_classifier(args.classifier)
    print "\nClassifier: %s" % clf
    #### EXPERT CLASSIFIER
    # The simulated expert is an L1 logistic regression trained on the TEST
    # split; it may abstain (neutral label) inside the neutrality threshold.
    exp_clf = linear_model.LogisticRegression(penalty='l1', C=args.expert_penalty)
    exp_clf.fit(data.test.bow, data.test.target)
    expert = baseexpert.NeutralityExpert(exp_clf, threshold=args.neutral_threshold,
                                         cost_function=cost_model.cost_function)
    print "\nExpert: %s " % expert
    #### ACTIVE LEARNING SETTINGS
    step_size = args.step_size
    bootstrap_size = args.bootstrap
    evaluation_points = 200
    print("\nExperiment: step={0}, BT={1}, plot points={2}, fixk:{3}, minsize:{4}".format(step_size, bootstrap_size,
                                                                                         evaluation_points, args.fixk,
                                                                                         min_size))
    print ("Cheating experiment - use full uncertainty query k words")
    t0 = time.time()
    ### experiment starts
    tx =[]
    tac = []  # per-trial accuracy traces
    tau = []  # per-trial AUC traces
    for t in range(args.trials):
        trial_accu =[]
        trial_aucs = []
        trial_x_axis = []
        print "*" * 60
        print "Trial: %s" % t
        # seed=t makes each trial's sampling distinct but reproducible.
        student = randomsampling.UncertaintyLearner(model=clf, accuracy_model=None, budget=args.budget, seed=t,
                                                    subpool=250)
        print "\nStudent: %s " % student
        train_indices = []
        train_x = []
        train_y = []
        pool = Bunch()
        pool.data = data.train.bow.tocsr()  # full words, for training
        pool.fixk = data.train.bowk.tocsr()  # k words BOW for querying
        pool.target = data.train.target
        pool.predicted = []
        pool.kwords = np.array(data.train.kwords)  # k words
        pool.remaining = set(range(pool.data.shape[0]))  # indices of the pool
        bootstrapped = False
        current_cost = 0
        iteration = 0
        # Loop until the budget is spent, the pool is nearly exhausted,
        # or the iteration cap is reached.
        while 0 < student.budget and len(pool.remaining) > step_size and iteration <= args.maxiter:
            if not bootstrapped:
                ## random from each bootstrap
                bt = randomsampling.BootstrapFromEach(t * 10)
                query_index = bt.bootstrap(pool=pool, k=bootstrap_size)
                bootstrapped = True
                print "Bootstrap: %s " % bt.__class__.__name__
                print
            else:
                query_index = student.pick_next(pool=pool, k=step_size)
            query = pool.fixk[query_index]  # query with k words
            query_size = [len(vct_analizer(x)) for x in pool.kwords[query_index]]
            ground_truth = pool.target[query_index]
            #labels, spent = expert.label(unlabeled=query, target=ground_truth)
            if iteration == 0:  ## bootstrap uses ground truth
                labels = ground_truth
                spent = [0] * len(ground_truth)  ## bootstrap cost is ignored
            else:
                labels = expert.label_instances(query, ground_truth)
                spent = expert.estimate_instances(query_size)
            ### accumulate the cost of the query
            query_cost = np.array(spent).sum()
            current_cost += query_cost
            ## add data recent acquired to train
            # Keep only answers where the expert did not abstain (label is not None).
            useful_answers = np.array([[x, y] for x, y in zip(query_index, labels) if y is not None])
            # train_indices.extend(query_index)
            if useful_answers.shape[0] != 0:
                train_indices.extend(useful_answers[:, 0])
                # add labels to training
                train_x = pool.data[train_indices]  ## train with all the words
                # update labels with the expert labels
                #train_y = pool.target[train_indices]
                train_y.extend(useful_answers[:, 1])
            # Sanity check: features and labels must stay in lockstep.
            if train_x.shape[0] != len(train_y):
                raise Exception("Training data corrupted!")
            # remove labels from pool
            pool.remaining.difference_update(query_index)
            # retrain the model
            current_model = student.train(train_x, train_y)
            # evaluate and save results
            y_probas = current_model.predict_proba(data.test.bow)
            auc = metrics.roc_auc_score(data.test.target, y_probas[:, 1])
            pred_y = current_model.classes_[np.argmax(y_probas, axis=1)]
            accu = metrics.accuracy_score(data.test.target, pred_y)
            print ("TS:{0}\tAccu:{1:.3f}\tAUC:{2:.3f}\tCost:{3:.2f}\tCumm:{4:.2f}\tSpent:{5}\tuseful:{6}".format(len(train_indices),
                                                                                                                 accu,
                                                                                                                 auc, query_cost,
                                                                                                                 current_cost, format_spent(spent), useful_answers.shape[0]))
            ## the results should be based on the cost of the labeling
            if iteration > 0:  # bootstrap iteration
                student.budget -= query_cost  ## Bootstrap doesn't count
            x_axis_range = current_cost
            x_axis[x_axis_range].append(current_cost)
            ## save results
            accuracies[x_axis_range].append(accu)
            aucs[x_axis_range].append(auc)
            trial_accu.append([x_axis_range, accu])
            trial_aucs.append([x_axis_range, auc])
            iteration += 1
        # end of budget loop
        tac.append(trial_accu)
        tau.append(trial_aucs)
    #end trial loop
    # Resample every trial onto a common cost axis before averaging.
    accuracies = extrapolate_trials(tac, cost_25=parameters[1][1], step_size=args.step_size)
    aucs = extrapolate_trials(tau, cost_25=parameters[1][1], step_size=args.step_size)
    print("Elapsed time %.3f" % (time.time() - t0))
    print_extrapolated_results(accuracies, aucs)


if __name__ == '__main__':
    main()
|
|
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import errno
import json
import os
import time
import urllib
from tempest.common import glance_http
from tempest.common import rest_client
from tempest.common.utils import misc as misc_utils
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log as logging
CONF = config.CONF
LOG = logging.getLogger(__name__)
class ImageClientJSON(rest_client.RestClient):
    """JSON REST client for the OpenStack Image Service (Glance) v1 API."""

    def __init__(self, auth_provider):
        super(ImageClientJSON, self).__init__(
            auth_provider,
            CONF.image.catalog_type,
            endpoint_type=CONF.image.endpoint_type)
        # Raw HTTP client for chunked image data; created lazily (see `http`).
        self._http = None

    def _image_meta_from_headers(self, headers):
        """Convert ``x-image-meta-*`` response headers into a metadata dict."""
        meta = {'properties': {}}
        for key, value in headers.iteritems():
            if key.startswith('x-image-meta-property-'):
                _key = key[22:]  # strip 'x-image-meta-property-'
                meta['properties'][_key] = value
            elif key.startswith('x-image-meta-'):
                _key = key[13:]  # strip 'x-image-meta-'
                meta[_key] = value
        # Glance serializes booleans as strings; coerce the known flags.
        for key in ['is_public', 'protected', 'deleted']:
            if key in meta:
                meta[key] = meta[key].strip().lower() in ('t', 'true', 'yes',
                                                          '1')
        # Numeric fields come back as strings too; coerce best-effort.
        for key in ['size', 'min_ram', 'min_disk']:
            if key in meta:
                try:
                    meta[key] = int(meta[key])
                except ValueError:
                    pass
        return meta

    def _image_meta_to_headers(self, fields):
        """Convert an image-metadata dict into ``x-image-meta-*`` headers."""
        headers = {}
        fields_copy = copy.deepcopy(fields)
        # 'copy_from' maps to a glance-api header rather than image-meta.
        copy_from = fields_copy.pop('copy_from', None)
        if copy_from is not None:
            headers['x-glance-api-copy-from'] = copy_from
        for key, value in fields_copy.pop('properties', {}).iteritems():
            headers['x-image-meta-property-%s' % key] = str(value)
        for key, value in fields_copy.pop('api', {}).iteritems():
            headers['x-glance-api-property-%s' % key] = str(value)
        for key, value in fields_copy.iteritems():
            headers['x-image-meta-%s' % key] = str(value)
        return headers

    def _get_file_size(self, obj):
        """Analyze file-like object and attempt to determine its size.

        :param obj: file-like object, typically redirected from stdin.
        :retval The file's size or None if it cannot be determined.
        """
        # For large images, we need to supply the size of the
        # image file. See LP Bugs #827660 and #845788.
        if hasattr(obj, 'seek') and hasattr(obj, 'tell'):
            try:
                obj.seek(0, os.SEEK_END)
                obj_size = obj.tell()
                obj.seek(0)  # rewind so the caller can still read the data
                return obj_size
            except IOError as e:
                if e.errno == errno.ESPIPE:
                    # Illegal seek. This means the user is trying
                    # to pipe image data to the client, e.g.
                    # echo testdata | bin/glance add blah..., or
                    # that stdin is empty, or that a file-like
                    # object which doesn't support 'seek/tell' has
                    # been supplied.
                    return None
                else:
                    raise
        else:
            # Cannot determine size of input image
            return None

    def _get_http(self):
        """Build the raw glance HTTP client used for image-data uploads."""
        dscv = CONF.identity.disable_ssl_certificate_validation
        ca_certs = CONF.identity.ca_certificates_file
        return glance_http.HTTPClient(auth_provider=self.auth_provider,
                                      filters=self.filters,
                                      insecure=dscv, ca_certs=ca_certs)

    def _create_with_data(self, headers, data):
        """POST a new image together with its binary data."""
        resp, body_iter = self.http.raw_request('POST', '/v1/images',
                                                headers=headers, body=data)
        self._error_checker('POST', '/v1/images', headers, data, resp,
                            body_iter)
        # body_iter yields chunks; join before JSON-decoding.
        body = json.loads(''.join([c for c in body_iter]))
        return resp, body['image']

    def _update_with_data(self, image_id, headers, data):
        """PUT updated binary data (and metadata headers) for an image."""
        url = '/v1/images/%s' % image_id
        resp, body_iter = self.http.raw_request('PUT', url, headers=headers,
                                                body=data)
        self._error_checker('PUT', url, headers, data,
                            resp, body_iter)
        body = json.loads(''.join([c for c in body_iter]))
        return resp, body['image']

    @property
    def http(self):
        # Lazily construct the raw HTTP client, and only when the glance
        # service is available in this deployment.
        if self._http is None:
            if CONF.service_available.glance:
                self._http = self._get_http()
        return self._http

    def create_image(self, name, container_format, disk_format, **kwargs):
        """Create an image; uses the raw-data path when 'data' is given."""
        params = {
            "name": name,
            "container_format": container_format,
            "disk_format": disk_format,
        }
        headers = {}
        for option in ['is_public', 'location', 'properties',
                       'copy_from', 'min_ram']:
            if option in kwargs:
                params[option] = kwargs.get(option)
        headers.update(self._image_meta_to_headers(params))
        if 'data' in kwargs:
            return self._create_with_data(headers, kwargs.get('data'))
        resp, body = self.post('v1/images', None, headers)
        self.expected_success(201, resp.status)
        body = json.loads(body)
        return resp, body['image']

    def update_image(self, image_id, name=None, container_format=None,
                     data=None, properties=None):
        """Update image metadata; uses the raw-data path when 'data' is given."""
        params = {}
        headers = {}
        if name is not None:
            params['name'] = name
        if container_format is not None:
            params['container_format'] = container_format
        if properties is not None:
            params['properties'] = properties
        headers.update(self._image_meta_to_headers(params))
        if data is not None:
            return self._update_with_data(image_id, headers, data)
        url = 'v1/images/%s' % image_id
        resp, body = self.put(url, data, headers)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return resp, body['image']

    def delete_image(self, image_id):
        """Delete the given image."""
        url = 'v1/images/%s' % image_id
        resp, body = self.delete(url)
        self.expected_success(200, resp.status)
        return resp, body

    def image_list(self, **kwargs):
        """List images; kwargs become URL query filters."""
        url = 'v1/images'
        if len(kwargs) > 0:
            url += '?%s' % urllib.urlencode(kwargs)
        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return resp, body['images']

    def image_list_detail(self, properties=dict(), changes_since=None,
                          **kwargs):
        """List images with detail; property filters become 'property-*' params."""
        url = 'v1/images/detail'
        params = {}
        for key, value in properties.items():
            params['property-%s' % key] = value
        kwargs.update(params)
        if changes_since is not None:
            kwargs['changes-since'] = changes_since
        if len(kwargs) > 0:
            url += '?%s' % urllib.urlencode(kwargs)
        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return resp, body['images']

    def get_image_meta(self, image_id):
        """HEAD the image and decode its metadata from the response headers."""
        url = 'v1/images/%s' % image_id
        resp, __ = self.head(url)
        self.expected_success(200, resp.status)
        body = self._image_meta_from_headers(resp)
        return resp, body

    def get_image(self, image_id):
        """GET the image's raw data."""
        url = 'v1/images/%s' % image_id
        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        return resp, body

    def is_resource_deleted(self, id):
        """Return True once the image metadata can no longer be fetched."""
        try:
            self.get_image_meta(id)
        except exceptions.NotFound:
            return True
        return False

    @property
    def resource_type(self):
        """Returns the primary type of resource this client works with."""
        return 'image_meta'

    def get_image_membership(self, image_id):
        """List the members an image is shared with."""
        url = 'v1/images/%s/members' % image_id
        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return resp, body

    def get_shared_images(self, member_id):
        """List images shared with the given member."""
        url = 'v1/shared-images/%s' % member_id
        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return resp, body

    def add_member(self, member_id, image_id, can_share=False):
        """Share an image with a member, optionally allowing re-sharing."""
        url = 'v1/images/%s/members/%s' % (image_id, member_id)
        body = None
        if can_share:
            body = json.dumps({'member': {'can_share': True}})
        resp, __ = self.put(url, body)
        self.expected_success(204, resp.status)
        return resp

    def delete_member(self, member_id, image_id):
        """Remove a member from an image's share list."""
        url = 'v1/images/%s/members/%s' % (image_id, member_id)
        resp, __ = self.delete(url)
        self.expected_success(204, resp.status)
        return resp

    # NOTE(afazekas): just for the wait function
    def _get_image_status(self, image_id):
        resp, meta = self.get_image_meta(image_id)
        status = meta['status']
        return status

    # NOTE(afazekas): Wait reinvented again. It is not in the correct layer
    def wait_for_image_status(self, image_id, status):
        """Waits for a Image to reach a given status."""
        start_time = time.time()
        old_value = value = self._get_image_status(image_id)
        while True:
            dtime = time.time() - start_time
            # NOTE(review): the loop sleeps both here and at the bottom, so
            # each poll actually waits 2 * build_interval — confirm intended.
            time.sleep(self.build_interval)
            if value != old_value:
                LOG.info('Value transition from "%s" to "%s"'
                         'in %d second(s).', old_value,
                         value, dtime)
            if value == status:
                return value
            # 'killed' is terminal; further waiting cannot succeed.
            if value == 'killed':
                raise exceptions.ImageKilledException(image_id=image_id,
                                                      status=status)
            if dtime > self.build_timeout:
                message = ('Time Limit Exceeded! (%ds)'
                           'while waiting for %s, '
                           'but we got %s.' %
                           (self.build_timeout, status, value))
                caller = misc_utils.find_test_caller()
                if caller:
                    message = '(%s) %s' % (caller, message)
                raise exceptions.TimeoutException(message)
            time.sleep(self.build_interval)
            old_value = value
            value = self._get_image_status(image_id)
|
|
"""Integration tests for the auth component."""
from datetime import timedelta
from http import HTTPStatus
from unittest.mock import patch
import pytest
from homeassistant.auth import InvalidAuthError
from homeassistant.auth.models import Credentials
from homeassistant.components import auth
from homeassistant.setup import async_setup_component
from homeassistant.util.dt import utcnow
from . import async_setup_auth
from tests.common import CLIENT_ID, CLIENT_REDIRECT_URI, MockUser
@pytest.fixture
def mock_credential():
    """Return a mock credential."""
    # Build the constructor arguments as a dict so the fixture reads as data.
    credential_fields = {
        "id": "mock-credential-id",
        "auth_provider_type": "insecure_example",
        "auth_provider_id": None,
        "data": {"username": "test-user"},
        "is_new": False,
    }
    return Credentials(**credential_fields)
async def async_setup_user_refresh_token(hass):
    """Create a testing user with a connected credential."""
    test_user = await hass.auth.async_create_user("Test User")
    # Attach an insecure_example credential so the refresh token can be
    # validated against that provider.
    test_credential = Credentials(
        id="mock-credential-id",
        auth_provider_type="insecure_example",
        auth_provider_id=None,
        data={"username": "test-user"},
        is_new=False,
    )
    test_user.credentials.append(test_credential)
    refresh_token = await hass.auth.async_create_refresh_token(
        test_user, CLIENT_ID, credential=test_credential
    )
    return refresh_token
async def test_login_new_user_and_trying_refresh_token(hass, aiohttp_client):
    """Test logging in with new user and refreshing tokens."""
    client = await async_setup_auth(hass, aiohttp_client, setup_api=True)
    # Start a login flow against the insecure_example provider.
    resp = await client.post(
        "/auth/login_flow",
        json={
            "client_id": CLIENT_ID,
            "handler": ["insecure_example", None],
            "redirect_uri": CLIENT_REDIRECT_URI,
        },
    )
    assert resp.status == HTTPStatus.OK
    step = await resp.json()
    # Complete the flow with valid credentials; result is an auth code.
    resp = await client.post(
        f"/auth/login_flow/{step['flow_id']}",
        json={"client_id": CLIENT_ID, "username": "test-user", "password": "test-pass"},
    )
    assert resp.status == HTTPStatus.OK
    step = await resp.json()
    code = step["result"]

    # Exchange code for tokens
    resp = await client.post(
        "/auth/token",
        data={"client_id": CLIENT_ID, "grant_type": "authorization_code", "code": code},
    )
    assert resp.status == HTTPStatus.OK
    tokens = await resp.json()
    assert (
        await hass.auth.async_validate_access_token(tokens["access_token"]) is not None
    )

    # Use refresh token to get more tokens.
    resp = await client.post(
        "/auth/token",
        data={
            "client_id": CLIENT_ID,
            "grant_type": "refresh_token",
            "refresh_token": tokens["refresh_token"],
        },
    )
    assert resp.status == HTTPStatus.OK
    tokens = await resp.json()
    # A refresh-token grant must not hand out another refresh token.
    assert "refresh_token" not in tokens
    assert (
        await hass.auth.async_validate_access_token(tokens["access_token"]) is not None
    )

    # Test using access token to hit API.
    resp = await client.get("/api/")
    assert resp.status == HTTPStatus.UNAUTHORIZED
    resp = await client.get(
        "/api/", headers={"authorization": f"Bearer {tokens['access_token']}"}
    )
    assert resp.status == HTTPStatus.OK
async def test_auth_code_checks_local_only_user(hass, aiohttp_client):
    """Test local only user cannot exchange auth code for refresh tokens when external."""
    client = await async_setup_auth(hass, aiohttp_client, setup_api=True)
    # Run a full login flow to obtain an auth code.
    resp = await client.post(
        "/auth/login_flow",
        json={
            "client_id": CLIENT_ID,
            "handler": ["insecure_example", None],
            "redirect_uri": CLIENT_REDIRECT_URI,
        },
    )
    assert resp.status == HTTPStatus.OK
    step = await resp.json()
    resp = await client.post(
        f"/auth/login_flow/{step['flow_id']}",
        json={"client_id": CLIENT_ID, "username": "test-user", "password": "test-pass"},
    )
    assert resp.status == HTTPStatus.OK
    step = await resp.json()
    code = step["result"]

    # Exchange code for tokens
    # Simulate the auth component deciding the user may not authenticate
    # (e.g. local-only user from an external address).
    with patch(
        "homeassistant.components.auth.async_user_not_allowed_do_auth",
        return_value="User is local only",
    ):
        resp = await client.post(
            "/auth/token",
            data={
                "client_id": CLIENT_ID,
                "grant_type": "authorization_code",
                "code": code,
            },
        )
    assert resp.status == HTTPStatus.FORBIDDEN
    error = await resp.json()
    assert error["error"] == "access_denied"
def test_auth_code_store_expiration(mock_credential):
    """Test that the auth code store will not return expired tokens."""
    store, retrieve = auth._create_auth_code_store()
    client_id = "bla"
    now = utcnow()

    def issue_then_retrieve(age):
        """Store a code at *now*, then retrieve it *age* later."""
        with patch("homeassistant.util.dt.utcnow", return_value=now):
            code = store(client_id, mock_credential)
        with patch("homeassistant.util.dt.utcnow", return_value=now + age):
            return retrieve(client_id, code)

    # At exactly ten minutes the code has expired.
    assert issue_then_retrieve(timedelta(minutes=10)) is None
    # One second before the cutoff it is still valid.
    assert issue_then_retrieve(timedelta(minutes=9, seconds=59)) == mock_credential
def test_auth_code_store_requires_credentials(mock_credential):
    """Test we require credentials."""
    store, _retrieve = auth._create_auth_code_store()

    # Storing a non-Credentials object (a user) must be rejected.
    with pytest.raises(ValueError):
        store(None, MockUser())

    # A real credential is accepted without raising.
    store(None, mock_credential)
async def test_ws_current_user(hass, hass_ws_client, hass_access_token):
    """Test the current user command with Home Assistant creds."""
    assert await async_setup_component(hass, "auth", {})
    refresh_token = await hass.auth.async_validate_access_token(hass_access_token)
    user = refresh_token.user
    client = await hass_ws_client(hass, hass_access_token)
    await client.send_json({"id": 5, "type": auth.WS_TYPE_CURRENT_USER})
    result = await client.receive_json()
    assert result["success"], result
    user_dict = result["result"]
    # The serialized user must mirror the authenticated user.
    assert user_dict["name"] == user.name
    assert user_dict["id"] == user.id
    assert user_dict["is_owner"] == user.is_owner
    assert len(user_dict["credentials"]) == 1
    hass_cred = user_dict["credentials"][0]
    assert hass_cred["auth_provider_type"] == "homeassistant"
    assert hass_cred["auth_provider_id"] is None
    # Credential secrets must never be exposed over the websocket.
    assert "data" not in hass_cred
async def test_cors_on_token(hass, aiohttp_client):
    """Test logging in with new user and refreshing tokens."""
    client = await async_setup_auth(hass, aiohttp_client)

    # Preflight request must advertise the allowed origin and method.
    preflight_headers = {
        "origin": "http://example.com",
        "Access-Control-Request-Method": "POST",
    }
    resp = await client.options("/auth/token", headers=preflight_headers)
    assert resp.headers["Access-Control-Allow-Origin"] == "http://example.com"
    assert resp.headers["Access-Control-Allow-Methods"] == "POST"

    # The actual POST must echo the allowed origin as well.
    resp = await client.post("/auth/token", headers={"origin": "http://example.com"})
    assert resp.headers["Access-Control-Allow-Origin"] == "http://example.com"
async def test_refresh_token_system_generated(hass, aiohttp_client):
    """Test that we can get access tokens for system generated user."""
    client = await async_setup_auth(hass, aiohttp_client)
    user = await hass.auth.async_create_system_user("Test System")
    # System users get refresh tokens without a client ID.
    refresh_token = await hass.auth.async_create_refresh_token(user, None)

    # Supplying a client ID for a system-user token must be rejected.
    resp = await client.post(
        "/auth/token",
        data={
            "client_id": "https://this-is-not-allowed-for-system-users.com/",
            "grant_type": "refresh_token",
            "refresh_token": refresh_token.token,
        },
    )
    assert resp.status == HTTPStatus.BAD_REQUEST
    result = await resp.json()
    assert result["error"] == "invalid_request"

    # Without a client ID the refresh succeeds.
    resp = await client.post(
        "/auth/token",
        data={"grant_type": "refresh_token", "refresh_token": refresh_token.token},
    )
    assert resp.status == HTTPStatus.OK
    tokens = await resp.json()
    assert (
        await hass.auth.async_validate_access_token(tokens["access_token"]) is not None
    )
async def test_refresh_token_different_client_id(hass, aiohttp_client):
    """Test that we verify client ID."""
    client = await async_setup_auth(hass, aiohttp_client)
    refresh_token = await async_setup_user_refresh_token(hass)

    # No client ID
    resp = await client.post(
        "/auth/token",
        data={"grant_type": "refresh_token", "refresh_token": refresh_token.token},
    )
    assert resp.status == HTTPStatus.BAD_REQUEST
    result = await resp.json()
    assert result["error"] == "invalid_request"

    # Different client ID
    resp = await client.post(
        "/auth/token",
        data={
            "client_id": "http://example-different.com",
            "grant_type": "refresh_token",
            "refresh_token": refresh_token.token,
        },
    )
    assert resp.status == HTTPStatus.BAD_REQUEST
    result = await resp.json()
    assert result["error"] == "invalid_request"

    # Correct
    resp = await client.post(
        "/auth/token",
        data={
            "client_id": CLIENT_ID,
            "grant_type": "refresh_token",
            "refresh_token": refresh_token.token,
        },
    )
    assert resp.status == HTTPStatus.OK
    tokens = await resp.json()
    assert (
        await hass.auth.async_validate_access_token(tokens["access_token"]) is not None
    )
async def test_refresh_token_checks_local_only_user(hass, aiohttp_client):
    """Test that we can't refresh token for a local only user when external."""
    client = await async_setup_auth(hass, aiohttp_client)
    refresh_token = await async_setup_user_refresh_token(hass)
    # Flag the user local-only so the auth gate applies.
    refresh_token.user.local_only = True

    # Simulate the auth component refusing the user (external request).
    with patch(
        "homeassistant.components.auth.async_user_not_allowed_do_auth",
        return_value="User is local only",
    ):
        resp = await client.post(
            "/auth/token",
            data={
                "client_id": CLIENT_ID,
                "grant_type": "refresh_token",
                "refresh_token": refresh_token.token,
            },
        )
    assert resp.status == HTTPStatus.FORBIDDEN
    result = await resp.json()
    assert result["error"] == "access_denied"
async def test_refresh_token_provider_rejected(
    hass, aiohttp_client, hass_admin_user, hass_admin_credential
):
    """Test that we verify client ID."""
    client = await async_setup_auth(hass, aiohttp_client)
    refresh_token = await async_setup_user_refresh_token(hass)

    # Rejected by provider
    # The provider's own validation hook raising InvalidAuthError must be
    # surfaced as an OAuth access_denied with the error message attached.
    with patch(
        "homeassistant.auth.providers.insecure_example.ExampleAuthProvider.async_validate_refresh_token",
        side_effect=InvalidAuthError("Invalid access"),
    ):
        resp = await client.post(
            "/auth/token",
            data={
                "client_id": CLIENT_ID,
                "grant_type": "refresh_token",
                "refresh_token": refresh_token.token,
            },
        )
    assert resp.status == HTTPStatus.FORBIDDEN
    result = await resp.json()
    assert result["error"] == "access_denied"
    assert result["error_description"] == "Invalid access"
async def test_revoking_refresh_token(hass, aiohttp_client):
    """Test that we can revoke refresh tokens."""
    client = await async_setup_auth(hass, aiohttp_client)
    refresh_token = await async_setup_user_refresh_token(hass)

    # Test that we can create an access token
    resp = await client.post(
        "/auth/token",
        data={
            "client_id": CLIENT_ID,
            "grant_type": "refresh_token",
            "refresh_token": refresh_token.token,
        },
    )
    assert resp.status == HTTPStatus.OK
    tokens = await resp.json()
    assert (
        await hass.auth.async_validate_access_token(tokens["access_token"]) is not None
    )

    # Revoke refresh token
    resp = await client.post(
        "/auth/token", data={"token": refresh_token.token, "action": "revoke"}
    )
    assert resp.status == HTTPStatus.OK

    # Old access token should be no longer valid
    assert await hass.auth.async_validate_access_token(tokens["access_token"]) is None

    # Test that we no longer can create an access token
    resp = await client.post(
        "/auth/token",
        data={
            "client_id": CLIENT_ID,
            "grant_type": "refresh_token",
            "refresh_token": refresh_token.token,
        },
    )
    assert resp.status == HTTPStatus.BAD_REQUEST
async def test_ws_long_lived_access_token(hass, hass_ws_client, hass_access_token):
    """Test generate long-lived access token."""
    assert await async_setup_component(hass, "auth", {"http": {}})
    ws_client = await hass_ws_client(hass, hass_access_token)

    # verify create long-lived access token
    await ws_client.send_json(
        {
            "id": 5,
            "type": auth.WS_TYPE_LONG_LIVED_ACCESS_TOKEN,
            "client_name": "GPS Logger",
            "lifespan": 365,  # days
        }
    )
    result = await ws_client.receive_json()
    assert result["success"], result
    long_lived_access_token = result["result"]
    assert long_lived_access_token is not None

    # The backing refresh token carries the client name but no client ID/icon.
    refresh_token = await hass.auth.async_validate_access_token(long_lived_access_token)
    assert refresh_token.client_id is None
    assert refresh_token.client_name == "GPS Logger"
    assert refresh_token.client_icon is None
async def test_ws_refresh_tokens(hass, hass_ws_client, hass_access_token):
    """Test fetching refresh token metadata."""
    assert await async_setup_component(hass, "auth", {"http": {}})
    ws_client = await hass_ws_client(hass, hass_access_token)
    await ws_client.send_json({"id": 5, "type": auth.WS_TYPE_REFRESH_TOKENS})
    result = await ws_client.receive_json()
    assert result["success"], result
    # Only the token backing this websocket connection exists.
    assert len(result["result"]) == 1
    token = result["result"][0]
    refresh_token = await hass.auth.async_validate_access_token(hass_access_token)
    # Serialized metadata must mirror the refresh token object.
    assert token["id"] == refresh_token.id
    assert token["type"] == refresh_token.token_type
    assert token["client_id"] == refresh_token.client_id
    assert token["client_name"] == refresh_token.client_name
    assert token["client_icon"] == refresh_token.client_icon
    assert token["created_at"] == refresh_token.created_at.isoformat()
    assert token["is_current"] is True
    assert token["last_used_at"] == refresh_token.last_used_at.isoformat()
    assert token["last_used_ip"] == refresh_token.last_used_ip
async def test_ws_delete_refresh_token(
    hass, hass_admin_user, hass_admin_credential, hass_ws_client, hass_access_token
):
    """Test deleting a refresh token."""
    assert await async_setup_component(hass, "auth", {"http": {}})
    # Create a second refresh token to delete over the websocket.
    refresh_token = await hass.auth.async_create_refresh_token(
        hass_admin_user, CLIENT_ID, credential=hass_admin_credential
    )
    ws_client = await hass_ws_client(hass, hass_access_token)

    # verify create long-lived access token
    await ws_client.send_json(
        {
            "id": 5,
            "type": auth.WS_TYPE_DELETE_REFRESH_TOKEN,
            "refresh_token_id": refresh_token.id,
        }
    )
    result = await ws_client.receive_json()
    assert result["success"], result

    # The token must be gone after deletion.
    refresh_token = await hass.auth.async_get_refresh_token(refresh_token.id)
    assert refresh_token is None
async def test_ws_sign_path(hass, hass_ws_client, hass_access_token):
    """Test signing a path."""
    assert await async_setup_component(hass, "auth", {"http": {}})
    ws_client = await hass_ws_client(hass, hass_access_token)
    # Stub the signer so the test only checks command wiring, not crypto.
    with patch(
        "homeassistant.components.auth.async_sign_path", return_value="hello_world"
    ) as mock_sign:
        await ws_client.send_json(
            {
                "id": 5,
                "type": auth.WS_TYPE_SIGN_PATH,
                "path": "/api/hello",
                "expires": 20,  # seconds
            }
        )
        result = await ws_client.receive_json()
    assert result["success"], result
    assert result["result"] == {"path": "hello_world"}
    # The signer must be called once with the path and expiry as a timedelta.
    assert len(mock_sign.mock_calls) == 1
    hass, path, expires = mock_sign.mock_calls[0][1]
    assert path == "/api/hello"
    assert expires.total_seconds() == 20
|
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import re
import shutil
import stat
import subprocess
import tempfile
from catapult_base import binary_manager
from telemetry.core import android_platform
from telemetry.core import exceptions
from telemetry.core import platform
from telemetry.core import util
from telemetry import decorators
from telemetry.internal.forwarders import android_forwarder
from telemetry.internal.image_processing import video
from telemetry.internal.platform import android_device
from telemetry.internal.platform import linux_based_platform_backend
from telemetry.internal.platform.power_monitor import android_dumpsys_power_monitor
from telemetry.internal.platform.power_monitor import android_temperature_monitor
from telemetry.internal.platform.power_monitor import monsoon_power_monitor
from telemetry.internal.platform.power_monitor import power_monitor_controller
from telemetry.internal.platform.power_monitor import sysfs_power_monitor
from telemetry.internal.platform.profiler import android_prebuilt_profiler_helper
from telemetry.internal.util import exception_formatter
from telemetry.internal.util import external_modules
psutil = external_modules.ImportOptionalModule('psutil')
util.AddDirToPythonPath(util.GetChromiumSrcDir(),
'third_party', 'webpagereplay')
import adb_install_cert
import certutils
import platformsettings
# Get build/android scripts into our path.
util.AddDirToPythonPath(util.GetChromiumSrcDir(), 'build', 'android')
from pylib import constants
from pylib import screenshot
from pylib.device import battery_utils
from pylib.device import device_errors
from pylib.device import device_utils
from pylib.perf import cache_control
from pylib.perf import perf_control
from pylib.perf import thermal_throttle
try:
from pylib.perf import surface_stats_collector
except Exception:
surface_stats_collector = None
_DEVICE_COPY_SCRIPT_FILE = os.path.join(
constants.DIR_SOURCE_ROOT, 'build', 'android', 'pylib',
'efficient_android_directory_copy.sh')
_DEVICE_COPY_SCRIPT_LOCATION = (
'/data/local/tmp/efficient_android_directory_copy.sh')
def _SetupPrebuiltTools(device):
  """Some of the android pylib scripts we depend on are lame and expect
  binaries to be in the out/ directory. So we copy any prebuilt binaries there
  as a prereq.

  Returns True when all required tools are available in out/; for
  non-ARM devices, returns whether every device tool has a local build.
  """
  # TODO(bulach): Build the targets for x86/mips.
  # Tools that must exist on the device side vs. the host side.
  device_tools = [
      'file_poller',
      'forwarder_dist/device_forwarder',
      'md5sum_dist/md5sum_bin',
      'purge_ashmem',
      'run_pie',
  ]
  host_tools = [
      'bitmaptools',
      'md5sum_bin_host',
  ]
  platform_name = platform.GetHostPlatform().GetOSName()
  if platform_name == 'linux':
    host_tools.append('host_forwarder')
  arch_name = device.product_cpu_abi
  # Prebuilts are only published for ARM ABIs; other ABIs must have local
  # builds of every device tool.
  has_device_prebuilt = (arch_name.startswith('armeabi')
                         or arch_name.startswith('arm64'))
  if not has_device_prebuilt:
    logging.warning('Unknown architecture type: %s' % arch_name)
    return all([binary_manager.LocalPath(t, platform_name, arch_name)
                for t in device_tools])
  build_type = None
  for t in device_tools + host_tools:
    executable = os.path.basename(t)
    locally_built_path = binary_manager.LocalPath(
        t, platform_name, arch_name)
    # Derive the build type (e.g. Release) from the first locally built
    # tool found, defaulting to Release.
    if not build_type:
      build_type = _GetBuildTypeOfPath(locally_built_path) or 'Release'
      constants.SetBuildType(build_type)
    dest = os.path.join(constants.GetOutDirectory(), t)
    if not locally_built_path:
      # No local build: fetch the prebuilt from cloud storage into out/.
      logging.info('Setting up prebuilt %s', dest)
      if not os.path.exists(os.path.dirname(dest)):
        os.makedirs(os.path.dirname(dest))
      platform_name = ('android' if t in device_tools else
                       platform.GetHostPlatform().GetOSName())
      bin_arch_name = (arch_name if t in device_tools else
                       platform.GetHostPlatform().GetArchName())
      prebuilt_path = binary_manager.FetchPath(
          executable, bin_arch_name, platform_name)
      if not prebuilt_path or not os.path.exists(prebuilt_path):
        raise NotImplementedError("""
%s must be checked into cloud storage.
Instructions:
http://www.chromium.org/developers/telemetry/upload_to_cloud_storage
""" % t)
      shutil.copyfile(prebuilt_path, dest)
      # Owner read/write/execute only.
      os.chmod(dest, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
  return True
def _GetBuildTypeOfPath(path):
  """Returns the build type (e.g. 'Release') whose out-directory appears in
  |path|, or None if |path| is falsy or matches no known build directory."""
  if not path:
    return None
  matches = (build_type
             for build_dir, build_type in util.GetBuildDirectories()
             if os.path.join(build_dir, build_type) in path)
  return next(matches, None)
class AndroidPlatformBackend(
linux_based_platform_backend.LinuxBasedPlatformBackend):
  def __init__(self, device, finder_options):
    """Initializes the backend and all device-side helpers for |device|.

    Args:
      device: An android_device.AndroidDevice instance.
      finder_options: Browser finder options (controls RNDIS usage etc.).

    Raises:
      exceptions.PlatformError: If prebuilt android tools are unavailable.
    """
    assert device, (
        'AndroidPlatformBackend can only be initialized from remote device')
    super(AndroidPlatformBackend, self).__init__(device)
    self._device = device_utils.DeviceUtils(device.device_id)
    installed_prebuilt_tools = _SetupPrebuiltTools(self._device)
    if not installed_prebuilt_tools:
      logging.error(
          '%s detected, however prebuilt android tools could not '
          'be used. To run on Android you must build them first:\n'
          '  $ ninja -C out/Release android_tools' % device.name)
      raise exceptions.PlatformError()
    # Trying to root the device, if possible.
    if not self._device.HasRoot():
      try:
        self._device.EnableRoot()
      except device_errors.CommandFailedError:
        # Rooting is best-effort; some features below degrade without it.
        logging.warning('Unable to root %s', str(self._device))
    self._battery = battery_utils.BatteryUtils(self._device)
    self._enable_performance_mode = device.enable_performance_mode
    self._surface_stats_collector = None
    self._perf_tests_setup = perf_control.PerfControl(self._device)
    self._thermal_throttle = thermal_throttle.ThermalThrottle(self._device)
    self._raw_display_frame_rate_measurements = []
    try:
      # Protected files (e.g. /proc entries) need root or su access.
      self._can_access_protected_file_contents = (
          self._device.HasRoot() or self._device.NeedsSU())
    except:
      logging.exception('New exception caused by DeviceUtils conversion')
      raise
    self._device_copy_script = None
    power_controller = power_monitor_controller.PowerMonitorController([
        monsoon_power_monitor.MonsoonPowerMonitor(self._device, self),
        android_dumpsys_power_monitor.DumpsysPowerMonitor(self._battery, self),
        sysfs_power_monitor.SysfsPowerMonitor(self, standalone=True),
    ], self._battery)
    self._power_monitor = android_temperature_monitor.AndroidTemperatureMonitor(
        power_controller, self._device)
    self._video_recorder = None
    self._installed_applications = None
    self._wpr_ca_cert_path = None
    self._device_cert_util = None
    self._is_test_ca_installed = False
    # RNDIS forwarding is needed when explicitly requested, when network
    # simulation is on, or on non-linux hosts (no host_forwarder there).
    self._use_rndis_forwarder = (
        finder_options.android_rndis or
        finder_options.browser_options.netsim or
        platform.GetHostPlatform().GetOSName() != 'linux')
    _FixPossibleAdbInstability()
  @classmethod
  def SupportsDevice(cls, device):
    """Returns True if |device| is an AndroidDevice this backend can drive."""
    return isinstance(device, android_device.AndroidDevice)
  @classmethod
  def CreatePlatformForDevice(cls, device, finder_options):
    """Creates an AndroidPlatform wrapping a new backend for |device|."""
    assert cls.SupportsDevice(device)
    platform_backend = AndroidPlatformBackend(device, finder_options)
    return android_platform.AndroidPlatform(platform_backend)
  @property
  def forwarder_factory(self):
    # Created lazily so construction stays cheap when forwarding is unused.
    if not self._forwarder_factory:
      self._forwarder_factory = android_forwarder.AndroidForwarderFactory(
          self._device, self._use_rndis_forwarder)
    return self._forwarder_factory
  @property
  def use_rndis_forwarder(self):
    # True when RNDIS (USB tethered) forwarding must be used.
    return self._use_rndis_forwarder
  @property
  def device(self):
    # The underlying device_utils.DeviceUtils instance.
    return self._device
  def IsDisplayTracingSupported(self):
    """Returns True if SurfaceFlinger stats can be collected (build >= 'J')."""
    return bool(self.GetOSVersionName() >= 'J')
  def StartDisplayTracing(self):
    """Starts collecting SurfaceFlinger surface statistics."""
    assert not self._surface_stats_collector
    # Clear any leftover data from previous timed out tests
    self._raw_display_frame_rate_measurements = []
    self._surface_stats_collector = \
        surface_stats_collector.SurfaceStatsCollector(self._device)
    self._surface_stats_collector.Start()
  def StopDisplayTracing(self):
    """Stops collection and returns vsync timestamps as trace events.

    Returns None if tracing was never started.
    """
    if not self._surface_stats_collector:
      return
    refresh_period, timestamps = self._surface_stats_collector.Stop()
    pid = self._surface_stats_collector.GetSurfaceFlingerPid()
    self._surface_stats_collector = None
    # TODO(sullivan): should this code be inline, or live elsewhere?
    events = []
    for ts in timestamps:
      events.append({
          'cat': 'SurfaceFlinger',
          'name': 'vsync_before',
          'ts': ts,
          'pid': pid,
          'tid': pid,
          'args': {'data': {
              'frame_count': 1,
              'refresh_period': refresh_period,
          }}
      })
    return events
  def SetFullPerformanceModeEnabled(self, enabled):
    """Pins or restores the device's performance mode, if enabled for it."""
    if not self._enable_performance_mode:
      logging.warning('CPU governor will not be set!')
      return
    if enabled:
      self._perf_tests_setup.SetHighPerfMode()
    else:
      self._perf_tests_setup.SetDefaultPerfMode()
  def CanMonitorThermalThrottling(self):
    """Thermal throttling monitoring is always available on Android."""
    return True
  def IsThermallyThrottled(self):
    """Returns True if the device is currently thermally throttled."""
    return self._thermal_throttle.IsThrottled()
  def HasBeenThermallyThrottled(self):
    """Returns True if the device throttled at any point while monitored."""
    return self._thermal_throttle.HasBeenThrottled()
  def GetCpuStats(self, pid):
    """Returns CPU stats for |pid|; empty dict on non-rooted devices."""
    if not self._can_access_protected_file_contents:
      logging.warning('CPU stats cannot be retrieved on non-rooted device.')
      return {}
    return super(AndroidPlatformBackend, self).GetCpuStats(pid)
  def GetCpuTimestamp(self):
    """Returns a CPU timestamp; empty dict on non-rooted devices."""
    if not self._can_access_protected_file_contents:
      logging.warning('CPU timestamp cannot be retrieved on non-rooted device.')
      return {}
    return super(AndroidPlatformBackend, self).GetCpuTimestamp()
  def PurgeUnpinnedMemory(self):
    """Purges the unpinned ashmem memory for the whole system.
    This can be used to make memory measurements more stable. Requires root.

    Raises:
      Exception: If the purge_ashmem tool cannot be installed on the device.
    """
    if not self._can_access_protected_file_contents:
      logging.warning('Cannot run purge_ashmem. Requires a rooted device.')
      return
    if not android_prebuilt_profiler_helper.InstallOnDevice(
        self._device, 'purge_ashmem'):
      raise Exception('Error installing purge_ashmem.')
    try:
      output = self._device.RunShellCommand(
          android_prebuilt_profiler_helper.GetDevicePath('purge_ashmem'))
    except:
      logging.exception('New exception caused by DeviceUtils conversion')
      raise
    # Forward the tool's output to the log for diagnostics.
    for l in output:
      logging.info(l)
def GetMemoryStats(self, pid):
memory_usage = self._device.GetMemoryUsageForPid(pid)
if not memory_usage:
return {}
return {'ProportionalSetSize': memory_usage['Pss'] * 1024,
'SharedDirty': memory_usage['Shared_Dirty'] * 1024,
'PrivateDirty': memory_usage['Private_Dirty'] * 1024,
'VMPeak': memory_usage['VmHWM'] * 1024}
def GetChildPids(self, pid):
child_pids = []
ps = self.GetPsOutput(['pid', 'name'])
for curr_pid, curr_name in ps:
if int(curr_pid) == pid:
name = curr_name
for curr_pid, curr_name in ps:
if curr_name.startswith(name) and curr_name != name:
child_pids.append(int(curr_pid))
break
return child_pids
  @decorators.Cache
  def GetCommandLine(self, pid):
    """Returns the process name for |pid|.

    Raises:
      exceptions.ProcessGoneException: If |pid| is no longer running.
    """
    ps = self.GetPsOutput(['pid', 'name'], pid)
    if not ps:
      raise exceptions.ProcessGoneException()
    return ps[0][1]
  @decorators.Cache
  def GetArchName(self):
    """Returns the device ABI string."""
    return self._device.GetABI()
  def GetOSName(self):
    """Returns the OS name; always 'android'."""
    return 'android'
  def GetDeviceTypeName(self):
    """Returns the device product model string."""
    return self._device.product_model
  @decorators.Cache
  def GetOSVersionName(self):
    """Returns the first letter of the build id (the release letter)."""
    return self._device.GetProp('ro.build.id')[0]
  def CanFlushIndividualFilesFromSystemCache(self):
    """Per-file cache flushing is not supported on Android."""
    return False
  def FlushEntireSystemCache(self):
    """Drops the OS RAM caches on the device."""
    cache = cache_control.CacheControl(self._device)
    cache.DropRamCaches()
  def FlushSystemCacheForDirectory(self, directory):
    """Unsupported; see CanFlushIndividualFilesFromSystemCache()."""
    raise NotImplementedError()
  def FlushDnsCache(self):
    """Flushes the device DNS resolver cache (requires root)."""
    self._device.RunShellCommand('ndc resolver flushdefaultif', as_root=True)
  def StopApplication(self, application):
    """Stop the given |application|.
    Args:
      application: The full package name string of the application to stop.
    """
    self._device.ForceStop(application)
  def KillApplication(self, application):
    """Kill the given |application|.
    Might be used instead of ForceStop for efficiency reasons.
    Args:
      application: The full package name string of the application to kill.
    """
    self._device.KillAll(application, blocking=True, quiet=True)
  def LaunchApplication(
      self, application, parameters=None, elevate_privilege=False):
    """Launches the given |application| with a list of |parameters| on the OS.
    Args:
      application: The full package name string of the application to launch.
      parameters: A list of parameters to be passed to the ActivityManager.
      elevate_privilege: Currently unimplemented on Android.

    Raises:
      ValueError: If the ActivityManager reports a launch error.
    """
    if elevate_privilege:
      raise NotImplementedError("elevate_privilege isn't supported on android.")
    if not parameters:
      parameters = ''
    result_lines = self._device.RunShellCommand('am start %s %s' %
                                                (parameters, application))
    for line in result_lines:
      if line.startswith('Error: '):
        raise ValueError('Failed to start "%s" with error\n  %s' %
                         (application, line))
  def IsApplicationRunning(self, application):
    """Returns True if any process exists for |application|."""
    return len(self._device.GetPids(application)) > 0
  def CanLaunchApplication(self, application):
    """Returns True if |application| is installed on the device."""
    if not self._installed_applications:
      # Cache the package listing; invalidated by InstallApplication().
      self._installed_applications = self._device.RunShellCommand(
          'pm list packages')
    return 'package:' + application in self._installed_applications
  def InstallApplication(self, application):
    """Installs the APK at |application| and invalidates the package cache."""
    self._installed_applications = None
    self._device.Install(application)
  @decorators.Cache
  def CanCaptureVideo(self):
    """Screen video capture requires build id 'K' or later."""
    return self.GetOSVersionName() >= 'K'
  def StartVideoCapture(self, min_bitrate_mbps):
    """Starts the video capture at specified bitrate.

    Raises:
      ValueError: If |min_bitrate_mbps| exceeds the 100mbps maximum.
    """
    # Clamp the requested bitrate into the supported range.
    min_bitrate_mbps = max(min_bitrate_mbps, 0.1)
    if min_bitrate_mbps > 100:
      raise ValueError('Android video capture cannot capture at %dmbps. '
                       'Max capture rate is 100mbps.' % min_bitrate_mbps)
    if self.is_video_capture_running:
      self._video_recorder.Stop()
    self._video_recorder = screenshot.VideoRecorder(
        self._device, megabits_per_second=min_bitrate_mbps)
    self._video_recorder.Start()
    util.WaitFor(self._video_recorder.IsStarted, 5)
  @property
  def is_video_capture_running(self):
    # True while a VideoRecorder is active.
    return self._video_recorder is not None
  def StopVideoCapture(self):
    """Stops capture and returns the recording as a video.Video object."""
    assert self.is_video_capture_running, 'Must start video capture first'
    self._video_recorder.Stop()
    video_file_obj = tempfile.NamedTemporaryFile()
    self._video_recorder.Pull(video_file_obj.name)
    self._video_recorder = None
    return video.Video(video_file_obj)
  def CanMonitorPower(self):
    """Returns True if any configured power monitor is usable."""
    return self._power_monitor.CanMonitorPower()
  def StartMonitoringPower(self, browser):
    """Begins power data collection for |browser|."""
    self._power_monitor.StartMonitoringPower(browser)
  def StopMonitoringPower(self):
    """Ends power data collection and returns the measurements."""
    return self._power_monitor.StopMonitoringPower()
  def CanMonitorNetworkData(self):
    """Network data collection requires Android L (Lollipop) or later."""
    if (self._device.build_version_sdk <
        constants.ANDROID_SDK_VERSION_CODES.LOLLIPOP):
      return False
    return True
  def GetNetworkData(self, browser):
    """Returns network data for |browser|'s package via BatteryUtils."""
    return self._battery.GetNetworkData(browser._browser_backend.package)
  def PathExists(self, device_path, timeout=None, retries=None):
    """ Return whether the given path exists on the device.
    This method is the same as
    android.pylib.device.device_utils.DeviceUtils.PathExists.
    """
    return self._device.PathExists(
        device_path, timeout=timeout, retries=retries)
  def GetFileContents(self, fname):
    """Returns the contents of |fname| on the device; '' without root."""
    if not self._can_access_protected_file_contents:
      logging.warning('%s cannot be retrieved on non-rooted device.' % fname)
      return ''
    return self._device.ReadFile(fname, as_root=True)
def GetPsOutput(self, columns, pid=None):
assert columns == ['pid', 'name'] or columns == ['pid'], \
'Only know how to return pid and name. Requested: ' + columns
command = 'ps'
if pid:
command += ' -p %d' % pid
ps = self._device.RunShellCommand(command, large_output=True)[1:]
output = []
for line in ps:
data = line.split()
curr_pid = data[1]
curr_name = data[-1]
if columns == ['pid', 'name']:
output.append([curr_pid, curr_name])
else:
output.append([curr_pid])
return output
  def RunCommand(self, command):
    """Runs |command| in the device shell; returns newline-joined output."""
    return '\n'.join(self._device.RunShellCommand(command))
@staticmethod
def ParseCStateSample(sample):
sample_stats = {}
for cpu in sample:
values = sample[cpu].splitlines()
# Each state has three values after excluding the time value.
num_states = (len(values) - 1) / 3
names = values[:num_states]
times = values[num_states:2 * num_states]
cstates = {'C0': int(values[-1]) * 10 ** 6}
for i, state in enumerate(names):
if state == 'C0':
# The Exynos cpuidle driver for the Nexus 10 uses the name 'C0' for
# its WFI state.
# TODO(tmandel): We should verify that no other Android device
# actually reports time in C0 causing this to report active time as
# idle time.
state = 'WFI'
cstates[state] = int(times[i])
cstates['C0'] -= int(times[i])
sample_stats[cpu] = cstates
return sample_stats
  def SetRelaxSslCheck(self, value):
    """Sets the 'socket.relaxsslcheck' property; returns the old value."""
    old_flag = self._device.GetProp('socket.relaxsslcheck')
    self._device.SetProp('socket.relaxsslcheck', value)
    return old_flag
  def ForwardHostToDevice(self, host_port, device_port):
    """Forwards TCP |host_port| on the host to |device_port| on the device."""
    self._device.adb.Forward('tcp:%d' % host_port, device_port)
  def DismissCrashDialogIfNeeded(self):
    """Dismiss any error dialogs.
    Limit the number in case we have an error loop or we are failing to dismiss.
    """
    # Up to 10 attempts; stop as soon as no dialog needed dismissing.
    for _ in xrange(10):
      if not self._device.old_interface.DismissCrashDialogIfNeeded():
        break
  def IsAppRunning(self, process_name):
    """Determine if the given process is running.
    Args:
      process_name: The full package name string of the process.
    """
    return bool(self._device.GetPids(process_name))
  @property
  def wpr_ca_cert_path(self):
    """Path to root certificate installed on browser (or None).
    If this is set, web page replay will use it to sign HTTPS responses.
    """
    if self._wpr_ca_cert_path:
      assert os.path.isfile(self._wpr_ca_cert_path)
    return self._wpr_ca_cert_path
  def InstallTestCa(self):
    """Install a randomly generated root CA on the android device.
    This allows transparent HTTPS testing with WPR server without need
    to tweak application network stack.

    Failures at any step fall back to ignoring certificate errors rather
    than raising; success is reflected in is_test_ca_installed.
    """
    # TODO(slamm): Move certificate creation related to webpagereplay.py.
    # The only code that needs to be in platform backend is installing the cert.
    if certutils.openssl_import_error:
      logging.warning(
          'The OpenSSL module is unavailable. '
          'Will fallback to ignoring certificate errors.')
      return
    if not platformsettings.HasSniSupport():
      logging.warning(
          'Web Page Replay requires SNI support (pyOpenSSL 0.13 or greater) '
          'to generate certificates from a test CA. '
          'Will fallback to ignoring certificate errors.')
      return
    try:
      self._wpr_ca_cert_path = os.path.join(tempfile.mkdtemp(), 'testca.pem')
      certutils.write_dummy_ca_cert(*certutils.generate_dummy_ca_cert(),
                                    cert_path=self._wpr_ca_cert_path)
      self._device_cert_util = adb_install_cert.AndroidCertInstaller(
          self._device.adb.GetDeviceSerial(), None, self._wpr_ca_cert_path)
      logging.info('Installing test certificate authority on device: %s',
                   str(self._device))
      self._device_cert_util.install_cert(overwrite_cert=True)
      self._is_test_ca_installed = True
    except Exception as e:
      # Fallback to ignoring certificate errors.
      self.RemoveTestCa()
      logging.warning(
          'Unable to install test certificate authority on device: %s. '
          'Will fallback to ignoring certificate errors. Install error: %s',
          str(self._device), e)
  @property
  def is_test_ca_installed(self):
    # True only after InstallTestCa() succeeded and before RemoveTestCa().
    return self._is_test_ca_installed
  def RemoveTestCa(self):
    """Remove root CA generated by previous call to InstallTestCa().
    Removes the test root certificate from both the device and host machine.
    Safe to call even if installation never happened or partially failed.
    """
    if not self._wpr_ca_cert_path:
      return
    if self._is_test_ca_installed:
      try:
        self._device_cert_util.remove_cert()
      except Exception:
        # Best effort cleanup - show the error and continue.
        exception_formatter.PrintFormattedException(
            msg=('Error while trying to remove certificate authority: %s. '
                 % str(self._device)))
      self._is_test_ca_installed = False
    shutil.rmtree(os.path.dirname(self._wpr_ca_cert_path), ignore_errors=True)
    self._wpr_ca_cert_path = None
    self._device_cert_util = None
  def PushProfile(self, package, new_profile_dir):
    """Replace application profile with files found on host machine.
    Pushing the profile is slow, so we don't want to do it every time.
    Avoid this by pushing to a safe location using PushChangedFiles, and
    then copying into the correct location on each test run.
    Args:
      package: The full package name string of the application for which the
          profile is to be updated.
      new_profile_dir: Location where profile to be pushed is stored on the
          host machine.
    """
    (profile_parent, profile_base) = os.path.split(new_profile_dir)
    # If the path ends with a '/' python split will return an empty string for
    # the base name; so we now need to get the base name from the directory.
    if not profile_base:
      profile_base = os.path.basename(profile_parent)
    saved_profile_location = '/sdcard/profile/%s' % profile_base
    self._device.PushChangedFiles([(new_profile_dir, saved_profile_location)])
    profile_dir = self._GetProfileDir(package)
    try:
      self._EfficientDeviceDirectoryCopy(
          saved_profile_location, profile_dir)
    except:
      logging.exception('New exception caused by DeviceUtils conversion')
      raise
    # Extract the app's uid so the pushed files can be chowned to its user.
    dumpsys = self._device.RunShellCommand('dumpsys package %s' % package)
    id_line = next(line for line in dumpsys if 'userId=' in line)
    uid = re.search(r'\d+', id_line).group()
    files = self._device.RunShellCommand(
        'ls "%s"' % profile_dir, as_root=True)
    files.remove('lib')
    paths = ['%s%s' % (profile_dir, f) for f in files]
    for path in paths:
      # Chown the entry plus up to three directory levels beneath it.
      extended_path = '%s %s/* %s/*/* %s/*/*/*' % (path, path, path, path)
      self._device.RunShellCommand(
          'chown %s.%s %s' % (uid, uid, extended_path))
def _EfficientDeviceDirectoryCopy(self, source, dest):
if not self._device_copy_script:
self._device.adb.Push(
_DEVICE_COPY_SCRIPT_FILE,
_DEVICE_COPY_SCRIPT_LOCATION)
self._device_copy_script = _DEVICE_COPY_SCRIPT_FILE
self._device.RunShellCommand(
['sh', self._device_copy_script, source, dest])
  def RemoveProfile(self, package, ignore_list):
    """Delete application profile on device.
    Args:
      package: The full package name string of the application for which the
          profile is to be deleted.
      ignore_list: List of files to keep.
    """
    profile_dir = self._GetProfileDir(package)
    files = self._device.RunShellCommand(
        'ls "%s"' % profile_dir, as_root=True)
    # Quote each path so names with spaces survive the shell command.
    paths = ['"%s%s"' % (profile_dir, f) for f in files
             if f not in ignore_list]
    self._device.RunShellCommand('rm -r %s' % ' '.join(paths), as_root=True)
  def PullProfile(self, package, output_profile_path):
    """Copy application profile from device to host machine.
    Args:
      package: The full package name string of the application for which the
          profile is to be copied.
      output_profile_path: Location where profile is to be stored on the
          host machine.
    """
    profile_dir = self._GetProfileDir(package)
    logging.info("Pulling profile directory from device: '%s'->'%s'.",
                 profile_dir, output_profile_path)
    # To minimize bandwidth it might be good to look at whether all the data
    # pulled down is really needed e.g. .pak files.
    if not os.path.exists(output_profile_path):
      os.makedirs(output_profile_path)
    try:
      files = self._device.RunShellCommand(['ls', profile_dir])
    except:
      logging.exception('New exception caused by DeviceUtils conversion')
      raise
    for f in files:
      # Don't pull lib, since it is created by the installer.
      if f != 'lib':
        source = '%s%s' % (profile_dir, f)
        dest = os.path.join(output_profile_path, f)
        try:
          self._device.PullFile(source, dest, timeout=240)
        except device_errors.CommandFailedError:
          # Best effort: log the failure and continue with remaining files.
          logging.exception('Failed to pull %s to %s', source, dest)
  def _GetProfileDir(self, package):
    """Returns the on-device location where the application profile is stored
    based on Android convention.
    Args:
      package: The full package name string of the application.

    Returns:
      The profile directory path, always with a trailing '/'.
    """
    return '/data/data/%s/' % package
  def SetDebugApp(self, package):
    """Set application to debugging.
    Args:
      package: The full package name string of the application.
    """
    if self._device.IsUserBuild():
      logging.debug('User build device, setting debug app')
    self._device.RunShellCommand('am set-debug-app --persistent %s' % package)
  def GetStandardOutput(self, number_of_lines=500):
    """Returns most recent lines of logcat dump.
    Args:
      number_of_lines: Number of lines of log to return.
    """
    return '\n'.join(self._device.RunShellCommand(
        'logcat -d -t %d' % number_of_lines))
  def GetStackTrace(self, target_arch):
    """Returns stack trace.
    The stack trace consists of raw logcat dump, logcat dump with symbols,
    and stack info from tombstone files.
    Args:
      target_arch: String specifying device architecture (eg. arm, arm64, mips,
        x86, x86_64)
    """
    def Decorate(title, content):
      # Wrap each section with a title header and a separator rule.
      return "%s\n%s\n%s\n" % (title, content, '*' * 80)
    # Get the last lines of logcat (large enough to contain stacktrace)
    logcat = self.GetStandardOutput()
    ret = Decorate('Logcat', logcat)
    stack = os.path.join(util.GetChromiumSrcDir(), 'third_party',
                         'android_platform', 'development', 'scripts', 'stack')
    # Try to symbolize logcat.
    if os.path.exists(stack):
      cmd = [stack]
      if target_arch:
        cmd.append('--arch=%s' % target_arch)
      p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
      ret += Decorate('Stack from Logcat', p.communicate(input=logcat)[0])
    # Try to get tombstones.
    tombstones = os.path.join(util.GetChromiumSrcDir(), 'build', 'android',
                              'tombstones.py')
    if os.path.exists(tombstones):
      ret += Decorate('Tombstones',
                      subprocess.Popen([tombstones, '-w', '--device',
                                        self._device.adb.GetDeviceSerial()],
                                       stdout=subprocess.PIPE).communicate()[0])
    return ret
  @staticmethod
  def _IsScreenOn(input_methods):
    """Parser method of IsScreenOn()
    Args:
      input_methods: Output from dumpsys input_methods
    Returns:
      boolean: True if screen is on, false if screen is off.
    Raises:
      ValueError: An unknown value is found for the screen state.
      AndroidDeviceParsingError: Error in detecting screen state.
    """
    # Different Android releases report the state under different keys
    # ('mScreenOn' vs 'mInteractive'), so both are checked.
    for line in input_methods:
      if 'mScreenOn' in line or 'mInteractive' in line:
        for pair in line.strip().split(' '):
          key, value = pair.split('=', 1)
          if key == 'mScreenOn' or key == 'mInteractive':
            if value == 'true':
              return True
            elif value == 'false':
              return False
            else:
              raise ValueError('Unknown value for %s: %s' % (key, value))
    raise exceptions.AndroidDeviceParsingError(str(input_methods))
  def IsScreenOn(self):
    """Determines if device screen is on."""
    input_methods = self._device.RunShellCommand('dumpsys input_method',
                                                 check_return=True)
    return self._IsScreenOn(input_methods)
  @staticmethod
  def _IsScreenLocked(input_methods):
    """Parser method for IsScreenLocked()
    Args:
      input_methods: Output from dumpsys input_methods
    Returns:
      boolean: True if screen is locked, false if screen is not locked.
    Raises:
      ValueError: An unknown value is found for the screen lock state.
      AndroidDeviceParsingError: Error in detecting screen state.
    """
    # 'mHasBeenInactive' is used as a proxy for the lock state here.
    for line in input_methods:
      if 'mHasBeenInactive' in line:
        for pair in line.strip().split(' '):
          key, value = pair.split('=', 1)
          if key == 'mHasBeenInactive':
            if value == 'true':
              return True
            elif value == 'false':
              return False
            else:
              raise ValueError('Unknown value for %s: %s' % (key, value))
    raise exceptions.AndroidDeviceParsingError(str(input_methods))
  def IsScreenLocked(self):
    """Determines if device screen is locked."""
    input_methods = self._device.RunShellCommand('dumpsys input_method',
                                                 check_return=True)
    return self._IsScreenLocked(input_methods)
def _FixPossibleAdbInstability():
  """Host side workaround for crbug.com/268450 (adb instability).
  The adb server has a race which is mitigated by binding to a single core.
  No-op when psutil is unavailable.
  """
  if not psutil:
    return
  for process in psutil.process_iter():
    try:
      # psutil 2.0 changed name/cpu_affinity from attributes to methods.
      if psutil.version_info >= (2, 0):
        if 'adb' in process.name():
          process.cpu_affinity([0])
      else:
        if 'adb' in process.name:
          process.set_cpu_affinity([0])
    except (psutil.NoSuchProcess, psutil.AccessDenied):
      # FIX: logging.warn is a deprecated alias; use logging.warning.
      logging.warning('Failed to set adb process CPU affinity')
|
|
# Using the following encoding: utf-8
# Python 2
# from ConfigParser import ConfigParser, NoOptionError
# End Python 2
# Python 3
from configparser import ConfigParser, NoOptionError
# End Python 3
import requests
import os
import shutil
import fnmatch
import time
import getpass
import itertools
from ltk import exceptions
from ltk.apicalls import ApiCalls
from ltk.utils import *
from ltk.managers import DocumentManager, FolderManager
from ltk.constants import CONF_DIR, CONF_FN, SYSTEM_FILE, ERROR_FN
import json
from ltk.logger import logger
from ltk.git_auto import Git_Auto
from tabulate import tabulate
class Action:
    def __init__(self, path, watch=False, timeout=60):
        """Loads project state and API access for the project rooted at |path|.

        Args:
            path: A directory inside an initialized ltk project.
            watch: Whether this action runs in watch mode.
            timeout: API request timeout in seconds.

        Raises:
            exceptions.UninitializedError: If |path| is not inside an
                initialized project.
        """
        self.host = ''
        self.access_token = ''
        self.project_id = ''
        self.project_name = ''
        self.path = path
        self.community_id = ''
        self.workflow_id = '' # default workflow id; MT phase only
        self.locale = ''
        self.clone_option = 'on'
        self.auto_format_option = ''
        self.download_option = 'clone'
        self.download_dir = None # directory where downloaded translation will be stored
        self.watch_locales = set() # if specified, add these target locales to any files in the watch folder
        self.git_autocommit = None
        self.git_username = ''
        self.git_password = ''
        self.append_option = 'none'
        self.locale_folders = {}
        if not self._is_initialized():
            raise exceptions.UninitializedError("This project is not initialized. Please run init command.")
        self._initialize_self()
        self.watch = watch
        self.doc_manager = DocumentManager(self.path)
        self.folder_manager = FolderManager(self.path)
        self.timeout = timeout
        self.api = ApiCalls(self.host, self.access_token, self.watch, self.timeout)
        self.git_auto = Git_Auto(self.path)
        self.error_file_name = os.path.join(self.path, CONF_DIR, ERROR_FN)
def _is_initialized(self):
actual_path = find_conf(self.path)
if not actual_path:
return False
self.path = os.path.join(actual_path, '')
if not is_initialized(self.path):
return False
return True
    def _initialize_self(self):
        """Populates instance settings from the project config file.

        Optional settings that are missing from the file are written back
        with their defaults via update_config_file, so later reads see a
        complete configuration. On a missing required option, attempts to
        recover the project name from the API.
        """
        config_file_name = os.path.join(self.path, CONF_DIR, CONF_FN)
        conf_parser = ConfigParser()
        conf_parser.read(config_file_name)
        self.host = conf_parser.get('main', 'host')
        self.access_token = conf_parser.get('main', 'access_token')
        self.project_id = conf_parser.get('main', 'project_id')
        self.community_id = conf_parser.get('main', 'community_id')
        self.workflow_id = conf_parser.get('main', 'workflow_id')
        self.locale = conf_parser.get('main', 'default_locale')
        # Locales are normalized to hyphenated form (e.g. en-US).
        self.locale = self.locale.replace('_','-')
        try:
            if conf_parser.has_option('main', 'auto_format'):
                self.auto_format_option = conf_parser.get('main', 'auto_format')
            else:
                self.update_config_file('auto_format', 'on', conf_parser, config_file_name, "")
            if conf_parser.has_option('main', 'project_name'):
                self.project_name = conf_parser.get('main', 'project_name')
            if conf_parser.has_option('main', 'download_folder'):
                self.download_dir = conf_parser.get('main', 'download_folder')
            else:
                self.download_dir = None
                self.update_config_file('download_folder', json.dumps(self.download_dir), conf_parser, config_file_name, "")
            if conf_parser.has_option('main', 'watch_locales'):
                watch_locales = conf_parser.get('main', 'watch_locales')
                if watch_locales:
                    self.watch_locales = set(watch_locales.split(','))
                else:
                    # there are no watch locales, so set it to an empty set
                    self.watch_locales = set()
            else:
                self.watch_locales = set()
                self.update_config_file('watch_locales', json.dumps(list(self.watch_locales)), conf_parser, config_file_name, "")
            if conf_parser.has_option('main', 'locale_folders'):
                self.locale_folders = json.loads(conf_parser.get('main', 'locale_folders'))
                locale_folders = {}
                #for key, value in self.locale_folders.items():
                #    key = key.replace('_', '-');
                #    locale_folders[key] = value
                #self.locale_folders = locale_folders
            else:
                self.locale_folders = {}
                self.update_config_file('locale_folders', json.dumps(self.locale_folders), conf_parser, config_file_name, "")
            if conf_parser.has_option('main', 'download_option'):
                self.download_option = conf_parser.get('main', 'download_option')
            else:
                self.download_option = 'clone'
                self.update_config_file('download_option', self.download_option, conf_parser, config_file_name, "")
            if conf_parser.has_option('main', 'clone_option'):
                self.clone_option = conf_parser.get('main', 'clone_option')
            else:
                self.clone_option = 'on'
                self.update_config_file('clone_option', self.clone_option, conf_parser, config_file_name, "")
            if conf_parser.has_option('main', 'git_autocommit'):
                self.git_autocommit = conf_parser.get('main', 'git_autocommit')
            else:
                self.git_autocommit = ''
                self.update_config_file('git_autocommit', self.git_autocommit, conf_parser, config_file_name, "")
            if conf_parser.has_option('main', 'git_username'):
                self.git_username = conf_parser.get('main', 'git_username')
            else:
                self.git_username = ''
                self.update_config_file('git_username', self.git_username, conf_parser, config_file_name, "")
            if conf_parser.has_option('main', 'git_password'):
                self.git_password = conf_parser.get('main', 'git_password')
            else:
                self.git_password = ''
                self.update_config_file('git_password', self.git_password, conf_parser, config_file_name, "")
            if conf_parser.has_option('main', 'append_option'):
                self.append_option = conf_parser.get('main', 'append_option')
            else:
                self.append_option = 'none'
                self.update_config_file('append_option', self.append_option, conf_parser, config_file_name, "")
        except NoOptionError as e:
            # Recover a missing project name from the API and persist it.
            if not self.project_name:
                self.api = ApiCalls(self.host, self.access_token)
                project_info = self.api.get_project_info(self.community_id)
                self.project_name = project_info[self.project_id]
                config_file_name, conf_parser = self.init_config_file()
                log_info = 'Updated project name'
                self.update_config_file('project_name', self.project_name, conf_parser, config_file_name, log_info)
    def _add_document(self, file_name, title, doc_id):
        """ adds a document to db

        Records the document's mtime and creation timestamps alongside its
        id and title.
        """
        now = time.time()
        # doc_id = json['properties']['id']
        full_path = os.path.join(self.path, file_name)
        last_modified = os.stat(full_path).st_mtime
        self.doc_manager.add_document(title, now, doc_id, last_modified, now, file_name)
    def _update_document(self, file_name):
        """ updates a document in the db

        Refreshes both the logical and filesystem modification times and
        clears the downloaded-translations list.
        """
        now = time.time()
        file_path = os.path.join(self.path, file_name)
        # sys_last_modified = os.stat(file_name).st_mtime
        sys_last_modified = os.stat(file_path).st_mtime
        entry = self.doc_manager.get_doc_by_prop('file_name', file_name)
        doc_id = entry['id']
        self.doc_manager.update_document('last_mod', now, doc_id)
        self.doc_manager.update_document('sys_last_mod', sys_last_modified, doc_id)
        # whenever a document is updated, it should have new translations
        self.doc_manager.update_document('downloaded', [], doc_id)
    def close(self):
        """Closes the underlying document database."""
        self.doc_manager.close_db()
    def open(self):
        """Re-opens the underlying document database."""
        self.doc_manager.open_db()
def init_config_file(self):
config_file_name = os.path.join(self.path, CONF_DIR, CONF_FN)
conf_parser = ConfigParser()
conf_parser.read(config_file_name)
return config_file_name, conf_parser
    def update_config_file(self, option, value, conf_parser, config_file_name, log_info):
        """Writes |option|=|value| into the config file and reloads settings.

        Logs |log_info| when non-empty; IOErrors are printed, not raised.
        """
        try:
            conf_parser.set('main', option, value)
            with open(config_file_name, 'w') as new_file:
                conf_parser.write(new_file)
            # Re-read so instance attributes reflect the file just written.
            self._initialize_self()
            if (len(log_info)):
                logger.info(log_info+"\n")
        except IOError as e:
            print(e.errno)
            print(e)
    def get_relative_path(self, path):
        """Returns |path| relative to the project root (delegates to utils)."""
        return get_relative_path(self.path, path)
def get_current_path(self, path):
cwd = os.getcwd()
if cwd in path:
path = path.replace(cwd,"")
return path
else:
cwd_relative_path = cwd.replace(self.path,"")
return path.replace(cwd_relative_path+os.sep,"")
def get_current_abs(self, path):
# print("orig path: "+str(path))
cwd = os.getcwd()
if cwd in path:
path = path.replace(cwd,"")
else:
# print("cwd: "+cwd)
# print("self.path: "+self.path)
cwd_relative_path = cwd.replace(self.path,"")
# print("cwd relative path: "+cwd_relative_path)
cwd_path = path.replace(cwd_relative_path+os.sep,"")
# print("cwd path: "+cwd_path)
path = cwd_path
# print("current path: "+path)
# print("abs path: "+os.path.abspath(path))
return os.path.abspath(path)
def norm_path(self, file_location):
# print("original path: "+str(file_location))
if file_location:
file_location = os.path.normpath(file_location)
# abspath=os.path.abspath(file_location)
# print("abspath: "+str(os.path.abspath(os.path.expanduser(file_location))))
# print("self.path: "+self.path)
# print("cwd: "+str(os.getcwd()))
norm_path = os.path.abspath(os.path.expanduser(file_location)).replace(self.path, '')
# print("normalized path: "+norm_path)
# print("joined path: "+str(os.path.join(self.path,file_location)))
# if file_location == ".." and self.path.rstrip('/') in norm_path:
# return norm_path.replace(self.path.rstrip('/'), '')
if file_location is not "." and ".." not in file_location and os.path.exists(os.path.join(self.path,file_location)):
# print("returning original path: "+str(file_location))
return file_location.replace(self.path, '')
elif ".." in file_location and file_location != "..":
# print("returning norm path: "+norm_path)
return norm_path.replace(self.path,'')
if not os.path.exists(os.path.join(self.path,norm_path)) and os.path.exists(os.path.join(self.path,file_location)):
# print("Starting path at project directory: "+file_location.replace(self.path, ''))
return os.path.abspath(os.path.expanduser(file_location.replace(self.path, ''))).replace(self.path, '')
elif file_location == "..":
return os.path.abspath(os.path.expanduser(file_location.replace(self.path, ''))).replace(self.path, '')
return norm_path
else:
return None
def get_docs_in_path(self, path):
files = get_files(path)
db_files = self.doc_manager.get_file_names()
docs = []
if files:
for file in files:
file_name = self.norm_path(file)
if file_name in db_files:
docs.append(self.doc_manager.get_doc_by_prop('file_name',file_name))
return docs
def get_doc_filenames_in_path(self, path):
files = get_files(path)
db_files = self.doc_manager.get_file_names()
docs = []
if files:
for file in files:
file_name = self.norm_path(file)
if file_name in db_files:
docs.append(file_name)
return docs
def get_doc_locales(self, doc_id, doc_name):
locales = []
response = self.api.document_translation_status(doc_id)
if response.status_code != 200:
if check_response(response) and response.json()['messages'] and 'No translations exist' in response.json()['messages'][0]:
return locales
if doc_name:
raise_error(response.json(), 'Failed to check target locales for document '+doc_name, True, doc_id)
else:
raise_error(response.json(), 'Failed to check target locales for document '+doc_id, True, doc_id)
try:
if 'entities' in response.json():
for entry in response.json()['entities']:
locales.append(entry['properties']['locale_code'])
except KeyError as e:
print("Error listing translations")
return
# return detailed_status
return locales
def is_locale_folder_taken(self, new_locale, path):
# Python 2
# for locale, folder in self.locale_folders.iteritems():
# End Python 2
# Python 3
for locale, folder in self.locale_folders.items():
# End Python 3
if path == folder and not locale == new_locale:
return locale
return False
def update_document_action(self, file_name, title=None, **kwargs):
try:
relative_path = self.norm_path(file_name)
entry = self.doc_manager.get_doc_by_prop('file_name', relative_path)
try:
document_id = entry['id']
except TypeError as e:
log_error(self.error_file_name, e)
logger.error("Document name specified for update doesn't exist: {0}".format(title))
return
if title:
response = self.api.document_update(document_id, file_name, title=title, **kwargs)
else:
response = self.api.document_update(document_id, file_name)
if response.status_code != 202:
raise_error(response.json(), "Failed to update document {0}".format(file_name), True)
self._update_document(relative_path)
return True
except Exception as e:
log_error(self.error_file_name, e)
if 'string indices must be integers' in str(e) or 'Expecting value: line 1 column 1' in str(e):
logger.error("Error connecting to Lingotek's TMS")
else:
logger.error("Error on updating document"+str(file_name)+": "+str(e))
def _target_action_db(self, to_delete, locales, document_id):
if to_delete:
curr_locales = self.doc_manager.get_doc_by_prop('id', document_id)['locales']
updated_locales = set(curr_locales) - set(locales)
self.doc_manager.update_document('locales', updated_locales, document_id)
else:
self.doc_manager.update_document('locales', list(locales), document_id)
def update_doc_locales(self, document_id):
try:
locale_map = self.import_locale_info(document_id)
locale_info = list(iter(locale_map))
except exceptions.RequestFailedError as e:
log_error(self.error_file_name, e)
locale_info = []
self.doc_manager.update_document('locales', locale_info, document_id)
def added_folder_of_file(self, file_path):
folders = self.folder_manager.get_file_names()
if not folders:
#print("not folders")
return
for folder in folders:
folder = os.path.join(self.path, folder)
if folder in file_path:
return folder
def get_new_name(self, file_name, curr_path):
i = 1
file_path = os.path.join(curr_path, file_name)
name, extension = os.path.splitext(file_name)
while os.path.isfile(file_path):
new_name = '{name}({i}){ext}'.format(name=name, i=i, ext=extension)
file_path = os.path.join(curr_path, new_name)
i += 1
return file_path
def import_locale_info(self, document_id, poll=False):
locale_progress = {}
response = self.api.document_translation_status(document_id)
if response.status_code != 200:
if poll or response.status_code == 404:
return {}
else:
# raise_error(response.json(), 'Failed to get locale details of document', True)
raise exceptions.RequestFailedError('Failed to get locale details of document')
try:
for entry in response.json()['entities']:
curr_locale = entry['properties']['locale_code']
curr_progress = int(entry['properties']['percent_complete'])
curr_locale = curr_locale.replace('-', '_')
locale_progress[curr_locale] = curr_progress
except KeyError:
pass
return locale_progress
def delete_local(self, title, document_id, message=None):
# print('local delete:', title, document_id)
if not title:
title = document_id
message = '{0} has been deleted locally'.format(title) if not message else message
try:
file_name = self.doc_manager.get_doc_by_prop('id', document_id)['file_name']
except TypeError:
logger.info('Document to remove not found in the local database')
return
try:
os.remove(os.path.join(self.path, file_name))
logger.info(message)
except OSError:
logger.info('Something went wrong trying to delete the local file')
def delete_local_translation(self, file_name):
try:
if not file_name:
logger.info('Please provide a valid file name')
logger.info('{0} (local translation) has been deleted'.format(self.get_relative_path(file_name)))
os.remove(os.path.join(self.path, file_name))
except OSError:
logger.info('Something went wrong trying to download the local translation')
def delete_local_path(self, path, message=None):
path = self.norm_path(path)
message = '{0} has been deleted locally.'.format(path) if not message else message
try:
os.remove(path)
logger.info(message)
except OSError:
logger.info('Something went wrong trying to delete the local file')
def raise_error(json, error_message, is_warning=False, doc_id=None, file_name=None):
    """Raise RequestFailedError (or just log, when *is_warning*) using the
    best message available from the API response body *json*.

    Falls back to *error_message* when no usable message is present.
    """
    try:
        if json:
            error = json['messages'][0]
            if file_name is not None and doc_id is not None:
                # Bug fix: only touch file_name once we know it is not None.
                # Previously .replace() ran before the None check, raising
                # AttributeError and masking the real API message whenever
                # file_name was omitted.
                file_name = file_name.replace("Status of ", "")
                error = error.replace(doc_id, file_name+" ("+doc_id+")")
            # Sometimes api returns vague errors like 'Unknown error'
            if error == 'Unknown error':
                error = error_message
            if not is_warning:
                raise exceptions.RequestFailedError(error)
            # warnings.warn(error)
            logger.error(error)
    except (AttributeError, IndexError):
        if not is_warning:
            raise exceptions.RequestFailedError(error_message)
        # warnings.warn(error_message)
        logger.error(error_message)
def is_initialized(project_path):
    """Return True when *project_path* contains a non-empty config file
    inside its CONF_DIR directory."""
    conf_file = os.path.join(project_path, CONF_DIR, CONF_FN)
    return os.path.isdir(os.path.join(project_path, CONF_DIR)) and \
        os.path.isfile(conf_file) and bool(os.stat(conf_file).st_size)
def choice_mapper(info):
    """Print a numbered table of choices and return {index: {uuid: name}}.

    *info* maps uuid -> name; entries with a falsy key or value are
    skipped, and rows are ordered by name.
    """
    import operator
    mapper = {}
    index = 0
    for uuid, name in sorted(info.items(), key=operator.itemgetter(1)):
        if uuid and name:
            mapper[index] = {uuid: name}
            index += 1
    table = []
    for idx, pair in mapper.items():
        try:
            for uuid in pair:
                table.append({
                    "ID": idx,
                    "Name": pair[uuid],
                    "UUID": uuid
                })
        except UnicodeEncodeError:
            # Skip rows whose names cannot be encoded for display.
            continue
    print(tabulate(table, headers="keys"), "\n")
    return mapper
def find_conf(curr_path):
    """Walk upward from *curr_path* looking for a directory containing the
    CONF_DIR folder; return that directory, or None at the filesystem root."""
    if os.path.isdir(os.path.join(curr_path, CONF_DIR)):
        return curr_path
    if curr_path == os.path.abspath(os.sep):
        return None
    return find_conf(os.path.abspath(os.path.join(curr_path, os.pardir)))
def printResponseMessages(response):
    """Log every message carried in an API *response* body."""
    messages = response.json()['messages']
    for message in messages:
        logger.info(message)
def get_files(patterns):
    """Return all files matching *patterns* (a str, or a list/tuple of str).

    A pattern with a non-empty basename is expanded through
    getRegexFiles(); a pattern ending in a path separator is walked as a
    directory.  desktop.ini / Thumbs.db / ehthumbs.db entries are skipped.
    Returns None when nothing matched (callers test truthiness).
    """
    cwd = os.getcwd()
    # Normalize every input shape to a list so one loop handles all cases.
    # (Previously the str, list/tuple, and fallback branches duplicated
    # the basename/getRegexFiles logic.)
    if not isinstance(patterns, (list, tuple)):
        patterns = [patterns]
    all_patterns = []
    for pattern in patterns:
        if os.path.basename(pattern):
            all_patterns.extend(getRegexFiles(pattern, cwd))
        else:
            # Trailing separator: treat the whole pattern as a directory.
            all_patterns.append(pattern)
    matched_files = []
    for pattern in all_patterns:
        path = os.path.abspath(pattern)
        if not os.path.exists(path):
            continue
        if os.path.isdir(path):
            for root, subdirs, files in os.walk(path):
                for file in files:
                    # Skip Windows/macOS shell metadata files.
                    if not (("desktop.ini" in file) or ('Thumbs.db' in file) or ('ehthumbs.db' in file)):
                        matched_files.append(os.path.join(root, file))
        else:
            matched_files.append(path)
    if len(matched_files) == 0:
        return None
    return matched_files
def getRegexFiles(pattern, path):
    """Expand a shell-style *pattern* (resolved relative to *path*) into a
    list of matching file paths.

    A pattern with a basename but no '*' is returned untouched; otherwise
    the directory tree is walked and filenames are matched with fnmatch.
    """
    sub_dir = os.path.dirname(pattern)
    if sub_dir:
        path = os.path.join(path, sub_dir)
    pattern_name = os.path.basename(pattern)
    if pattern_name and "*" not in pattern:
        return [pattern]
    matches = []
    for root, subdirs, files in os.walk(path):
        for fn in fnmatch.filter(files, pattern):
            matches.append(os.path.join(root, fn))
    return matches
def log_id_names(json):
    """Return parallel lists (ids, titles) extracted from a json entity
    listing's 'properties'."""
    ids = [entity['properties']['id'] for entity in json['entities']]
    titles = [entity['properties']['title'] for entity in json['entities']]
    return ids, titles
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import inspect
import re
from collections import OrderedDict
from docutils.core import publish_parts
from six import string_types
from six.moves import range
from pants.base.build_environment import get_buildroot
from pants.base.build_manual import get_builddict_info
from pants.base.exceptions import TaskError
from pants.base.generator import TemplateData
from pants.build_graph.target import Target
from pants.goal.goal import Goal
from pants.help.help_info_extracter import HelpInfoExtracter
from pants.option.arg_splitter import GLOBAL_SCOPE
from pants.option.options_bootstrapper import OptionsBootstrapper
# TODO(benjy): Rewrite this to handle subsystem options, and recursive/advanced options.
# Ideally the latter would be expandable or linked or something, so there's not a ton of clutter.
# Also make this code better (at least wrap it in a class and give the methods comprehensible
# names), and make its output prettier and more dynamic.
# This might make a good documentation hack week project for someone.
buildroot = get_buildroot()
# Our CLI help and doc-website-gen use this to get useful help text.
def indent_docstring_by_n(s, n=1):
    """Given a non-empty docstring, return a version indented N spaces.
    Given an empty thing, return the thing itself.

    Re-flows per PEP 257: the common indentation (ignoring the first line)
    is stripped, blank lines are trimmed from both ends, then every line
    is re-indented by *n* spaces.
    """
    if not s:
        return s
    # Expand tabs per the normal Python rules and split into lines.
    lines = s.expandtabs().splitlines()
    # Find the minimum indentation; the first line doesn't count (PEP 257).
    min_indent = 999
    for line in lines[1:]:
        stripped = line.lstrip()
        if stripped:
            min_indent = min(min_indent, len(line) - len(stripped))
    # Strip that indentation; the first line is always fully stripped.
    trimmed = [lines[0].strip()]
    if min_indent < 999:
        trimmed.extend(line[min_indent:].rstrip() for line in lines[1:])
    # Drop blank lines at both ends.
    while trimmed and not trimmed[-1]:
        trimmed.pop()
    while trimmed and not trimmed[0]:
        trimmed.pop(0)
    prefix = n * " "
    return '\n'.join(prefix + t for t in trimmed)
def dedent_docstring(s):
    """Return *s* re-flowed with zero leading indentation (a convenience
    wrapper around indent_docstring_by_n with n=0)."""
    return indent_docstring_by_n(s, 0)
def rst_to_html(in_rst):
    """Returns HTML rendering of an RST fragment.
    :param in_rst: rst-formatted string
    :return: the stripped HTML body fragment, or '' for falsy input
    """
    if not in_rst:
        return ''
    # docutils' publish_parts: 'body' is the fragment without the
    # surrounding document boilerplate.
    return publish_parts(in_rst, writer_name='html')['body'].strip()
def entry(nom, classdoc_rst=None, classdoc_html=None,
          msg_rst=None, msg_html=None, argspec=None,
          funcdoc_rst=None, funcdoc_html=None, methods=None, paramdocs=None,
          impl=None, indent=1):
    """Create a struct that our template expects to see.
    :param nom: Symbol name, e.g. python_binary
    :param classdoc_rst: plain text appears above argspec
    :param msg_rst: reST. useful in hand-crafted entries
    :param argspec: arg string like (x, y="deflt")
    :param funcdoc_rst: function's __doc__, plain text
    :param methods: list of entries for class' methods
    :param impl: name of thing that implements this.
           E.g., "pants.backend.core.tasks.builddict.BuildBuildDictionary"
    :param indent: spaces to indent; rst uses this for outline level
    """
    method_list = methods or []
    return TemplateData(
        nom=nom.strip(),
        classdoc_rst=indent_docstring_by_n(classdoc_rst),
        classdoc_html=classdoc_html,
        msg_html=msg_html,
        msg_rst=indent_docstring_by_n(msg_rst, indent),
        argspec=argspec,
        funcdoc_html=funcdoc_html,
        funcdoc_rst=indent_docstring_by_n(funcdoc_rst, indent),
        methods=methods,
        showmethods=len(method_list) > 0,
        paramdocs=paramdocs,
        showparams=paramdocs and (len(paramdocs) > 0),
        impl=impl)
def msg_entry(nom, msg_rst, msg_html):
    """For hard-wired entries a la "See Instead" or other simple stuff:
    builds an entry carrying only a message (no argspec/methods).
    :param nom: name
    :param msg_rst: restructured text message
    :param msg_html: HTML rendering of the same message"""
    return entry(nom, msg_rst=msg_rst, msg_html=msg_html)
def entry_for_one_func(nom, func):
    """Generate a BUILD dictionary entry for a function.

    :param nom: name like 'python_binary'
    :param func: function object
    """
    args, varargs, varkw, defaults = inspect.getargspec(func)
    doc = dedent_docstring(func.__doc__)
    body_rst = docstring_to_body(doc)
    shards = shard_param_docstring(doc)
    return entry(nom,
                 argspec=inspect.formatargspec(args, varargs, varkw, defaults),
                 funcdoc_html=rst_to_html(body_rst),
                 funcdoc_rst=func.__doc__ or '',
                 impl="{0}.{1}".format(func.__module__, func.__name__),
                 paramdocs=param_docshards_to_template_datas(shards))
def entry_for_one_method(nom, method):
    """Generate a BUILD dictionary entry for a method.

    :param nom: name like 'with_description'
    :param method: method object
    """
    # TODO(lhosken) : This is darned similar to entry_for_one_func. Merge 'em?
    # (Punted so far since funcdoc indentation made my head hurt,
    # but that will go away when we stop generating RST)
    assert inspect.ismethod(method)
    args, varargs, varkw, defaults = inspect.getargspec(method)
    # Drop the leading "self" argument from the displayed argspec.
    argspec = inspect.formatargspec(args[1:], varargs, varkw, defaults)
    doc = dedent_docstring(method.__doc__)
    shards = shard_param_docstring(doc)
    return entry(nom,
                 argspec=argspec,
                 funcdoc_html=rst_to_html(docstring_to_body(doc)),
                 funcdoc_rst=(method.__doc__ or ""),
                 paramdocs=param_docshards_to_template_datas(shards),
                 indent=2)
# regex for docstring lines of the forms
#   :param foo: blah blah blah
#   :param string foo: blah blah blah
# Groups: an optional space-terminated 'type', the 'param' name, and the
# rest of the line as 'desc'.
param_re = re.compile(r':param (?P<type>[A-Za-z0-9_]* )?(?P<param>[^:]*):(?P<desc>.*)')
# regex for docstring lines of the form
#   :type foo: list of strings
type_re = re.compile(r':type (?P<param>[^:]*):(?P<type>.*)')
def docstring_to_body(docstring):
    """Passed a sphinx-flavored docstring, return just the "body" part.
    Filter out the :param...: and :type...: part, if any.
    """
    docstring = docstring or ''
    kept = []
    recording = True
    for line in docstring.splitlines():
        # A non-indented line flips state: field lines (:param:/:type:)
        # stop recording; any other non-indented line resumes it.
        # Indented lines keep the current state (field continuations stay
        # filtered out).
        if line and not line[0].isspace():
            recording = not any(r.match(line) for r in (param_re, type_re))
        if recording:
            kept.append(line + '\n')
    return ''.join(kept)
def shard_param_docstring(docstring):
    """Shard a sphinx-flavored __init__ docstring by param.

    E.g., if the docstring is
      :param float x: x coordinate
         blah blah blah
      :param y: y coordinate
      :type y: float
    this returns
      OrderedDict(
        'x' : {'type': 'float', 'param': 'x coordinate\\n   blah blah blah'},
        'y' : {'type': 'float', 'param': 'y coordinate'},
      )
    """
    docstring = docstring or ''
    # state is (param_name, field) naming what multi-line text we are
    # currently accumulating; ('!forget', '!') is a discard bucket that is
    # removed before returning.
    state = ('!forget', '!')
    shards = OrderedDict([('!forget', {'!': ''})])
    for line in docstring.splitlines():
        if line and line[0].isspace():
            # Indented continuation: append to whatever we were recording.
            param, field = state
            shards[param][field] += '\n' + line
            continue
        param_m = param_re.match(line)
        type_m = type_re.match(line)
        if param_m:
            param_name = param_m.group('param')
            state = (param_name, 'param')
            shards.setdefault(param_name, {})
            if param_m.group('type'):
                shards[param_name]['type'] = param_m.group('type')
            shards[param_name]['param'] = param_m.group('desc')
        elif type_m:
            param_name = type_m.group('param')
            state = (param_name, 'type')
            shards.setdefault(param_name, {})
            shards[param_name]['type'] = type_m.group('type')
        else:
            # Not a field line: discard until the next :param:/:type:.
            state = ('!forget', '!')
    del shards['!forget']
    return shards
def param_docshards_to_template_datas(funcdoc_shards):
    """Convert shard_param_docstring() output into a list of TemplateData
    rows (param, typ, desc) for the templates; desc is rendered to HTML."""
    template_datas = []
    for param, parts in (funcdoc_shards or {}).items():
        typ = parts.get('type')
        if 'param' in parts:
            desc = rst_to_html(dedent_docstring(parts['param']))
        else:
            desc = None
        template_datas.append(TemplateData(param=param, typ=typ, desc=desc))
    return template_datas
def info_for_target_class(cls):
    """Walk up inheritance tree to get info about constructor args.
    Helper function for entry_for_one_class. Target classes use inheritance
    to handle constructor params. If you try to get the argspec for, e.g.,
    `JunitTests.__init__`, it won't mention the `name` parameter, because
    that's handled by the `Target` superclass.

    Returns (argspec_string, funcdoc_rst, paramdocs_template_datas).
    """
    # args to not-document. BUILD file authors shouldn't
    # use these; they're meant to be impl-only.
    ARGS_SUPPRESS = ['address', 'build_graph', 'payload']
    # "accumulate" argspec and docstring fragments going up inheritance tree.
    suppress = set(ARGS_SUPPRESS)  # only show things once. don't show silly things
    args_accumulator = []
    defaults_accumulator = []
    docs_accumulator = []
    for c in inspect.getmro(cls):
        if not issubclass(c, Target): continue
        if not inspect.ismethod(c.__init__): continue
        args, _, _, defaults = inspect.getargspec(c.__init__)
        # defaults align with the tail of the args list, so split the
        # signature into defaulted and non-defaulted (positional) args.
        args_that_have_defaults = args[len(args) - len(defaults or ()):]
        args_with_no_defaults = args[1:(len(args) - len(defaults or ()))]
        for i in range(len(args_that_have_defaults)):
            arg = args_that_have_defaults[i]
            if not arg in suppress:
                suppress.add(arg)
                args_accumulator.append(arg)
                defaults_accumulator.append(defaults[i])
        # Positional args from superclasses go to the front, so the most
        # fundamental (e.g. Target's `name`) ends up first.
        for arg in args_with_no_defaults:
            if not arg in suppress:
                suppress.add(arg)
                args_accumulator.insert(0, arg)
        dedented_doc = dedent_docstring(c.__init__.__doc__)
        docs_accumulator.append(shard_param_docstring(dedented_doc))
    argspec = inspect.formatargspec(args_accumulator,
                                    None,
                                    None,
                                    defaults_accumulator)
    # Second pass with a fresh suppress set: merge per-class docstring
    # shards, keeping the first (most-derived) doc for each param.
    suppress = set(ARGS_SUPPRESS)  # only show things once. don't show silly things
    funcdoc_rst = ''
    funcdoc_shards = OrderedDict()
    for shard in docs_accumulator:
        for param, parts in shard.items():
            if param in suppress:
                continue
            suppress.add(param)
            funcdoc_shards[param] = parts
            # Don't interpret param names like "type_" as links.
            if 'type' in parts:
                funcdoc_rst += '\n:type {0}: {1}'.format(param, parts['type'])
            if 'param' in parts:
                funcdoc_rst += '\n:param {0}: {1}'.format(param, parts['param'])
    paramdocs = param_docshards_to_template_datas(funcdoc_shards)
    return(argspec, funcdoc_rst, paramdocs)
def entry_for_one_class(nom, cls):
    """Generate a BUILD dictionary entry for a class.

    :param nom: name like 'python_binary'
    :param cls: class like pants.python_binary
    """
    if issubclass(cls, Target):
        # special case for Target classes: "inherit" information up the class tree.
        (argspec, funcdoc_rst, paramdocs) = info_for_target_class(cls)
    else:
        args, varargs, varkw, defaults = inspect.getargspec(cls.__init__)
        # Skip "self" in the displayed argspec.
        argspec = inspect.formatargspec(args[1:], varargs, varkw, defaults)
        shards = shard_param_docstring(dedent_docstring(cls.__init__.__doc__))
        paramdocs = param_docshards_to_template_datas(shards)
        funcdoc_rst = cls.__init__.__doc__
    methods = []
    for attrname in dir(cls):
        attr = getattr(cls, attrname)
        info = get_builddict_info(attr)
        # we want methods tagged @manual.builddict--except factory functions
        if not info or info.get('factory', False):
            continue
        if not inspect.ismethod(attr):
            raise TaskError('@manual.builddict() on non-method {0}'
                            ' within class {1}'.format(attrname, nom))
        methods.append(entry_for_one_method(attrname, attr))
    return entry(nom,
                 classdoc_rst=cls.__doc__,
                 classdoc_html=rst_to_html(dedent_docstring(cls.__doc__)),
                 argspec=argspec,
                 funcdoc_rst=funcdoc_rst,
                 methods=methods,
                 paramdocs=paramdocs,
                 impl='{0}.{1}'.format(cls.__module__, cls.__name__))
def entry_for_one(nom, sym):
    """Dispatch to the right entry builder for *sym* (class, factory,
    function/method), falling back to a placeholder message entry."""
    if inspect.isclass(sym):
        return entry_for_one_class(nom, sym)
    info = get_builddict_info(sym)
    if info and info.get('factory'):
        # instead of getting factory info, get info about associated class
        # (im_self is the Python 2 bound-method attribute):
        return entry_for_one_class(nom, sym.im_self)
    if inspect.ismethod(sym) or inspect.isfunction(sym):
        return entry_for_one_func(nom, sym)
    todo = "TODO! no doc gen for {} {}".format(str(type(sym)), str(sym))
    return msg_entry(nom, todo, todo)
PREDEFS = { # some hardwired entries
'dependencies': {'defn':
msg_entry('dependencies',
'Old name for `target`_',
'Old name for <a href="#target">target</a>')},
'egg': {'defn': msg_entry('egg',
'In older Pants, loads a pre-built Python egg '
'from file system. Undefined in newer Pants.',
'In older Pants, loads a pre-built Python egg '
'from file system. Undefined in newer Pants.')},
'java_tests': {'defn':
msg_entry('java_tests',
'Old name for `junit_tests`_',
'Old name for <a href="#junit_tests">junit_tests</a>')},
'pants': {'defn':
msg_entry('pants',
"""In old Pants versions, a reference to a Pants targets.
(In new Pants versions, just use strings.)""",
"""In old Pants versions, a reference to a Pants targets.
(In new Pants versions, just use strings.)""")},
'python_artifact': {'suppress': True}, # unused alias for PythonArtifact
'python_test_suite': {'defn':
msg_entry('python_test_suite',
'Deprecated way to group Python tests;'
' use `target`_',
'Deprecated way to group Python tests;'
' use <a href="#target">target</a>')},
}
# Report symbols defined in BUILD files (jvm_binary...)
# Returns dict {"scala_library": ScalaLibrary, ...}
def get_syms(build_file_parser):
    """Return {alias: symbol} for BUILD-file symbols registered with
    *build_file_parser*, excluding anything already covered by PREDEFS."""
    syms = {}
    def absorb(symbols):
        syms.update((alias, item) for alias, item in symbols.items()
                    if alias not in PREDEFS)
    aliases = build_file_parser.registered_aliases()
    absorb(aliases.target_types)
    # TODO(John Sirois): Handle mapping the `Macro.expand` arguments - these
    # are the real arguments to document and may be different than the set
    # gathered from walking the Target hierarchy.
    for alias, factory in aliases.target_macro_factories.items():
        for target_type in factory.target_types:
            absorb({alias: target_type})
    absorb(aliases.objects)
    absorb(aliases.context_aware_object_factories)
    return syms
def bootstrap_option_values():
    """Return the global-scope values of Pants' bootstrap options."""
    return OptionsBootstrapper().get_bootstrap_options().for_global_scope()
def gen_glopts_reference_data(options):
    """Build options-reference template data for the global options scope."""
    parser = options.get_parser(GLOBAL_SCOPE)
    oschi = HelpInfoExtracter.get_option_scope_help_info_from_parser(parser)
    return oref_template_data_from_help_info(oschi)
def oref_template_data_from_help_info(oschi):
    """Get data for the Options Reference from an OptionScopeHelpInfo instance."""
    def sub_buildroot(s):
        # Replace the absolute buildroot in string values so docs stay portable.
        return s.replace(buildroot, '<buildroot>') if isinstance(s, string_types) else s
    title = oschi.scope
    # pantsref: anchor-safe version of the title (alphanumerics only).
    pantsref = ''.join(c for c in title if c.isalnum())
    option_l = []
    for ohi in oschi.basic:
        hlp = indent_docstring_by_n(sub_buildroot(ohi.help), 6) if ohi.help else None
        option_l.append(TemplateData(
            st='/'.join(ohi.display_args),
            fromfile=ohi.fromfile,
            default=sub_buildroot(ohi.default),
            hlp=hlp,
            typ=ohi.typ.__name__))
    return TemplateData(
        title=title,
        options=option_l,
        pantsref=pantsref)
def gen_tasks_options_reference_data(options):
    """Generate the template data for the options reference rst doc."""
    goal_dict = {}
    goal_names = []
    for goal in Goal.all():
        tasks = []
        for task_name in goal.ordered_task_names():
            task_type = goal.task_type_by_name(task_name)
            # task_type may actually be a synthetic subclass of the authored
            # class; walk the MRO for the first class not defined in 'abc'
            # so the docs show the authored name.
            for authored_task_type in task_type.mro():
                if authored_task_type.__module__ != 'abc':
                    break
            doc = authored_task_type.__doc__
            parser = options.get_parser(task_type.options_scope)
            oschi = HelpInfoExtracter.get_option_scope_help_info_from_parser(parser)
            tasks.append(TemplateData(
                impl='{0}.{1}'.format(authored_task_type.__module__, authored_task_type.__name__),
                doc_html=rst_to_html(dedent_docstring(doc)),
                doc_rst=indent_docstring_by_n(doc or '', 2),
                ogroup=oref_template_data_from_help_info(oschi)))
        goal_dict[goal.name] = TemplateData(goal=goal, tasks=tasks)
        goal_names.append(goal.name)
    return [goal_dict[name] for name in sorted(goal_names, key=lambda x: x.lower())]
def assemble_buildsyms(predefs=PREDEFS, build_file_parser=None):
    """Assemble big hash of entries suitable for smushing into a template.

    :param predefs: Hash of "hard-wired" predefined entries.
    :param build_file_parser: BuildFileParser which knows the BUILD-file
      symbols defined for this run of Pants; hopefully knows ~the same
      symbols defined for a "typical" run of Pants.
    """
    retval = {}
    for nom, val in predefs.items():
        if val.get('suppress'):
            continue
        retval[nom] = val
    if build_file_parser:
        for nom, sym in get_syms(build_file_parser).items():
            bdi = get_builddict_info(sym)
            if bdi and bdi.get('suppress'):
                continue
            retval[nom] = {'defn': entry_for_one(nom, sym)}
    return retval
|
|
#
# The Python Imaging Library.
# $Id$
#
# PIL raster font management
#
# History:
# 1996-08-07 fl created (experimental)
# 1997-08-25 fl minor adjustments to handle fonts from pilfont 0.3
# 1999-02-06 fl rewrote most font management stuff in C
# 1999-03-17 fl take pth files into account in load_path (from Richard Jones)
# 2001-02-17 fl added freetype support
# 2001-05-09 fl added TransposedFont wrapper class
# 2002-03-04 fl make sure we have a "L" or "1" font
# 2002-12-04 fl skip non-directory entries in the system path
# 2003-04-29 fl add embedded default font
# 2003-09-27 fl added support for truetype charmap encodings
#
# Todo:
# Adapt to PILFONT2 format (16-bit fonts, compressed, single file)
#
# Copyright (c) 1997-2003 by Secret Labs AB
# Copyright (c) 1996-2003 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from . import Image
from ._util import isDirectory, isPath, py3
import os
import sys
# Text layout engines selectable on FreeTypeFont.
LAYOUT_BASIC = 0  # built-in FreeType layout
LAYOUT_RAQM = 1   # libraqm complex-text layout (when compiled in)
class _imagingft_not_installed(object):
    # module placeholder
    def __getattr__(self, id):
        raise ImportError("The _imagingft C module is not installed")
try:
    from . import _imagingft as core
except ImportError:
    # Defer the failure: any attribute access on `core` re-raises
    # ImportError, so bitmap-font-only use of this module keeps working.
    core = _imagingft_not_installed()
# FIXME: add support for pilfont2 format (see FontFile.py)
# --------------------------------------------------------------------
# Font metrics format:
# "PILfont" LF
# fontdescriptor LF
# (optional) key=value... LF
# "DATA" LF
# binary data: 256*10*2 bytes (dx, dy, dstbox, srcbox)
#
# To place a character, cut out srcbox and paste at dstbox,
# relative to the character position. Then move the character
# position according to dx, dy.
# --------------------------------------------------------------------
class ImageFont(object):
    "PIL font wrapper"
    def _load_pilfont(self, filename):
        # Locate the glyph bitmap that accompanies the .pil metrics file:
        # same basename, first of .png/.gif/.pbm that opens as a "1" or
        # "L" mode image.
        with open(filename, "rb") as fp:
            for ext in (".png", ".gif", ".pbm"):
                try:
                    fullname = os.path.splitext(filename)[0] + ext
                    image = Image.open(fullname)
                except Exception:
                    # Candidate missing or unreadable; try the next extension.
                    pass
                else:
                    if image and image.mode in ("1", "L"):
                        break
            else:
                # for/else: the loop ended without break -> no usable
                # glyph image was found for any extension.
                raise IOError("cannot find glyph data file")
            self.file = fullname
            return self._load_pilfont_data(fp, image)
    def _load_pilfont_data(self, file, image):
        # read PILfont header
        if file.readline() != b"PILfont\n":
            raise SyntaxError("Not a PILfont file")
        # Second header line is the font descriptor; its fields are unused here.
        file.readline().split(b";")
        self.info = []  # FIXME: should be a dictionary
        # Optional key=value lines, terminated by the "DATA" marker.
        while True:
            s = file.readline()
            if not s or s == b"DATA\n":
                break
            self.info.append(s)
        # read PILfont metrics: 256 glyphs x 10 int16 (dx, dy, dstbox, srcbox)
        data = file.read(256*20)
        # check image
        if image.mode not in ("1", "L"):
            raise TypeError("invalid font image mode")
        image.load()
        self.font = Image.core.font(image.im, data)
    def getsize(self, text, *args, **kwargs):
        # (width, height) of *text* rendered with this bitmap font.
        return self.font.getsize(text)
    def getmask(self, text, mode="", *args, **kwargs):
        # Render *text* to an internal image mask.
        return self.font.getmask(text, mode)
##
# Wrapper for FreeType fonts. Application code should use the
# <b>truetype</b> factory function to create font objects.
class FreeTypeFont(object):
    "FreeType font wrapper (requires _imagingft service)"
    def __init__(self, font=None, size=10, index=0, encoding="",
                 layout_engine=None):
        """
        :param font: a filesystem path, or a file-like object with the
            font data (anything failing isPath() is treated as file-like).
        :param size: point size.
        :param index: face index within a font collection.
        :param encoding: charmap encoding string.
        :param layout_engine: LAYOUT_BASIC or LAYOUT_RAQM; any other value
            selects RAQM when core.HAVE_RAQM else BASIC.
        """
        # FIXME: use service provider instead
        self.path = font
        self.size = size
        self.index = index
        self.encoding = encoding
        # Default/validate the layout engine, then fall back to BASIC
        # when RAQM was requested but isn't compiled in.
        if layout_engine not in (LAYOUT_BASIC, LAYOUT_RAQM):
            layout_engine = LAYOUT_BASIC
            if core.HAVE_RAQM:
                layout_engine = LAYOUT_RAQM
        if layout_engine == LAYOUT_RAQM and not core.HAVE_RAQM:
            layout_engine = LAYOUT_BASIC
        self.layout_engine = layout_engine
        if isPath(font):
            self.font = core.getfont(font, size, index, encoding,
                                     layout_engine=layout_engine)
        else:
            # File-like object: read all bytes and keep them alive so the
            # C font object can reference them.
            self.font_bytes = font.read()
            self.font = core.getfont(
                "", size, index, encoding, self.font_bytes, layout_engine)
    def _multiline_split(self, text):
        # Split on the newline matching the text's type (str vs bytes).
        split_character = "\n" if isinstance(text, str) else b"\n"
        return text.split(split_character)
    def getname(self):
        """Return the (family, style) names reported by the font."""
        return self.font.family, self.font.style
    def getmetrics(self):
        """Return (ascent, descent) as reported by the font."""
        return self.font.ascent, self.font.descent
    def getsize(self, text, direction=None, features=None):
        """Return the (width, height) of *text*, including the render offset."""
        size, offset = self.font.getsize(text, direction, features)
        return (size[0] + offset[0], size[1] + offset[1])
    def getsize_multiline(self, text, direction=None,
                          spacing=4, features=None):
        """Return (width, height) for multi-line *text*; line height is the
        height of 'A' plus *spacing*."""
        max_width = 0
        lines = self._multiline_split(text)
        line_spacing = self.getsize('A')[1] + spacing
        for line in lines:
            line_width, line_height = self.getsize(line, direction, features)
            max_width = max(max_width, line_width)
        return max_width, len(lines)*line_spacing - spacing
    def getoffset(self, text):
        """Return the (x, y) offset component of the font's size report."""
        return self.font.getsize(text)[1]
    def getmask(self, text, mode="", direction=None, features=None):
        """Render *text* and return just the image mask (see getmask2)."""
        return self.getmask2(text, mode, direction=direction,
                             features=features)[0]
    def getmask2(self, text, mode="", fill=Image.core.fill, direction=None,
                 features=None, *args, **kwargs):
        """Render *text* into a fresh "L" image and return (mask, offset)."""
        size, offset = self.font.getsize(text, direction, features)
        im = fill("L", size, 0)
        # mode "1" requests monochrome (non-antialiased) rendering.
        self.font.render(text, im.id, mode == "1", direction, features)
        return im, offset
    def font_variant(self, font=None, size=None, index=None, encoding=None,
                     layout_engine=None):
        """
        Create a copy of this FreeTypeFont object,
        using any specified arguments to override the settings.
        Parameters are identical to the parameters used to initialize this
        object.
        :return: A FreeTypeFont object.
        """
        return FreeTypeFont(
            font=self.path if font is None else font,
            size=self.size if size is None else size,
            index=self.index if index is None else index,
            encoding=self.encoding if encoding is None else encoding,
            layout_engine=layout_engine or self.layout_engine
        )
class TransposedFont(object):
    "Wrapper for writing rotated or mirrored text"

    def __init__(self, font, orientation=None):
        """
        Wrap an existing font object so rendered text is transposed.

        :param font: A font object.
        :param orientation: An optional orientation. If given, this should
           be one of Image.FLIP_LEFT_RIGHT, Image.FLIP_TOP_BOTTOM,
           Image.ROTATE_90, Image.ROTATE_180, or Image.ROTATE_270.
        """
        self.font = font
        self.orientation = orientation  # any 'transpose' argument, or None

    def getsize(self, text, *args, **kwargs):
        # Quarter-turn orientations swap the reported width and height.
        width, height = self.font.getsize(text)
        swapped = self.orientation in (Image.ROTATE_90, Image.ROTATE_270)
        return (height, width) if swapped else (width, height)

    def getmask(self, text, mode="", *args, **kwargs):
        # Render with the wrapped font, then transpose if requested.
        mask = self.font.getmask(text, mode, *args, **kwargs)
        if self.orientation is None:
            return mask
        return mask.transpose(self.orientation)
def load(filename):
    """
    Load a font file. This function loads a font object from the given
    bitmap font file, and returns the corresponding font object.

    :param filename: Name of font file.
    :return: A font object.
    :exception IOError: If the file could not be read.
    """
    pilfont = ImageFont()
    pilfont._load_pilfont(filename)
    return pilfont
def truetype(font=None, size=10, index=0, encoding="",
             layout_engine=None):
    """
    Load a TrueType or OpenType font from a file or file-like object,
    and create a font object.
    This function loads a font object from the given file or file-like
    object, and creates a font object for a font of the given size.

    This function requires the _imagingft service.

    :param font: A filename or file-like object containing a TrueType font.
                     Under Windows, if the file is not found in this filename,
                     the loader also looks in Windows :file:`fonts/` directory.
    :param size: The requested size, in points.
    :param index: Which font face to load (default is first available face).
    :param encoding: Which font encoding to use (default is Unicode). Common
                     encodings are "unic" (Unicode), "symb" (Microsoft
                     Symbol), "ADOB" (Adobe Standard), "ADBE" (Adobe Expert),
                     and "armn" (Apple Roman). See the FreeType documentation
                     for more information.
    :param layout_engine: Which layout engine to use, if available:
                     `ImageFont.LAYOUT_BASIC` or `ImageFont.LAYOUT_RAQM`.
    :return: A font object.
    :exception IOError: If the file could not be read.
    """
    try:
        return FreeTypeFont(font, size, index, encoding, layout_engine)
    except IOError:
        # Direct load failed: fall back to searching the platform's
        # standard font directories for a file with the same base name.
        ttf_filename = os.path.basename(font)

        dirs = []
        if sys.platform == "win32":
            # check the windows font repository
            # NOTE: must use uppercase WINDIR, to work around bugs in
            # 1.5.2's os.environ.get()
            windir = os.environ.get("WINDIR")
            if windir:
                dirs.append(os.path.join(windir, "fonts"))
        elif sys.platform in ('linux', 'linux2'):
            lindirs = os.environ.get("XDG_DATA_DIRS", "")
            if not lindirs:
                # According to the freedesktop spec, XDG_DATA_DIRS should
                # default to /usr/share
                lindirs = '/usr/share'
            dirs += [os.path.join(lindir, "fonts")
                     for lindir in lindirs.split(":")]
        elif sys.platform == 'darwin':
            dirs += ['/Library/Fonts', '/System/Library/Fonts',
                     os.path.expanduser('~/Library/Fonts')]

        ext = os.path.splitext(ttf_filename)[1]
        first_font_with_a_different_extension = None
        for directory in dirs:
            for walkroot, walkdir, walkfilenames in os.walk(directory):
                for walkfilename in walkfilenames:
                    # With an extension: require an exact filename match.
                    if ext and walkfilename == ttf_filename:
                        fontpath = os.path.join(walkroot, walkfilename)
                        return FreeTypeFont(fontpath, size, index,
                                            encoding, layout_engine)
                    # Without an extension: match on the stem; prefer .ttf,
                    # but remember the first non-.ttf candidate as fallback.
                    elif (not ext and
                          os.path.splitext(walkfilename)[0] == ttf_filename):
                        fontpath = os.path.join(walkroot, walkfilename)
                        if os.path.splitext(fontpath)[1] == '.ttf':
                            return FreeTypeFont(fontpath, size, index,
                                                encoding, layout_engine)
                        if not ext \
                           and first_font_with_a_different_extension is None:
                            first_font_with_a_different_extension = fontpath
        if first_font_with_a_different_extension:
            return FreeTypeFont(first_font_with_a_different_extension, size,
                                index, encoding, layout_engine)
        # Nothing matched anywhere: re-raise the original IOError.
        raise
def load_path(filename):
    """
    Load font file. Same as :py:func:`~PIL.ImageFont.load`, but searches for a
    bitmap font along the Python path.

    :param filename: Name of font file.
    :return: A font object.
    :exception IOError: If the file could not be read.
    """
    for directory in sys.path:
        if not isDirectory(directory):
            continue
        if not isinstance(filename, str):
            # Normalize bytes/unicode to the native str type.
            if py3:
                filename = filename.decode("utf-8")
            else:
                filename = filename.encode("utf-8")
        try:
            return load(os.path.join(directory, filename))
        except IOError:
            pass
    raise IOError("cannot find font file")
def load_default():
    """Load a "better than nothing" default font.

    .. versionadded:: 1.1.4

    :return: A font object.
    """
    from io import BytesIO
    import base64
    f = ImageFont()
    # The embedded data below is the courB08 PIL font: first blob is the
    # PILfont metrics file, second blob is the bitmap image opened via
    # Image.open.
    f._load_pilfont_data(
        # courB08
        BytesIO(base64.b64decode(b'''
UElMZm9udAo7Ozs7OzsxMDsKREFUQQoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYAAAAA//8AAQAAAAAAAAABAAEA
BgAAAAH/+gADAAAAAQAAAAMABgAGAAAAAf/6AAT//QADAAAABgADAAYAAAAA//kABQABAAYAAAAL
AAgABgAAAAD/+AAFAAEACwAAABAACQAGAAAAAP/5AAUAAAAQAAAAFQAHAAYAAP////oABQAAABUA
AAAbAAYABgAAAAH/+QAE//wAGwAAAB4AAwAGAAAAAf/5AAQAAQAeAAAAIQAIAAYAAAAB//kABAAB
ACEAAAAkAAgABgAAAAD/+QAE//0AJAAAACgABAAGAAAAAP/6AAX//wAoAAAALQAFAAYAAAAB//8A
BAACAC0AAAAwAAMABgAAAAD//AAF//0AMAAAADUAAQAGAAAAAf//AAMAAAA1AAAANwABAAYAAAAB
//kABQABADcAAAA7AAgABgAAAAD/+QAFAAAAOwAAAEAABwAGAAAAAP/5AAYAAABAAAAARgAHAAYA
AAAA//kABQAAAEYAAABLAAcABgAAAAD/+QAFAAAASwAAAFAABwAGAAAAAP/5AAYAAABQAAAAVgAH
AAYAAAAA//kABQAAAFYAAABbAAcABgAAAAD/+QAFAAAAWwAAAGAABwAGAAAAAP/5AAUAAABgAAAA
ZQAHAAYAAAAA//kABQAAAGUAAABqAAcABgAAAAD/+QAFAAAAagAAAG8ABwAGAAAAAf/8AAMAAABv
AAAAcQAEAAYAAAAA//wAAwACAHEAAAB0AAYABgAAAAD/+gAE//8AdAAAAHgABQAGAAAAAP/7AAT/
/gB4AAAAfAADAAYAAAAB//oABf//AHwAAACAAAUABgAAAAD/+gAFAAAAgAAAAIUABgAGAAAAAP/5
AAYAAQCFAAAAiwAIAAYAAP////oABgAAAIsAAACSAAYABgAA////+gAFAAAAkgAAAJgABgAGAAAA
AP/6AAUAAACYAAAAnQAGAAYAAP////oABQAAAJ0AAACjAAYABgAA////+gAFAAAAowAAAKkABgAG
AAD////6AAUAAACpAAAArwAGAAYAAAAA//oABQAAAK8AAAC0AAYABgAA////+gAGAAAAtAAAALsA
BgAGAAAAAP/6AAQAAAC7AAAAvwAGAAYAAP////oABQAAAL8AAADFAAYABgAA////+gAGAAAAxQAA
AMwABgAGAAD////6AAUAAADMAAAA0gAGAAYAAP////oABQAAANIAAADYAAYABgAA////+gAGAAAA
2AAAAN8ABgAGAAAAAP/6AAUAAADfAAAA5AAGAAYAAP////oABQAAAOQAAADqAAYABgAAAAD/+gAF
AAEA6gAAAO8ABwAGAAD////6AAYAAADvAAAA9gAGAAYAAAAA//oABQAAAPYAAAD7AAYABgAA////
+gAFAAAA+wAAAQEABgAGAAD////6AAYAAAEBAAABCAAGAAYAAP////oABgAAAQgAAAEPAAYABgAA
////+gAGAAABDwAAARYABgAGAAAAAP/6AAYAAAEWAAABHAAGAAYAAP////oABgAAARwAAAEjAAYA
BgAAAAD/+gAFAAABIwAAASgABgAGAAAAAf/5AAQAAQEoAAABKwAIAAYAAAAA//kABAABASsAAAEv
AAgABgAAAAH/+QAEAAEBLwAAATIACAAGAAAAAP/5AAX//AEyAAABNwADAAYAAAAAAAEABgACATcA
AAE9AAEABgAAAAH/+QAE//wBPQAAAUAAAwAGAAAAAP/7AAYAAAFAAAABRgAFAAYAAP////kABQAA
AUYAAAFMAAcABgAAAAD/+wAFAAABTAAAAVEABQAGAAAAAP/5AAYAAAFRAAABVwAHAAYAAAAA//sA
BQAAAVcAAAFcAAUABgAAAAD/+QAFAAABXAAAAWEABwAGAAAAAP/7AAYAAgFhAAABZwAHAAYAAP//
//kABQAAAWcAAAFtAAcABgAAAAD/+QAGAAABbQAAAXMABwAGAAAAAP/5AAQAAgFzAAABdwAJAAYA
AP////kABgAAAXcAAAF+AAcABgAAAAD/+QAGAAABfgAAAYQABwAGAAD////7AAUAAAGEAAABigAF
AAYAAP////sABQAAAYoAAAGQAAUABgAAAAD/+wAFAAABkAAAAZUABQAGAAD////7AAUAAgGVAAAB
mwAHAAYAAAAA//sABgACAZsAAAGhAAcABgAAAAD/+wAGAAABoQAAAacABQAGAAAAAP/7AAYAAAGn
AAABrQAFAAYAAAAA//kABgAAAa0AAAGzAAcABgAA////+wAGAAABswAAAboABQAGAAD////7AAUA
AAG6AAABwAAFAAYAAP////sABgAAAcAAAAHHAAUABgAAAAD/+wAGAAABxwAAAc0ABQAGAAD////7
AAYAAgHNAAAB1AAHAAYAAAAA//sABQAAAdQAAAHZAAUABgAAAAH/+QAFAAEB2QAAAd0ACAAGAAAA
Av/6AAMAAQHdAAAB3gAHAAYAAAAA//kABAABAd4AAAHiAAgABgAAAAD/+wAF//0B4gAAAecAAgAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYAAAAB
//sAAwACAecAAAHpAAcABgAAAAD/+QAFAAEB6QAAAe4ACAAGAAAAAP/5AAYAAAHuAAAB9AAHAAYA
AAAA//oABf//AfQAAAH5AAUABgAAAAD/+QAGAAAB+QAAAf8ABwAGAAAAAv/5AAMAAgH/AAACAAAJ
AAYAAAAA//kABQABAgAAAAIFAAgABgAAAAH/+gAE//sCBQAAAggAAQAGAAAAAP/5AAYAAAIIAAAC
DgAHAAYAAAAB//kABf/+Ag4AAAISAAUABgAA////+wAGAAACEgAAAhkABQAGAAAAAP/7AAX//gIZ
AAACHgADAAYAAAAA//wABf/9Ah4AAAIjAAEABgAAAAD/+QAHAAACIwAAAioABwAGAAAAAP/6AAT/
+wIqAAACLgABAAYAAAAA//kABP/8Ai4AAAIyAAMABgAAAAD/+gAFAAACMgAAAjcABgAGAAAAAf/5
AAT//QI3AAACOgAEAAYAAAAB//kABP/9AjoAAAI9AAQABgAAAAL/+QAE//sCPQAAAj8AAgAGAAD/
///7AAYAAgI/AAACRgAHAAYAAAAA//kABgABAkYAAAJMAAgABgAAAAH//AAD//0CTAAAAk4AAQAG
AAAAAf//AAQAAgJOAAACUQADAAYAAAAB//kABP/9AlEAAAJUAAQABgAAAAH/+QAF//4CVAAAAlgA
BQAGAAD////7AAYAAAJYAAACXwAFAAYAAP////kABgAAAl8AAAJmAAcABgAA////+QAGAAACZgAA
Am0ABwAGAAD////5AAYAAAJtAAACdAAHAAYAAAAA//sABQACAnQAAAJ5AAcABgAA////9wAGAAAC
eQAAAoAACQAGAAD////3AAYAAAKAAAAChwAJAAYAAP////cABgAAAocAAAKOAAkABgAA////9wAG
AAACjgAAApUACQAGAAD////4AAYAAAKVAAACnAAIAAYAAP////cABgAAApwAAAKjAAkABgAA////
+gAGAAACowAAAqoABgAGAAAAAP/6AAUAAgKqAAACrwAIAAYAAP////cABQAAAq8AAAK1AAkABgAA
////9wAFAAACtQAAArsACQAGAAD////3AAUAAAK7AAACwQAJAAYAAP////gABQAAAsEAAALHAAgA
BgAAAAD/9wAEAAACxwAAAssACQAGAAAAAP/3AAQAAALLAAACzwAJAAYAAAAA//cABAAAAs8AAALT
AAkABgAAAAD/+AAEAAAC0wAAAtcACAAGAAD////6AAUAAALXAAAC3QAGAAYAAP////cABgAAAt0A
AALkAAkABgAAAAD/9wAFAAAC5AAAAukACQAGAAAAAP/3AAUAAALpAAAC7gAJAAYAAAAA//cABQAA
Au4AAALzAAkABgAAAAD/9wAFAAAC8wAAAvgACQAGAAAAAP/4AAUAAAL4AAAC/QAIAAYAAAAA//oA
Bf//Av0AAAMCAAUABgAA////+gAGAAADAgAAAwkABgAGAAD////3AAYAAAMJAAADEAAJAAYAAP//
//cABgAAAxAAAAMXAAkABgAA////9wAGAAADFwAAAx4ACQAGAAD////4AAYAAAAAAAoABwASAAYA
AP////cABgAAAAcACgAOABMABgAA////+gAFAAAADgAKABQAEAAGAAD////6AAYAAAAUAAoAGwAQ
AAYAAAAA//gABgAAABsACgAhABIABgAAAAD/+AAGAAAAIQAKACcAEgAGAAAAAP/4AAYAAAAnAAoA
LQASAAYAAAAA//gABgAAAC0ACgAzABIABgAAAAD/+QAGAAAAMwAKADkAEQAGAAAAAP/3AAYAAAA5
AAoAPwATAAYAAP////sABQAAAD8ACgBFAA8ABgAAAAD/+wAFAAIARQAKAEoAEQAGAAAAAP/4AAUA
AABKAAoATwASAAYAAAAA//gABQAAAE8ACgBUABIABgAAAAD/+AAFAAAAVAAKAFkAEgAGAAAAAP/5
AAUAAABZAAoAXgARAAYAAAAA//gABgAAAF4ACgBkABIABgAAAAD/+AAGAAAAZAAKAGoAEgAGAAAA
AP/4AAYAAABqAAoAcAASAAYAAAAA//kABgAAAHAACgB2ABEABgAAAAD/+AAFAAAAdgAKAHsAEgAG
AAD////4AAYAAAB7AAoAggASAAYAAAAA//gABQAAAIIACgCHABIABgAAAAD/+AAFAAAAhwAKAIwA
EgAGAAAAAP/4AAUAAACMAAoAkQASAAYAAAAA//gABQAAAJEACgCWABIABgAAAAD/+QAFAAAAlgAK
AJsAEQAGAAAAAP/6AAX//wCbAAoAoAAPAAYAAAAA//oABQABAKAACgClABEABgAA////+AAGAAAA
pQAKAKwAEgAGAAD////4AAYAAACsAAoAswASAAYAAP////gABgAAALMACgC6ABIABgAA////+QAG
AAAAugAKAMEAEQAGAAD////4AAYAAgDBAAoAyAAUAAYAAP////kABQACAMgACgDOABMABgAA////
+QAGAAIAzgAKANUAEw==
''')), Image.open(BytesIO(base64.b64decode(b'''
iVBORw0KGgoAAAANSUhEUgAAAx4AAAAUAQAAAAArMtZoAAAEwElEQVR4nABlAJr/AHVE4czCI/4u
Mc4b7vuds/xzjz5/3/7u/n9vMe7vnfH/9++vPn/xyf5zhxzjt8GHw8+2d83u8x27199/nxuQ6Od9
M43/5z2I+9n9ZtmDBwMQECDRQw/eQIQohJXxpBCNVE6QCCAAAAD//wBlAJr/AgALyj1t/wINwq0g
LeNZUworuN1cjTPIzrTX6ofHWeo3v336qPzfEwRmBnHTtf95/fglZK5N0PDgfRTslpGBvz7LFc4F
IUXBWQGjQ5MGCx34EDFPwXiY4YbYxavpnhHFrk14CDAAAAD//wBlAJr/AgKqRooH2gAgPeggvUAA
Bu2WfgPoAwzRAABAAAAAAACQgLz/3Uv4Gv+gX7BJgDeeGP6AAAD1NMDzKHD7ANWr3loYbxsAD791
NAADfcoIDyP44K/jv4Y63/Z+t98Ovt+ub4T48LAAAAD//wBlAJr/AuplMlADJAAAAGuAphWpqhMx
in0A/fRvAYBABPgBwBUgABBQ/sYAyv9g0bCHgOLoGAAAAAAAREAAwI7nr0ArYpow7aX8//9LaP/9
SjdavWA8ePHeBIKB//81/83ndznOaXx379wAAAD//wBlAJr/AqDxW+D3AABAAbUh/QMnbQag/gAY
AYDAAACgtgD/gOqAAAB5IA/8AAAk+n9w0AAA8AAAmFRJuPo27ciC0cD5oeW4E7KA/wD3ECMAn2tt
y8PgwH8AfAxFzC0JzeAMtratAsC/ffwAAAD//wBlAJr/BGKAyCAA4AAAAvgeYTAwHd1kmQF5chkG
ABoMIHcL5xVpTfQbUqzlAAAErwAQBgAAEOClA5D9il08AEh/tUzdCBsXkbgACED+woQg8Si9VeqY
lODCn7lmF6NhnAEYgAAA/NMIAAAAAAD//2JgjLZgVGBg5Pv/Tvpc8hwGBjYGJADjHDrAwPzAjv/H
/Wf3PzCwtzcwHmBgYGcwbZz8wHaCAQMDOwMDQ8MCBgYOC3W7mp+f0w+wHOYxO3OG+e376hsMZjk3
AAAAAP//YmCMY2A4wMAIN5e5gQETPD6AZisDAwMDgzSDAAPjByiHcQMDAwMDg1nOze1lByRu5/47
c4859311AYNZzg0AAAAA//9iYGDBYihOIIMuwIjGL39/fwffA8b//xv/P2BPtzzHwCBjUQAAAAD/
/yLFBrIBAAAA//9i1HhcwdhizX7u8NZNzyLbvT97bfrMf/QHI8evOwcSqGUJAAAA//9iYBB81iSw
pEE170Qrg5MIYydHqwdDQRMrAwcVrQAAAAD//2J4x7j9AAMDn8Q/BgYLBoaiAwwMjPdvMDBYM1Tv
oJodAAAAAP//Yqo/83+dxePWlxl3npsel9lvLfPcqlE9725C+acfVLMEAAAA//9i+s9gwCoaaGMR
evta/58PTEWzr21hufPjA8N+qlnBwAAAAAD//2JiWLci5v1+HmFXDqcnULE/MxgYGBj+f6CaJQAA
AAD//2Ji2FrkY3iYpYC5qDeGgeEMAwPDvwQBBoYvcTwOVLMEAAAA//9isDBgkP///0EOg9z35v//
Gc/eeW7BwPj5+QGZhANUswMAAAD//2JgqGBgYGBgqEMXlvhMPUsAAAAA//8iYDd1AAAAAP//AwDR
w7IkEbzhVQAAAABJRU5ErkJggg==
'''))))
    return f
|
|
from inspect import isclass
from django.conf import settings
from django.core.files.storage import get_storage_class
from celery.datastructures import AttributeDict
from tower import ugettext_lazy as _
__all__ = ('LOG', 'LOG_BY_ID', 'LOG_KEEP',)
class _LOG(object):
    """Base type for activity-log actions.

    Subclasses declare a numeric ``id``, a translatable ``format`` string
    and optional flags (``keep``, ``review_queue``, ``admin_event``,
    ``editor_event``, ``review_email_user``, ``hide_developer``) that the
    module-level ``LOG_*`` lists are built from via introspection.
    """
    # Presentation class for the action (e.g. 'add', 'edit'); None = none.
    action_class = None
# --- Add-on authoring and version-review actions. ``id`` values are
# persisted in ActivityLog rows, so they must never be renumbered. ---


class CREATE_ADDON(_LOG):
    id = 1
    action_class = 'add'
    format = _(u'{addon} was created.')
    keep = True


class EDIT_PROPERTIES(_LOG):
    """ Expects: addon """
    id = 2
    action_class = 'edit'
    format = _(u'{addon} properties edited.')


class EDIT_DESCRIPTIONS(_LOG):
    id = 3
    action_class = 'edit'
    format = _(u'{addon} description edited.')


class EDIT_CATEGORIES(_LOG):
    id = 4
    action_class = 'edit'
    format = _(u'Categories edited for {addon}.')


class ADD_USER_WITH_ROLE(_LOG):
    id = 5
    action_class = 'add'
    format = _(u'{0.name} ({1}) added to {addon}.')
    keep = True


class REMOVE_USER_WITH_ROLE(_LOG):
    id = 6
    action_class = 'delete'
    # L10n: {0} is the user being removed, {1} is their role.
    format = _(u'{0.name} ({1}) removed from {addon}.')
    keep = True


class EDIT_CONTRIBUTIONS(_LOG):
    id = 7
    action_class = 'edit'
    format = _(u'Contributions for {addon}.')


class USER_DISABLE(_LOG):
    id = 8
    format = _(u'{addon} disabled.')
    keep = True


class USER_ENABLE(_LOG):
    id = 9
    format = _(u'{addon} enabled.')
    keep = True


# TODO(davedash): Log these types when pages are present
class SET_PUBLIC_STATS(_LOG):
    id = 10
    format = _(u'Stats set public for {addon}.')
    keep = True


# TODO(davedash): Log these types when pages are present
class UNSET_PUBLIC_STATS(_LOG):
    id = 11
    format = _(u'{addon} stats set to private.')
    keep = True


class CHANGE_STATUS(_LOG):
    id = 12
    # L10n: {0} is the status
    format = _(u'{addon} status changed to {0}.')
    keep = True


class ADD_PREVIEW(_LOG):
    id = 13
    action_class = 'add'
    format = _(u'Preview added to {addon}.')


class EDIT_PREVIEW(_LOG):
    id = 14
    action_class = 'edit'
    format = _(u'Preview edited for {addon}.')


class DELETE_PREVIEW(_LOG):
    id = 15
    action_class = 'delete'
    format = _(u'Preview deleted from {addon}.')


class ADD_VERSION(_LOG):
    id = 16
    action_class = 'add'
    format = _(u'{version} added to {addon}.')
    keep = True


class EDIT_VERSION(_LOG):
    id = 17
    action_class = 'edit'
    format = _(u'{version} edited for {addon}.')


class DELETE_VERSION(_LOG):
    id = 18
    action_class = 'delete'
    # Note, {0} is a string not a version since the version is deleted.
    # L10n: {0} is the version number
    format = _(u'Version {0} deleted from {addon}.')
    keep = True


class ADD_FILE_TO_VERSION(_LOG):
    id = 19
    action_class = 'add'
    format = _(u'File {0.name} added to {version} of {addon}.')


class DELETE_FILE_FROM_VERSION(_LOG):
    """
    Expecting: addon, filename, version

    Because the file is being deleted, filename and version
    should be strings and not the object.
    """
    id = 20
    action_class = 'delete'
    format = _(u'File {0} deleted from {version} of {addon}.')


class APPROVE_VERSION(_LOG):
    id = 21
    action_class = 'approve'
    format = _(u'{addon} {version} approved.')
    short = _(u'Approved')
    keep = True
    review_email_user = True
    review_queue = True


class PRELIMINARY_VERSION(_LOG):
    id = 42
    action_class = 'approve'
    format = _(u'{addon} {version} given preliminary review.')
    short = _(u'Preliminarily approved')
    keep = True
    review_email_user = True
    review_queue = True


class REJECT_VERSION(_LOG):
    # takes add-on, version, reviewtype
    id = 43
    action_class = 'reject'
    format = _(u'{addon} {version} rejected.')
    short = _(u'Rejected')
    keep = True
    review_email_user = True
    review_queue = True


class RETAIN_VERSION(_LOG):
    # takes add-on, version, reviewtype
    id = 22
    format = _(u'{addon} {version} retained.')
    short = _(u'Retained')
    keep = True
    review_email_user = True
    review_queue = True


class ESCALATE_VERSION(_LOG):
    # takes add-on, version, reviewtype
    id = 23
    format = _(u'{addon} {version} escalated.')
    short = _(u'Escalated')
    keep = True
    review_email_user = True
    review_queue = True


class REQUEST_VERSION(_LOG):
    # takes add-on, version, reviewtype
    id = 24
    format = _(u'{addon} {version} review requested.')
    short = _(u'Review requested')
    keep = True
    review_email_user = True
    review_queue = True


class REQUEST_INFORMATION(_LOG):
    id = 44
    format = _(u'{addon} {version} more information requested.')
    short = _(u'More information requested')
    keep = True
    review_email_user = True
    review_queue = True


class REQUEST_SUPER_REVIEW(_LOG):
    id = 45
    format = _(u'{addon} {version} super review requested.')
    short = _(u'Super review requested')
    keep = True
    review_queue = True


class COMMENT_VERSION(_LOG):
    id = 49
    format = _(u'Comment on {addon} {version}.')
    short = _(u'Comment')
    keep = True
    review_queue = True
    hide_developer = True
# --- Tagging, collections, reviews, admin-site and payments actions. ---


class ADD_TAG(_LOG):
    id = 25
    action_class = 'tag'
    format = _(u'{tag} added to {addon}.')


class REMOVE_TAG(_LOG):
    id = 26
    action_class = 'tag'
    format = _(u'{tag} removed from {addon}.')


class ADD_TO_COLLECTION(_LOG):
    id = 27
    action_class = 'collection'
    format = _(u'{addon} added to {collection}.')


class REMOVE_FROM_COLLECTION(_LOG):
    id = 28
    action_class = 'collection'
    format = _(u'{addon} removed from {collection}.')


class ADD_REVIEW(_LOG):
    id = 29
    action_class = 'review'
    format = _(u'{review} for {addon} written.')


# TODO(davedash): Add these when we do the admin site
class ADD_RECOMMENDED_CATEGORY(_LOG):
    id = 31
    action_class = 'edit'
    # L10n: {0} is a category name.
    format = _(u'{addon} featured in {0}.')


class REMOVE_RECOMMENDED_CATEGORY(_LOG):
    id = 32
    action_class = 'edit'
    # L10n: {0} is a category name.
    format = _(u'{addon} no longer featured in {0}.')


class ADD_RECOMMENDED(_LOG):
    id = 33
    format = _(u'{addon} is now featured.')
    keep = True


class REMOVE_RECOMMENDED(_LOG):
    id = 34
    format = _(u'{addon} is no longer featured.')
    keep = True


class ADD_APPVERSION(_LOG):
    id = 35
    action_class = 'add'
    # L10n: {0} is the application, {1} is the version of the app
    format = _(u'{0} {1} added.')


class CHANGE_USER_WITH_ROLE(_LOG):
    """ Expects: author.user, role, addon """
    id = 36
    # L10n: {0} is a user, {1} is their role
    format = _(u'{0.name} role changed to {1} for {addon}.')
    keep = True


class CHANGE_POLICY(_LOG):
    id = 38
    action_class = 'edit'
    format = _(u'{addon} policy changed.')


class CHANGE_ICON(_LOG):
    id = 39
    action_class = 'edit'
    format = _(u'{addon} icon changed.')


class APPROVE_REVIEW(_LOG):
    id = 40
    action_class = 'approve'
    format = _(u'{review} for {addon} approved.')
    editor_format = _(u'{user} approved {review} for {addon}.')
    keep = True
    editor_event = True


class DELETE_REVIEW(_LOG):
    """Requires review.id and add-on objects."""
    id = 41
    action_class = 'review'
    format = _(u'Review {review} for {addon} deleted.')
    editor_format = _(u'{user} deleted {review} for {addon}.')
    keep = True
    editor_event = True


class MAX_APPVERSION_UPDATED(_LOG):
    id = 46
    format = _(u'Application max version for {version} updated.')


class BULK_VALIDATION_EMAILED(_LOG):
    id = 47
    format = _(u'Authors emailed about compatibility of {version}.')


class BULK_VALIDATION_USER_EMAILED(_LOG):
    # NOTE(review): id 130 collides with FAIL_ADDITIONAL_REVIEW further
    # down in this module; only one of the two survives in LOG_BY_ID.
    # Ids are persisted, so fixing this needs a data migration.
    id = 130
    format = _(u'Email sent to Author about add-on compatibility.')


class CHANGE_PASSWORD(_LOG):
    id = 48
    format = _(u'Password changed.')


class MAKE_PREMIUM(_LOG):
    id = 50
    format = _(u'{addon} changed to premium.')


class MANIFEST_UPDATED(_LOG):
    id = 52
    format = _(u'{addon} manifest updated.')


class APPROVE_VERSION_PRIVATE(_LOG):
    id = 53
    action_class = 'approve'
    format = _(u'{addon} {version} approved but private.')
    short = _(u'Approved but private')
    keep = True
    review_email_user = True
    review_queue = True


class PURCHASE_ADDON(_LOG):
    id = 54
    format = _(u'{addon} purchased.')


class INSTALL_ADDON(_LOG):
    id = 55
    format = _(u'{addon} installed.')


class REFUND_REQUESTED(_LOG):
    id = 56
    format = _(u'Refund requested for {addon}')


class REFUND_DECLINED(_LOG):
    id = 57
    format = _(u'Refund declined for {addon} for {0}.')


class REFUND_GRANTED(_LOG):
    id = 58
    format = _(u'Refund granted for {addon} for {0}.')


class REFUND_INSTANT(_LOG):
    id = 59
    format = _(u'Instant refund granted for {addon}.')


class USER_EDITED(_LOG):
    id = 60
    format = _(u'Account updated.')


class RECEIPT_CHECKED(_LOG):
    id = 65
    format = _(u'Valid receipt was checked for {addon}.')
# --- Escalation, re-review, admin and abuse-report actions. ---


class ESCALATION_CLEARED(_LOG):
    id = 66
    format = _(u'Escalation cleared for {addon}.')
    short = _(u'Escalation cleared')
    keep = True
    review_queue = True


class APP_DISABLED(_LOG):
    id = 67
    format = _(u'{addon} banned.')
    short = _(u'App banned')
    keep = True
    review_queue = True


class ESCALATED_HIGH_ABUSE(_LOG):
    id = 68
    format = _(u'{addon} escalated because of high number of abuse reports.')
    short = _(u'High Abuse Reports')
    keep = True
    review_queue = True


class ESCALATED_HIGH_REFUNDS(_LOG):
    id = 69
    format = _(u'{addon} escalated because of high number of refund requests.')
    short = _(u'High Refund Requests')
    keep = True
    review_queue = True


class REREVIEW_MANIFEST_CHANGE(_LOG):
    id = 70
    format = _(u'{addon} re-reviewed because of manifest change.')
    short = _(u'Manifest Change')
    keep = True
    review_queue = True


class REREVIEW_PREMIUM_TYPE_UPGRADE(_LOG):
    id = 71
    format = _(u'{addon} re-reviewed because app upgraded premium type.')
    short = _(u'Premium Type Upgrade')
    keep = True
    review_queue = True


class REREVIEW_CLEARED(_LOG):
    id = 72
    format = _(u'Re-review cleared for {addon}.')
    short = _(u'Re-review cleared')
    keep = True
    review_queue = True


class ESCALATE_MANUAL(_LOG):
    id = 73
    format = _(u'{addon} escalated by reviewer.')
    short = _(u'Reviewer escalation')
    keep = True
    review_queue = True
# TODO(robhudson): Escalation log for editor escalation..


class VIDEO_ERROR(_LOG):
    id = 74
    format = _(u'Video removed from {addon} because of a problem with '
               u'the video. ')
    short = _(u'Video removed')


class REREVIEW_DEVICES_ADDED(_LOG):
    id = 75
    format = _(u'{addon} re-review because of new device(s) added.')
    short = _(u'Device(s) Added')
    keep = True
    review_queue = True


class REVIEW_DEVICE_OVERRIDE(_LOG):
    id = 76
    format = _(u'{addon} device support manually changed by reviewer.')
    short = _(u'Device(s) Changed by Reviewer')
    keep = True
    review_queue = True


class WEBAPP_RESUBMIT(_LOG):
    id = 77
    format = _(u'{addon} resubmitted for review.')
    short = _(u'App Resubmission')
    keep = True
    review_queue = True


class ESCALATION_VIP_APP(_LOG):
    id = 78
    format = _(u'{addon} auto-escalated because its a VIP app.')
    short = _(u'VIP auto-escalation')
    keep = True
    review_queue = True


class REREVIEW_MANIFEST_URL_CHANGE(_LOG):
    id = 79
    format = _(u'{addon} re-reviewed because of manifest URL change.')
    short = _(u'Manifest URL Change')
    keep = True
    review_queue = True


class ESCALATION_PRERELEASE_APP(_LOG):
    id = 80
    format = _(u'{addon} auto-escalated because its a prerelease app.')
    short = _(u'Prerelease auto-escalation')
    keep = True
    review_queue = True


class REREVIEW_ABUSE_APP(_LOG):
    id = 81
    format = _(
        u'{addon} re-reviewed because abuse reports need investigation.')
    short = _(u'Abuse reports investigation')
    keep = True
    review_queue = True


class REREVIEW_MANUAL(_LOG):
    id = 82
    format = _(u'{addon} manually flagged for re-review.')
    short = _(u'Manual re-review')
    keep = True
    review_queue = True


class CUSTOM_TEXT(_LOG):
    id = 98
    format = '{0}'


class CUSTOM_HTML(_LOG):
    id = 99
    format = '{0}'


class OBJECT_ADDED(_LOG):
    id = 100
    format = _(u'Created: {0}.')
    admin_event = True


class OBJECT_EDITED(_LOG):
    id = 101
    format = _(u'Edited field: {2} set to: {0}.')
    admin_event = True


class OBJECT_DELETED(_LOG):
    id = 102
    format = _(u'Deleted: {1}.')
    admin_event = True


class ADMIN_USER_EDITED(_LOG):
    id = 103
    format = _(u'User {user} edited, reason: {1}')
    admin_event = True


class ADMIN_USER_ANONYMIZED(_LOG):
    id = 104
    format = _(u'User {user} anonymized.')
    admin_event = True


class ADMIN_USER_RESTRICTED(_LOG):
    id = 105
    format = _(u'User {user} restricted.')
    admin_event = True


class ADMIN_VIEWED_LOG(_LOG):
    id = 106
    format = _(u'Admin {0} viewed activity log for {user}.')
    admin_event = True


class EDIT_REVIEW(_LOG):
    id = 107
    action_class = 'review'
    format = _(u'{review} for {addon} updated.')


class THEME_REVIEW(_LOG):
    id = 108
    action_class = 'review'
    format = _(u'{addon} reviewed.')


class GROUP_USER_ADDED(_LOG):
    id = 120
    action_class = 'access'
    format = _(u'User {0.name} added to {group}.')
    keep = True
    admin_event = True


class GROUP_USER_REMOVED(_LOG):
    id = 121
    action_class = 'access'
    format = _(u'User {0.name} removed from {group}.')
    keep = True
    admin_event = True


class REVIEW_FEATURES_OVERRIDE(_LOG):
    id = 122
    format = _(u'{addon} minimum requirements manually changed by reviewer.')
    short = _(u'Requirements Changed by Reviewer')
    keep = True
    review_queue = True


class REREVIEW_FEATURES_CHANGED(_LOG):
    id = 123
    format = _(u'{addon} minimum requirements manually changed.')
    short = _(u'Requirements Changed')
    keep = True
    review_queue = True


class CHANGE_VERSION_STATUS(_LOG):
    id = 124
    # L10n: {0} is the status
    format = _(u'{version} status changed to {0}.')
    keep = True


class DELETE_USER_LOOKUP(_LOG):
    id = 125
    # L10n: {0} is the status
    format = _(u'User {0.name} {0.id} deleted via lookup tool.')
    keep = True


class CONTENT_RATING_TO_ADULT(_LOG):
    id = 126
    format = _('{addon} content rating changed to Adult.')
    review_queue = True


class CONTENT_RATING_CHANGED(_LOG):
    id = 127
    format = _('{addon} content rating changed.')


class PRIORITY_REVIEW_REQUESTED(_LOG):
    id = 128
    format = _(u'Priority review requested for {addon}.')
    short = _(u'Priority Review')
    keep = True
    review_queue = True


class PASS_ADDITIONAL_REVIEW(_LOG):
    id = 129
    action_class = 'review'
    format = _(u'{addon} {version} passed the {queue} review.')
    review_queue = True


class FAIL_ADDITIONAL_REVIEW(_LOG):
    # NOTE(review): id 130 collides with BULK_VALIDATION_USER_EMAILED
    # earlier in this module; only one of the two survives in LOG_BY_ID.
    # Ids are persisted, so fixing this needs a data migration.
    id = 130
    action_class = 'review'
    format = _(u'{addon} {version} failed the {queue} review.')
    review_queue = True


class APP_ABUSE_MARKREAD(_LOG):
    """Requires report.id and add-on objects."""
    id = 131
    format = _(u'Abuse report {report} for {addon} read.')
    editor_format = _(u'{user} marked read {report} for {addon}.')
    keep = True
    editor_event = True


class WEBSITE_ABUSE_MARKREAD(_LOG):
    """Requires report.id and website objects."""
    id = 132
    format = _(u'Abuse report {report} for {website} read.')
    editor_format = _(u'{user} marked read {report} for {website}.')
    keep = True
    editor_event = True
# Adding a log type? If it's a review_queue log type, you have to add a
# note_type to constants/comm.py.

# Collect every _LOG subclass defined above via module introspection.
LOGS = [x for x in vars().values()
        if isclass(x) and issubclass(x, _LOG) and x != _LOG]

# NOTE(review): two classes above share id 130 (BULK_VALIDATION_USER_EMAILED
# and FAIL_ADDITIONAL_REVIEW), so one of them is silently dropped here —
# which one depends on dict iteration order.
LOG_BY_ID = dict((l.id, l) for l in LOGS)
LOG = AttributeDict((l.__name__, l) for l in LOGS)
LOG_ADMINS = [l.id for l in LOGS if hasattr(l, 'admin_event')]
LOG_KEEP = [l.id for l in LOGS if hasattr(l, 'keep')]
LOG_EDITORS = [l.id for l in LOGS if hasattr(l, 'editor_event')]
LOG_REVIEW_QUEUE = [l.id for l in LOGS if hasattr(l, 'review_queue')]

# Is the user emailed the message?
LOG_REVIEW_EMAIL_USER = [l.id for l in LOGS if hasattr(l, 'review_email_user')]

# Logs *not* to show to the developer.
LOG_HIDE_DEVELOPER = [l.id for l in LOGS
                      if (getattr(l, 'hide_developer', False) or
                          l.id in LOG_ADMINS)]
def log(action, *args, **kw):
    """
    Record an activity-log entry for *action* and index it per argument.

    e.g. mkt.log(mkt.LOG.CREATE_ADDON, []),
         mkt.log(mkt.LOG.ADD_FILE_TO_VERSION, file, version)

    Recognized keyword arguments: ``user``, ``details``, ``created``,
    ``attachments``.  Returns the saved ActivityLog, or None when no user
    is available.
    """
    # Imports are deferred to avoid circular imports with the model layer.
    from mkt import get_user
    from mkt.developers.models import (ActivityLog, ActivityLogAttachment,
                                       AppLog, CommentLog, GroupLog, UserLog,
                                       VersionLog)
    from mkt.access.models import Group
    from mkt.site.utils import log as logger_log
    from mkt.webapps.models import Webapp
    from mkt.users.models import UserProfile
    from mkt.versions.models import Version

    user = kw.get('user', get_user())

    if not user:
        logger_log.warning('Activity log called with no user: %s' % action.id)
        return

    al = ActivityLog(user=user, action=action.id)
    al.arguments = args
    if 'details' in kw:
        al.details = kw['details']
    al.save()

    if 'details' in kw and 'comments' in al.details:
        CommentLog(comments=al.details['comments'], activity_log=al).save()

    # TODO(davedash): post-remora this may not be necessary.
    if 'created' in kw:
        al.created = kw['created']
        # Double save necessary since django resets the created date on save.
        al.save()

    if 'attachments' in kw:
        formset = kw['attachments']
        storage = get_storage_class()()
        for form in formset:
            data = form.cleaned_data
            if 'attachment' in data:
                attachment = data['attachment']
                storage.save('%s/%s' % (settings.REVIEWER_ATTACHMENTS_PATH,
                                        attachment.name), attachment)
                ActivityLogAttachment(activity_log=al,
                                      description=data['description'],
                                      mimetype=attachment.content_type,
                                      filepath=attachment.name).save()

    for arg in args:
        # (Model, pk) tuples let callers log against objects that are not
        # loaded from the database.
        if isinstance(arg, tuple):
            if arg[0] == Webapp:
                AppLog(addon_id=arg[1], activity_log=al).save()
            elif arg[0] == Version:
                VersionLog(version_id=arg[1], activity_log=al).save()
            elif arg[0] == UserProfile:
                UserLog(user_id=arg[1], activity_log=al).save()
            elif arg[0] == Group:
                GroupLog(group_id=arg[1], activity_log=al).save()
        if isinstance(arg, Webapp):
            AppLog(addon=arg, activity_log=al).save()
        elif isinstance(arg, Version):
            VersionLog(version=arg, activity_log=al).save()
        elif isinstance(arg, UserProfile):
            # Index by any user who is mentioned as an argument.
            UserLog(activity_log=al, user=arg).save()
        elif isinstance(arg, Group):
            GroupLog(group=arg, activity_log=al).save()

    # Index by every user
    UserLog(activity_log=al, user=user).save()
    return al
|
|
#!/usr/bin/env python
# coding: utf-8
from __future__ import (absolute_import, print_function,
division, unicode_literals)
from pkg_resources import resource_string, resource_listdir
from datetime import datetime
import os
import sys
import re
import argparse
def join(a, b):
    """Join *b* onto the absolute form of path *a*.

    PEP 8 (E731): a named ``def`` instead of an assigned lambda — same
    behavior, better tracebacks and introspection.
    """
    return os.path.join(os.path.abspath(a), b)


# Today's date as YYYYMMDD; used as the dojo folder-name prefix.
TODAY = datetime.today().strftime("%Y%m%d")
# User-facing status and error strings for the generator CLI.
GENERATOR_MESSAGES = {
    'error': 'Generator failed to generate files',
    'message': "Generating Folder...",
    'lang': "Language found: %s",
    'lang_error': "%s generator wasn't found",
    'exists': "Dojo path already exists!",
    'create': "New %s!",
}
# Directory containing the per-language template trees shipped alongside
# this module.
GENERATORS_PATH = join(os.path.dirname(__file__), 'generators')
class GeneratorException(Exception):
    """Base error for the dojo generator; carries a user-facing message."""

    value = GENERATOR_MESSAGES['error']

    def __str__(self):
        # Same rendering as repr(self.value).
        return "{!r}".format(self.value)
class GeneratorNotFoundError(GeneratorException):
    """Raised when no template tree exists for the requested language."""

    def __init__(self, language):
        self.value = GENERATOR_MESSAGES['lang_error'] % (language,)
class GeneratorPathExistsError(GeneratorException):
    # Raised when the target dojo folder already exists on disk.
    value = GENERATOR_MESSAGES['exists']
def sprint(text, show):
    """Print *text* only when *show* is truthy (quiet-mode helper)."""
    if not show:
        return
    print(text)
def list_to_args(args, show=True):
    """Parse a space-separated ``"language problem [extra ...]"`` string into
    an argparse.Namespace mirroring the CLI arguments of the generate mode.

    Raises GeneratorException when fewer than two tokens are given.
    (*show* is accepted for interface compatibility but unused.)
    """
    opts = args.split(' ')
    if len(opts) < 2:
        raise GeneratorException()
    n = argparse.Namespace()
    n.language, n.problem = opts[:2]
    n.extra = opts[2:]
    n.path = os.path.curdir
    # Bug fix: Generator.__init__ reads args.ignore, which this namespace
    # previously lacked — every programmatic call via generate() raised
    # AttributeError. Default matches the CLI's store_true default.
    n.ignore = False
    return n
class Generator(object):
    """Creates a dated dojo folder, optionally populated from a
    per-language template tree with name-placeholder substitution.

    Folder pattern: ``YYYYMMDD[_extra]_language_problem``.
    """

    def __init__(self, args):
        self.language = args.language
        self.problem = args.problem
        self.extra = ('_' + '_'.join(args.extra)) if args.extra else ''
        self.path = args.path
        self.ignore = args.ignore
        self.today = TODAY
        self.folder_name = "{date}{_extra}_{language}_{problem}".format(
            date=TODAY,
            _extra=self.extra,
            language=self.language,
            problem=self.problem,
        )
        self.folder_path = join(self.path, self.folder_name)
        self.generator_path = join(GENERATORS_PATH, self.language)
        # Derive every casing variant of the problem name once, so template
        # placeholders can be substituted in file names and contents.
        splitted_name = self.problem.split('_')
        snake_case = self.problem
        pascal_case = ''.join(part.capitalize() for part in splitted_name)
        down_case = ''.join(splitted_name)
        camel_case = (
            splitted_name.pop(0) +
            ''.join(part.capitalize() for part in splitted_name)
        )
        self.cases = {
            '___dojogen___' : snake_case,
            '___class_dojogen___' : pascal_case,
            '___down_dojogen___' : down_case,
            '___camel_dojogen___' : camel_case,
        }
        # Bug fix: removed leftover debug `print(self.cases)` that dumped the
        # placeholder map to stdout on every construction, bypassing the
        # sprint() quiet-mode convention used everywhere else.
        self.generated = False

    def replace(self, text):
        """Substitute every placeholder occurrence in *text*."""
        for sub, replace in self.cases.items():
            text = re.sub(sub, replace, text)
        return text

    def copy_and_rename(self, current, folder_name, original):
        """Recursively copy the template tree *original* into
        *current*/*folder_name*, renaming files and rewriting their
        contents via replace(). Compiled ``.pyc`` files are skipped.
        """
        isdir = lambda x: os.path.isdir(x) and not os.path.islink(x)
        folder_path = join(current, folder_name)
        os.mkdir(folder_path)
        file_list = os.listdir(original)
        for infile in file_list:
            gen_path = join(original, infile)
            if isdir(gen_path):
                self.copy_and_rename(folder_path, infile, gen_path)
            elif '.pyc' not in gen_path:
                new_path = join(folder_path, self.replace(infile))
                with open(new_path, 'w') as w:
                    with open(gen_path, 'r') as r:
                        for line in r:
                            w.write(self.replace(line))

    def generate(self, show=True):
        """Create the dojo folder.

        Raises GeneratorNotFoundError when no template exists for the
        language, or GeneratorPathExistsError when the folder is already
        there (``self.generated`` is set first, so callers such as the
        module-level generate() can still recover the path).
        """
        if not os.path.exists(self.folder_path):
            sprint(GENERATOR_MESSAGES['message'], show)
            if self.ignore:
                # NOTE(review): self.generated stays False on this path;
                # looks unintentional, but only the CLI reaches it — confirm
                # before changing.
                os.mkdir(join(self.path, self.folder_name))
            elif os.path.exists(self.generator_path):
                sprint(GENERATOR_MESSAGES['lang'] % (self.language), show)
                self.copy_and_rename(
                    self.path, self.folder_name, self.generator_path)
                self.generated = True
            else:
                raise GeneratorNotFoundError(self.language)
            sprint(GENERATOR_MESSAGES['create'] % self.folder_name, show)
        else:
            self.generated = True
            raise GeneratorPathExistsError()
def generate(generate, directory):
    # Programmatic entry point: build a dojo folder from the argument string
    # *generate* and return its path, or return *directory* unchanged when
    # *generate* is falsy.
    if generate:
        generator = Generator(list_to_args(generate))
        try:
            generator.generate()
        finally:
            # NOTE: the return inside ``finally`` deliberately swallows
            # GeneratorPathExistsError (Generator.generate sets generated=True
            # before raising it), so an existing folder is reused rather than
            # treated as an error. Other failures leave generated False and
            # propagate normally.
            if generator.generated:
                return generator.folder_path
    return directory
def generate_mode(args):
    """CLI handler for the ``generate`` subcommand."""
    Generator(args).generate()
def help_mode(args):
    """CLI handler for the ``help`` subcommand: print the per-language
    environment notes, or fail when none exist."""
    help_path = join(join(GENERATORS_PATH, "help"), args.language)
    if not os.path.exists(help_path):
        raise GeneratorNotFoundError(args.language)
    with open(help_path, 'r') as f:
        print(f.read())
def lang_mode(args):
    """CLI handler for the ``language`` subcommand: list every available
    generator (each directory under GENERATORS_PATH except ``help``)."""
    for entry in os.listdir(GENERATORS_PATH):
        if entry != 'help' and os.path.isdir(join(GENERATORS_PATH, entry)):
            print(entry)
def main():
    # Build the CLI: three subcommands (generate/help/language), each with
    # manually-registered alias parsers because argparse on Python 2.x has
    # no `aliases=` support.
    gen_msg = '''Generates directory for coding dojo following the pattern:
    {}[_extra]_language_problem\n'''.format(TODAY)
    help_msg = 'Describes how to prepare the environment for a language'
    lang_msg = 'Shows existing generators'
    # The d*-variants are what shows up in the subcommand listing; after the
    # first (canonical) name is registered they are collapsed to '...' so the
    # aliases don't repeat the full help text.
    dgen_msg, dhelp_msg, dlang_msg = gen_msg, help_msg, lang_msg
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers()
    for name in ['generate', 'gen', 'g']:  # Aliases on Python 2.x
        parser_gen = subparsers.add_parser(
            name, description=gen_msg, help=dgen_msg)
        parser_gen.add_argument(
            'language', type=str, help='Programming Language')
        parser_gen.add_argument(
            'problem', type=str, help='Problem Name')
        parser_gen.add_argument(
            'extra', type=str, nargs='*', default='', help='Extra identifier')
        parser_gen.add_argument(
            '--path', '-p', type=str, default=os.path.curdir,
            help='Custom path')
        parser_gen.add_argument(
            '--ignore', '-i', action='store_true',
            help='Do not generate files. Create only the folder')
        parser_gen.set_defaults(func=generate_mode)
        dgen_msg = '...'
    for name in ['help', 'man', 'h']:  # Aliases on Python 2.x
        parser_help = subparsers.add_parser(
            name, description=help_msg, help=dhelp_msg)
        parser_help.add_argument(
            'language', type=str, help='Programming Language')
        parser_help.set_defaults(func=help_mode)
        dhelp_msg = '...'
    for name in ['language', 'l']:  # Aliases on Python 2.x
        parser_lang = subparsers.add_parser(
            name, description=lang_msg, help=dlang_msg)
        parser_lang.set_defaults(func=lang_mode)
        dlang_msg = '...'
    # parse_known_args so stray extra tokens don't abort the program.
    args, _ = parser.parse_known_args()
    try:
        args.func(args)
    except GeneratorException as err:
        # All domain errors render as a one-line message instead of a trace.
        print("Error: {0}".format(err))
if __name__ == '__main__':
    main()
|
|
from collections import namedtuple
import lasagne as nn
from lasagne.layers.dnn import Conv2DDNNLayer, MaxPool2DDNNLayer
import data_iterators
import numpy as np
import theano.tensor as T
from functools import partial
import nn_heart
import utils_heart
from pathfinder import PKL_TRAIN_DATA_PATH, TRAIN_LABELS_PATH, PKL_VALIDATE_DATA_PATH
import data
import utils
# ----- experiment configuration (module-level, read by the training loop) ----
caching = None
restart_from_save = None
rng = np.random.RandomState(42)
# Patch sizes: pixels fed to the net, and the physical (mm) window cropped
# from the scan before resampling.
patch_size = (64, 64)
mm_patch_size = (128, 128)
# Heavy augmentation for training.
train_transformation_params = {
    'patch_size': patch_size,
    'mm_patch_size': mm_patch_size,
    'rotation_range': (-180, 180),
    'mask_roi': False,
    'translation_range_x': (-10, 10),
    'translation_range_y': (-10, 10),
    'shear_range': (0, 0),
    'roi_scale_range': (1.2, 1.5),
    'do_flip': (True, False),
    'zoom_range': (1 / 1.5, 1.5),
    'sequence_shift': False
}
# No augmentation for validation.
valid_transformation_params = {
    'patch_size': patch_size,
    'mm_patch_size': mm_patch_size,
    'mask_roi': False
}
# Test-time augmentation: like training but without zoom.
test_transformation_params = {
    'patch_size': patch_size,
    'mm_patch_size': mm_patch_size,
    'rotation_range': (-180, 180),
    'mask_roi': False,
    'translation_range_x': (-10, 10),
    'translation_range_y': (-10, 10),
    'shear_range': (0, 0),
    'roi_scale_range': (1.2, 1.5),
    'do_flip': (True, False),
    'zoom_range': (1., 1.),
    'sequence_shift': False
}
data_prep_fun = data.transform_norm_rescale_after
# A "chunk" is the unit transferred to the GPU: nbatches_chunk minibatches.
batch_size = 32
nbatches_chunk = 12
chunk_size = batch_size * nbatches_chunk
train_valid_ids = utils.get_train_valid_split(PKL_TRAIN_DATA_PATH)
# All iterators work on the 4-chamber (4ch) view slices.
train_data_iterator = data_iterators.SliceNormRescaleDataGenerator(data_path=PKL_TRAIN_DATA_PATH,
                                                                   batch_size=chunk_size,
                                                                   transform_params=train_transformation_params,
                                                                   patient_ids=train_valid_ids['train'],
                                                                   labels_path=TRAIN_LABELS_PATH,
                                                                   slice2roi_path='pkl_train_slice2roi.pkl',
                                                                   full_batch=True, random=True, infinite=True,
                                                                   view='4ch',
                                                                   data_prep_fun=data_prep_fun)
valid_data_iterator = data_iterators.SliceNormRescaleDataGenerator(data_path=PKL_TRAIN_DATA_PATH,
                                                                   batch_size=chunk_size,
                                                                   transform_params=valid_transformation_params,
                                                                   patient_ids=train_valid_ids['valid'],
                                                                   labels_path=TRAIN_LABELS_PATH,
                                                                   slice2roi_path='pkl_train_slice2roi.pkl',
                                                                   full_batch=False, random=False, infinite=False,
                                                                   view='4ch',
                                                                   data_prep_fun=data_prep_fun)
test_data_iterator = data_iterators.SliceNormRescaleDataGenerator(data_path=PKL_VALIDATE_DATA_PATH,
                                                                  batch_size=chunk_size,
                                                                  transform_params=test_transformation_params,
                                                                  slice2roi_path='pkl_validate_slice2roi.pkl',
                                                                  full_batch=False, random=False, infinite=False,
                                                                  view='4ch',
                                                                  data_prep_fun=data_prep_fun)
# NOTE(review): written for Python 2 — '/' here is integer division; under
# Python 3 it yields a float. Confirm before porting.
nchunks_per_epoch = max(1, train_data_iterator.nsamples / chunk_size)
max_nchunks = nchunks_per_epoch * 300
# Step-wise learning-rate decay, keyed by chunk index.
learning_rate_schedule = {
    0: 0.0001,
    int(max_nchunks * 0.5): 0.00008,
    int(max_nchunks * 0.6): 0.00004,
    int(max_nchunks * 0.8): 0.00001,
    int(max_nchunks * 0.9): 0.000005
}
validate_every = nchunks_per_epoch
save_every = nchunks_per_epoch
# Building blocks: 3x3 same-padded conv with very leaky ReLU, and 2x2 max-pool.
conv3 = partial(Conv2DDNNLayer,
                stride=(1, 1),
                pad="same",
                filter_size=(3, 3),
                nonlinearity=nn.nonlinearities.very_leaky_rectify,
                b=nn.init.Constant(0.1),
                W=nn.init.Orthogonal("relu"))
max_pool = partial(MaxPool2DDNNLayer,
                   pool_size=(2, 2),
                   stride=(2, 2))
def build_model(l_in=None):
    """Build the VGG-style conv net with two (mu, sigma) Normal-CDF output
    heads — presumably systole and diastole, given the paired targets — and
    return a namedtuple of the relevant layers.
    Input: 30 frames of a patch_size slice per sample.
    """
    l_in = nn.layers.InputLayer((None, 30) + patch_size) if not l_in else l_in
    # Five conv stages, each followed by 2x2 max-pooling.
    l = conv3(l_in, num_filters=128)
    l = conv3(l, num_filters=128)
    l = max_pool(l)
    l = conv3(l, num_filters=128)
    l = conv3(l, num_filters=128)
    l = max_pool(l)
    l = conv3(l, num_filters=256)
    l = conv3(l, num_filters=256)
    l = conv3(l, num_filters=256)
    l = max_pool(l)
    l = conv3(l, num_filters=512)
    l = conv3(l, num_filters=512)
    l = conv3(l, num_filters=512)
    l = max_pool(l)
    l = conv3(l, num_filters=512)
    l = conv3(l, num_filters=512)
    l = conv3(l, num_filters=512)
    l = max_pool(l)
    # Head 0: two dropout-regularized dense layers, then mu/sigma (softplus
    # keeps both positive; biases 100/20 set the initial scale).
    l_d01 = nn.layers.DenseLayer(l, num_units=512, W=nn.init.Orthogonal("relu"),
                                 b=nn.init.Constant(0.1),nonlinearity=nn.nonlinearities.very_leaky_rectify)
    l_d02 = nn.layers.DenseLayer(nn.layers.dropout(l_d01, p=0.5), num_units=512, W=nn.init.Orthogonal("relu"),
                                 b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.very_leaky_rectify)
    mu0 = nn.layers.DenseLayer(nn.layers.dropout(l_d02, p=0.5), num_units=1, W=nn.init.Orthogonal(),
                               b=nn.init.Constant(100), nonlinearity=nn_heart.lb_softplus())
    sigma0 = nn.layers.DenseLayer(nn.layers.dropout(l_d02, p=0.5), num_units=1, W=nn.init.Orthogonal(),
                                  b=nn.init.Constant(20), nonlinearity=nn_heart.lb_softplus())
    l_cdf0 = nn_heart.NormalCDFLayer(mu0, sigma0)
    # ---------------------------------------------------------------
    # Head 1: same structure, larger initial mu bias (150).
    l_d11 = nn.layers.DenseLayer(l, num_units=512, W=nn.init.Orthogonal("relu"),
                                 b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.very_leaky_rectify)
    l_d12 = nn.layers.DenseLayer(nn.layers.dropout(l_d11, p=0.5), num_units=512, W=nn.init.Orthogonal("relu"),
                                 b=nn.init.Constant(0.1), nonlinearity=nn.nonlinearities.very_leaky_rectify)
    mu1 = nn.layers.DenseLayer(nn.layers.dropout(l_d12, p=0.5), num_units=1, W=nn.init.Orthogonal(),
                               b=nn.init.Constant(150), nonlinearity=nn_heart.lb_softplus())
    sigma1 = nn.layers.DenseLayer(nn.layers.dropout(l_d12, p=0.5), num_units=1, W=nn.init.Orthogonal(),
                                  b=nn.init.Constant(20), nonlinearity=nn_heart.lb_softplus())
    l_cdf1 = nn_heart.NormalCDFLayer(mu1, sigma1)
    l_outs = [l_cdf0, l_cdf1]
    # Merge so one get_all_params(l_top) call reaches both heads.
    l_top = nn.layers.MergeLayer(l_outs)
    l_target_mu0 = nn.layers.InputLayer((None, 1))
    l_target_mu1 = nn.layers.InputLayer((None, 1))
    l_targets = [l_target_mu0, l_target_mu1]
    mu_layers = [mu0, mu1]
    sigma_layers = [sigma0, sigma1]
    return namedtuple('Model', ['l_ins', 'l_outs', 'l_targets', 'l_top', 'mu_layers', 'sigma_layers'])([l_in], l_outs,
                                                                                                        l_targets, l_top,
                                                                                                        mu_layers,
                                                                                                        sigma_layers)
def build_objective(model, deterministic=False):
    """Average CRPS of the two output CDFs against Heaviside step targets."""
    def _crps(idx):
        # Squared distance between predicted CDF and the step target CDF.
        pred = nn.layers.get_output(model.l_outs[idx], deterministic=deterministic)
        target = nn.layers.get_output(model.l_targets[idx])
        return T.mean((pred - nn_heart.heaviside(target)) ** 2)
    return 0.5 * (_crps(0) + _crps(1))
def build_updates(train_loss, model, learning_rate):
    """Adam updates for every trainable parameter reachable from l_top."""
    params = nn.layers.get_all_params(model.l_top)
    return nn.updates.adam(train_loss, params, learning_rate)
def get_mean_validation_loss(batch_predictions, batch_targets):
    """Placeholder: validation quality is reported via CRPS instead."""
    return [0] * 2
def get_mean_crps_loss(batch_predictions, batch_targets, batch_ids):
    """Per-output CRPS: stack each output's predictions/targets across all
    batches, then take the mean squared CDF distance."""
    nbatches = len(batch_predictions)
    noutputs = len(batch_predictions[0])
    crpss = []
    for i in xrange(noutputs):
        preds = np.vstack([batch_predictions[j][i] for j in xrange(nbatches)])
        targets = np.vstack([batch_targets[j][i] for j in xrange(nbatches)])
        target_cdf = utils_heart.heaviside_function(targets)
        crpss.append(np.mean((preds - target_cdf) ** 2))
    return crpss
def get_avg_patient_predictions(batch_predictions, batch_patient_ids, mean):
    # Thin wrapper: aggregate per-slice CDF predictions into one per patient.
    return utils_heart.get_patient_average_cdf_predictions(batch_predictions, batch_patient_ids, mean)
|
|
"""The ``interface`` module provides functions to convert/cast between common
VTK and NumPy/Pandas data types. These methods provide a simple to use interface
for VTK data types so that users can make changes to VTK data structures via
Python data structures that are a bit easier to perform numerical operations
upon.
"""
# Explicit public API of this module (used by `import *` and the docs).
__all__ = [
    'get_vtk_type',
    'convert_string_array',
    'convert_array',
    'data_frame_to_table',
    'table_to_data_frame',
    'place_array_in_table',
    'get_dtypes',
    'points_to_poly_data',
    'add_arrays_from_data_frame',
    'convert_cell_conn',
    'get_array',
    'get_data_dict',
]
# Display name used by the documentation generator.
__displayname__ = 'Interface'
import numpy as np
import pandas as pd
import vtk
from vtk.util import numpy_support as nps
import pyvista as pv
from pyvista.utilities import convert_string_array, get_vtk_type
from . import _helpers
def convert_array(arr, name='Data', deep=0, array_type=None, pdf=False):
    """Convert a NumPy array to a vtkDataArray or vice versa.
    Args:
        arr (ndarray or vtkDataArry) : A numpy array or vtkDataArry to convert
        name (str): the name of the data array for VTK
        deep (bool, int): if input is numpy array then deep copy values
        pdf (bool): if input is vtkDataArry, make a pandas DataFrame of the array
    Return:
        vtkDataArray, ndarray, or DataFrame:
            the converted array. A vtkDataArray input comes back as a NumPy
            ``ndarray`` (or, when ``pdf`` is True, a single-column pandas
            DataFrame named after the array); an ``ndarray`` input comes
            back as a ``vtkDataArray``.
    """
    converted = pv.convert_array(arr, name=name, deep=deep, array_type=array_type)
    # Only a vtk->numpy conversion can be promoted to a DataFrame.
    if pdf and isinstance(converted, np.ndarray):
        return pd.DataFrame(data=converted, columns=[arr.GetName()])
    return converted
def data_frame_to_table(df, pdo=None):
    """Convert a pandas DataFrame to a vtkTable, either fresh or by deep
    copying into the supplied output object *pdo*."""
    if not isinstance(df, pd.DataFrame):
        raise _helpers.PVGeoError('Input is not a pandas DataFrame')
    table = pv.Table(df)
    if pdo is None:
        return table
    pdo.DeepCopy(table)
    return pdo
def table_to_data_frame(table):
    """Convert a vtkTable to a pandas DataFrame."""
    if not isinstance(table, vtk.vtkTable):
        raise _helpers.PVGeoError('Input is not a vtkTable')
    # Wrap raw vtkTable instances so .to_pandas() is available.
    if isinstance(table, pv.Table):
        return table.to_pandas()
    return pv.Table(table).to_pandas()
def place_array_in_table(ndarr, titles, pdo):
    """Takes a 1D/2D numpy array and makes a vtkTable of it
    Args:
        ndarr (numpy.ndarray) : The 1D/2D array to be converted to a table
        titles (list or tuple): The titles for the arrays in the table. Must
            have same number of elements as columns in input ndarray
        pdo (vtkTable) : The output data object pointer
    Return:
        vtkTable : returns the same input pdo table
    Raises:
        PVGeoError: if *ndarr* has more than 2 dimensions
    """
    # Put columns into table
    if len(np.shape(ndarr)) > 2:
        raise _helpers.PVGeoError(
            'Input np.ndarray must be 1D or 2D to be converted to vtkTable.'
        )
    if len(np.shape(ndarr)) == 1:
        # First check if it is an array full of tuples (varying type)
        if isinstance(ndarr[0], (tuple, np.void)):
            for i, title in enumerate(titles):
                # Bug fix: wrap the single title in a list — previously the
                # bare string was passed as `titles`, so the recursive call's
                # `titles[0]` named the column after its first character.
                place_array_in_table(ndarr['f%d' % i], [title], pdo)
            return pv.wrap(pdo)
        # Otherwise it is just a 1D array which needs to be 2D
        else:
            ndarr = np.reshape(ndarr, (-1, 1))
    cols = np.shape(ndarr)[1]
    for i in range(cols):
        VTK_data = convert_array(ndarr[:, i])
        VTK_data.SetName(titles[i])
        pdo.AddColumn(VTK_data)
    return pv.wrap(pdo)
def get_dtypes(dtype='', endian=None):
    """Convert a char dtype code plus an optional endian marker into a
    NumPy dtype and the matching VTK type id (from vtkType.h).
    Return:
        tuple (numpy.dtype, int):
            the numpy data type and the integer type id specified in vtkType.h
            for VTK data types
    """
    # Native '@' endianness is treated the same as unspecified.
    if endian == '@' or endian is None:
        endian = ''
    vtk_type_by_char = {
        'd': vtk.VTK_DOUBLE,
        'f': vtk.VTK_FLOAT,
        'i': vtk.VTK_INT,
    }
    if dtype not in vtk_type_by_char:
        raise _helpers.PVGeoError('dtype \'%s\' unknown:' % dtype)
    vtktype = vtk_type_by_char[dtype]
    # Return data types
    return np.dtype('%s%s' % (endian, dtype)), vtktype
def points_to_poly_data(points, copy_z=False):
    """Create ``vtkPolyData`` from a numpy array of XYZ points. If the points
    have more than 3 dimensions, then all dimensions after the third will be
    added as attributes. Assume the first three dimensions are the XYZ
    coordinates.
    Args:
        points (np.ndarray or pandas.DataFrame): The points and pointdata
        copy_z (bool): A flag on whether to append the z values as a PointData
            array
    Return:
        vtkPolyData : points with point-vertex cells
    Raises:
        RuntimeError: if the input has fewer than three coordinate columns
    """
    # This prevents an error that occurs when only one point is passed
    if points.ndim < 2:
        points = points.reshape((1, -1))
    keys = ['Field %d' % i for i in range(points.shape[1] - 3)]
    # Check if input is anything other than a NumPy array and cast it
    # e.g. you could send a Pandas dataframe
    if not isinstance(points, np.ndarray):
        if isinstance(points, pd.DataFrame):
            # If a pandas data frame, lets grab the keys
            keys = points.keys()[3::]
        points = np.array(points)
    # Bug fix: the previous check (`< 2`) let 2-column arrays through to fail
    # deep inside VTK; XYZ requires at least three columns, matching the
    # error message below.
    if points.shape[1] < 3:
        raise RuntimeError('Points must be 3D. Try adding a third dimension of zeros.')
    atts = points[:, 3::]
    # Bug fix: `np.float` was deprecated and then removed from NumPy (1.24);
    # the builtin `float` gives the same float64 dtype.
    points = points[:, 0:3].astype(float)
    # Create polydata
    pdata = pv.PolyData(points)
    # Add attributes if given; the first one becomes the active scalars.
    scal_set = False
    for i, key in enumerate(keys):
        data = convert_array(atts[:, i], name=key)
        pdata.GetPointData().AddArray(data)
        if not scal_set:
            pdata.GetPointData().SetActiveScalars(key)
            scal_set = True
    if copy_z:
        z = convert_array(points[:, 2], name='Elevation')
        pdata.GetPointData().AddArray(z)
    return pv.wrap(pdata)
def add_arrays_from_data_frame(pdo, field, df):
    """Add every column of *df* to the given *field* of the output *pdo*."""
    for key in df.keys():
        column = convert_array(df[key].values, name=key)
        _helpers.add_array(pdo, field, column)
    return pv.wrap(pdo)
def convert_cell_conn(cell_connectivity):
    """Convert a cell-connectivity matrix to the flat
    ``[npts, id0, id1, ..., npts, ...]`` layout VTK cell arrays expect."""
    ncells, npts_per_cell = cell_connectivity.shape
    # Prepend a column holding the point count of every cell, then flatten.
    counts = np.full((ncells, 1), npts_per_cell, dtype=np.int64)
    cells_mat = np.concatenate((counts, cell_connectivity), axis=1).ravel()
    return nps.numpy_to_vtk(cells_mat, deep=True, array_type=vtk.VTK_ID_TYPE)
def get_array(dataset, name, vtk_object=False):
    """Return the named array from *dataset*: a NumPy array by default, or
    the raw vtkDataArray when *vtk_object* is True."""
    arr, field = _helpers.search_for_array(dataset, name)
    return arr if vtk_object else convert_array(arr)
def get_data_dict(dataset, field='cell'):
    """Return every array in the given cell/point/field/row association of
    *dataset* as a dictionary of named NumPy arrays."""
    return {
        key: np.array(_helpers.get_numpy_array(dataset, field, key))
        for key in _helpers.get_all_array_names(dataset, field)
    }
|
|
"""Tests for letsencrypt.renewer."""
import datetime
import pytz
import os
import tempfile
import shutil
import unittest
import configobj
import mock
from letsencrypt import configuration
from letsencrypt import errors
from letsencrypt.storage import ALL_FOUR
from letsencrypt.tests import test_util
CERT = test_util.load_cert('cert.pem')
def unlink_all(rc_object):
    """Unlink all four items associated with this RenewableCert."""
    for kind in ALL_FOUR:
        path = getattr(rc_object, kind)
        os.unlink(path)
def fill_with_sample_data(rc_object):
    """Put dummy data into all four files of this RenewableCert."""
    for kind in ALL_FOUR:
        target = getattr(rc_object, kind)
        with open(target, "w") as handle:
            handle.write(kind)
class BaseRenewableCertTest(unittest.TestCase):
    """Base class for setting up Renewable Cert tests.
    .. note:: It may be required to write out self.config for
    your test. Check :class:`.cli_test.DuplicateCertTest` for an example.
    """
    def setUp(self):
        # Build a throwaway letsencrypt-style directory layout
        # (live/, archive/, renewal/) plus a renewal config for example.org.
        from letsencrypt import storage
        self.tempdir = tempfile.mkdtemp()
        self.cli_config = configuration.RenewerConfiguration(
            namespace=mock.MagicMock(
                config_dir=self.tempdir,
                work_dir=self.tempdir,
                logs_dir=self.tempdir,
            )
        )
        # TODO: maybe provide RenewerConfiguration.make_dirs?
        # TODO: main() should create those dirs, c.f. #902
        os.makedirs(os.path.join(self.tempdir, "live", "example.org"))
        os.makedirs(os.path.join(self.tempdir, "archive", "example.org"))
        os.makedirs(os.path.join(self.tempdir, "renewal"))
        # Renewal config maps each of the four items to its live/ path.
        config = configobj.ConfigObj()
        for kind in ALL_FOUR:
            config[kind] = os.path.join(self.tempdir, "live", "example.org",
                                        kind + ".pem")
        config.filename = os.path.join(self.tempdir, "renewal",
                                       "example.org.conf")
        config.write()
        self.config = config
        self.defaults = configobj.ConfigObj()
        self.test_rc = storage.RenewableCert(config.filename, self.cli_config)
    def tearDown(self):
        shutil.rmtree(self.tempdir)
    def _write_out_ex_kinds(self):
        # Materialize archive versions 12 and 11 of every item (writing
        # through the symlink creates the archive file), leaving the live
        # links pointing at version 11.
        for kind in ALL_FOUR:
            where = getattr(self.test_rc, kind)
            os.symlink(os.path.join("..", "..", "archive", "example.org",
                                    "{0}12.pem".format(kind)), where)
            with open(where, "w") as f:
                f.write(kind)
            os.unlink(where)
            os.symlink(os.path.join("..", "..", "archive", "example.org",
                                    "{0}11.pem".format(kind)), where)
            with open(where, "w") as f:
                f.write(kind)
class RenewableCertTests(BaseRenewableCertTest):
    # pylint: disable=too-many-public-methods
    """Tests for letsencrypt.renewer.*."""
    def test_initialization(self):
        """Lineage name and the four symlink paths derive from the config."""
        self.assertEqual(self.test_rc.lineagename, "example.org")
        for kind in ALL_FOUR:
            self.assertEqual(
                getattr(self.test_rc, kind), os.path.join(
                    self.tempdir, "live", "example.org", kind + ".pem"))
    def test_renewal_bad_config(self):
        """Test that the RenewableCert constructor will complain if
        the renewal configuration file doesn't end in ".conf"
        """
        from letsencrypt import storage
        broken = os.path.join(self.tempdir, "broken.conf")
        with open(broken, "w") as f:
            f.write("[No closing bracket for you!")
        # Syntactically invalid config contents must be rejected ...
        self.assertRaises(errors.CertStorageError, storage.RenewableCert,
                          broken, self.cli_config)
        os.unlink(broken)
        # ... as must a path that doesn't end in ".conf".
        self.assertRaises(errors.CertStorageError, storage.RenewableCert,
                          "fun", self.cli_config)
    def test_renewal_incomplete_config(self):
        """Test that the RenewableCert constructor will complain if
        the renewal configuration file is missing a required file element."""
        from letsencrypt import storage
        config = configobj.ConfigObj()
        config["cert"] = "imaginary_cert.pem"
        # Here the required privkey is missing.
        config["chain"] = "imaginary_chain.pem"
        config["fullchain"] = "imaginary_fullchain.pem"
        config.filename = os.path.join(self.tempdir, "imaginary_config.conf")
        config.write()
        # Constructing from a config lacking one of the four items must fail.
        self.assertRaises(errors.CertStorageError, storage.RenewableCert,
                          config.filename, self.cli_config)
    def test_consistent(self):
        """Exercise each consistency requirement of _consistent() in turn."""
        # pylint: disable=too-many-statements,protected-access
        oldcert = self.test_rc.cert
        self.test_rc.cert = "relative/path"
        # Absolute path for item requirement
        self.assertFalse(self.test_rc._consistent())
        self.test_rc.cert = oldcert
        # Items must exist requirement
        self.assertFalse(self.test_rc._consistent())
        # Items must be symlinks requirements
        fill_with_sample_data(self.test_rc)
        self.assertFalse(self.test_rc._consistent())
        unlink_all(self.test_rc)
        # Items must point to desired place if they are relative
        for kind in ALL_FOUR:
            os.symlink(os.path.join("..", kind + "17.pem"),
                       getattr(self.test_rc, kind))
        self.assertFalse(self.test_rc._consistent())
        unlink_all(self.test_rc)
        # Items must point to desired place if they are absolute
        for kind in ALL_FOUR:
            os.symlink(os.path.join(self.tempdir, kind + "17.pem"),
                       getattr(self.test_rc, kind))
        self.assertFalse(self.test_rc._consistent())
        unlink_all(self.test_rc)
        # Items must point to things that exist
        for kind in ALL_FOUR:
            os.symlink(os.path.join("..", "..", "archive", "example.org",
                                    kind + "17.pem"),
                       getattr(self.test_rc, kind))
        self.assertFalse(self.test_rc._consistent())
        # This version should work
        fill_with_sample_data(self.test_rc)
        self.assertTrue(self.test_rc._consistent())
        # Items must point to things that follow the naming convention
        os.unlink(self.test_rc.fullchain)
        os.symlink(os.path.join("..", "..", "archive", "example.org",
                                "fullchain_17.pem"), self.test_rc.fullchain)
        with open(self.test_rc.fullchain, "w") as f:
            f.write("wrongly-named fullchain")
        self.assertFalse(self.test_rc._consistent())
    def test_current_target(self):
        """current_target() resolves both relative and absolute symlinks."""
        # Relative path logic
        os.symlink(os.path.join("..", "..", "archive", "example.org",
                                "cert17.pem"), self.test_rc.cert)
        with open(self.test_rc.cert, "w") as f:
            f.write("cert")
        self.assertTrue(os.path.samefile(self.test_rc.current_target("cert"),
                                         os.path.join(self.tempdir, "archive",
                                                      "example.org",
                                                      "cert17.pem")))
        # Absolute path logic
        os.unlink(self.test_rc.cert)
        os.symlink(os.path.join(self.tempdir, "archive", "example.org",
                                "cert17.pem"), self.test_rc.cert)
        with open(self.test_rc.cert, "w") as f:
            f.write("cert")
        self.assertTrue(os.path.samefile(self.test_rc.current_target("cert"),
                                         os.path.join(self.tempdir, "archive",
                                                      "example.org",
                                                      "cert17.pem")))
    def test_current_version(self):
        """current_version() parses the version number out of the link target."""
        # Create archive versions 1, 5, 10, 20 (the loop leaves no link).
        for ver in (1, 5, 10, 20):
            os.symlink(os.path.join("..", "..", "archive", "example.org",
                                    "cert{0}.pem".format(ver)),
                       self.test_rc.cert)
            with open(self.test_rc.cert, "w") as f:
                f.write("cert")
            os.unlink(self.test_rc.cert)
        os.symlink(os.path.join("..", "..", "archive", "example.org",
                                "cert10.pem"), self.test_rc.cert)
        self.assertEqual(self.test_rc.current_version("cert"), 10)
    def test_no_current_version(self):
        """With no symlink present, current_version() returns None."""
        self.assertEqual(self.test_rc.current_version("cert"), None)
    def test_latest_and_next_versions(self):
        """latest_common_version() needs all four kinds at a version;
        next_free_version() is one past the highest version of any kind."""
        for ver in xrange(1, 6):
            for kind in ALL_FOUR:
                where = getattr(self.test_rc, kind)
                if os.path.islink(where):
                    os.unlink(where)
                os.symlink(os.path.join("..", "..", "archive", "example.org",
                                        "{0}{1}.pem".format(kind, ver)), where)
                with open(where, "w") as f:
                    f.write(kind)
        self.assertEqual(self.test_rc.latest_common_version(), 5)
        self.assertEqual(self.test_rc.next_free_version(), 6)
        # Having one kind of file of a later version doesn't change the
        # result
        os.unlink(self.test_rc.privkey)
        os.symlink(os.path.join("..", "..", "archive", "example.org",
                                "privkey7.pem"), self.test_rc.privkey)
        with open(self.test_rc.privkey, "w") as f:
            f.write("privkey")
        self.assertEqual(self.test_rc.latest_common_version(), 5)
        # ... although it does change the next free version
        self.assertEqual(self.test_rc.next_free_version(), 8)
        # Nor does having three out of four change the result
        os.unlink(self.test_rc.cert)
        os.symlink(os.path.join("..", "..", "archive", "example.org",
                                "cert7.pem"), self.test_rc.cert)
        with open(self.test_rc.cert, "w") as f:
            f.write("cert")
        os.unlink(self.test_rc.fullchain)
        os.symlink(os.path.join("..", "..", "archive", "example.org",
                                "fullchain7.pem"), self.test_rc.fullchain)
        with open(self.test_rc.fullchain, "w") as f:
            f.write("fullchain")
        self.assertEqual(self.test_rc.latest_common_version(), 5)
        # If we have everything from a much later version, it does change
        # the result
        ver = 17
        for kind in ALL_FOUR:
            where = getattr(self.test_rc, kind)
            if os.path.islink(where):
                os.unlink(where)
            os.symlink(os.path.join("..", "..", "archive", "example.org",
                                    "{0}{1}.pem".format(kind, ver)), where)
            with open(where, "w") as f:
                f.write(kind)
        self.assertEqual(self.test_rc.latest_common_version(), 17)
        self.assertEqual(self.test_rc.next_free_version(), 18)
    def test_update_link_to(self):
        """_update_link_to() retargets one kind's symlink independently."""
        for ver in xrange(1, 6):
            for kind in ALL_FOUR:
                where = getattr(self.test_rc, kind)
                if os.path.islink(where):
                    os.unlink(where)
                os.symlink(os.path.join("..", "..", "archive", "example.org",
                                        "{0}{1}.pem".format(kind, ver)), where)
                with open(where, "w") as f:
                    f.write(kind)
                self.assertEqual(ver, self.test_rc.current_version(kind))
        # pylint: disable=protected-access
        self.test_rc._update_link_to("cert", 3)
        self.test_rc._update_link_to("privkey", 2)
        self.assertEqual(3, self.test_rc.current_version("cert"))
        self.assertEqual(2, self.test_rc.current_version("privkey"))
        self.assertEqual(5, self.test_rc.current_version("chain"))
        self.assertEqual(5, self.test_rc.current_version("fullchain"))
        # Currently we are allowed to update to a version that doesn't exist
        self.test_rc._update_link_to("chain", 3000)
        # However, current_version doesn't allow querying the resulting
        # version (because it's a broken link).
        self.assertEqual(os.path.basename(os.readlink(self.test_rc.chain)),
                         "chain3000.pem")
    def test_version(self):
        """version() builds the archive path for an arbitrary version number."""
        os.symlink(os.path.join("..", "..", "archive", "example.org",
                                "cert12.pem"), self.test_rc.cert)
        with open(self.test_rc.cert, "w") as f:
            f.write("cert")
        # TODO: We should probably test that the directory is still the
        #       same, but it's tricky because we can get an absolute
        #       path out when we put a relative path in.
        self.assertEqual("cert8.pem",
                         os.path.basename(self.test_rc.version("cert", 8)))
    def test_update_all_links_to_success(self):
        """update_all_links_to() moves all four symlinks together."""
        for ver in xrange(1, 6):
            for kind in ALL_FOUR:
                where = getattr(self.test_rc, kind)
                if os.path.islink(where):
                    os.unlink(where)
                os.symlink(os.path.join("..", "..", "archive", "example.org",
                                        "{0}{1}.pem".format(kind, ver)), where)
                with open(where, "w") as f:
                    f.write(kind)
                self.assertEqual(ver, self.test_rc.current_version(kind))
        self.assertEqual(self.test_rc.latest_common_version(), 5)
        for ver in xrange(1, 6):
            self.test_rc.update_all_links_to(ver)
            for kind in ALL_FOUR:
                self.assertEqual(ver, self.test_rc.current_version(kind))
            self.assertEqual(self.test_rc.latest_common_version(), 5)
    def test_update_all_links_to_partial_failure(self):
        """A failure cleaning up the previous links still leaves every kind
        pointing at the new version (12)."""
        def unlink_or_raise(path, real_unlink=os.unlink):
            # pylint: disable=missing-docstring
            basename = os.path.basename(path)
            # Only the backup ("prev...") fullchain unlink fails.
            if "fullchain" in basename and basename.startswith("prev"):
                raise ValueError
            else:
                real_unlink(path)
        self._write_out_ex_kinds()
        with mock.patch("letsencrypt.storage.os.unlink") as mock_unlink:
            mock_unlink.side_effect = unlink_or_raise
            self.assertRaises(ValueError, self.test_rc.update_all_links_to, 12)
        for kind in ALL_FOUR:
            self.assertEqual(self.test_rc.current_version(kind), 12)
    def test_update_all_links_to_full_failure(self):
        """A failure while switching the links themselves rolls back, so
        every kind stays at the old version (11)."""
        def unlink_or_raise(path, real_unlink=os.unlink):
            # pylint: disable=missing-docstring
            # Every fullchain unlink fails, including the switch itself.
            if "fullchain" in os.path.basename(path):
                raise ValueError
            else:
                real_unlink(path)
        self._write_out_ex_kinds()
        with mock.patch("letsencrypt.storage.os.unlink") as mock_unlink:
            mock_unlink.side_effect = unlink_or_raise
            self.assertRaises(ValueError, self.test_rc.update_all_links_to, 12)
        for kind in ALL_FOUR:
            self.assertEqual(self.test_rc.current_version(kind), 11)
    def test_has_pending_deployment(self):
        """has_pending_deployment() is True whenever the links lag behind the
        newest common archive version (5)."""
        for ver in xrange(1, 6):
            for kind in ALL_FOUR:
                where = getattr(self.test_rc, kind)
                if os.path.islink(where):
                    os.unlink(where)
                os.symlink(os.path.join("..", "..", "archive", "example.org",
                                        "{0}{1}.pem".format(kind, ver)), where)
                with open(where, "w") as f:
                    f.write(kind)
                self.assertEqual(ver, self.test_rc.current_version(kind))
        for ver in xrange(1, 6):
            self.test_rc.update_all_links_to(ver)
            for kind in ALL_FOUR:
                self.assertEqual(ver, self.test_rc.current_version(kind))
            if ver < 5:
                self.assertTrue(self.test_rc.has_pending_deployment())
            else:
                self.assertFalse(self.test_rc.has_pending_deployment())
    def test_names(self):
        """names() reads the SANs from the current or an explicit version."""
        # Trying the current version
        test_cert = test_util.load_vector("cert-san.pem")
        os.symlink(os.path.join("..", "..", "archive", "example.org",
                                "cert12.pem"), self.test_rc.cert)
        with open(self.test_rc.cert, "w") as f:
            f.write(test_cert)
        self.assertEqual(self.test_rc.names(),
                         ["example.com", "www.example.com"])
        # Trying a non-current version
        test_cert = test_util.load_vector("cert.pem")
        os.unlink(self.test_rc.cert)
        os.symlink(os.path.join("..", "..", "archive", "example.org",
                                "cert15.pem"), self.test_rc.cert)
        with open(self.test_rc.cert, "w") as f:
            f.write(test_cert)
        self.assertEqual(self.test_rc.names(12),
                         ["example.com", "www.example.com"])
    @mock.patch("letsencrypt.storage.datetime")
    def test_time_interval_judgments(self, mock_datetime):
        """Test should_autodeploy() and should_autorenew() on the basis
        of expiry time windows."""
        test_cert = test_util.load_vector("cert.pem")
        self._write_out_ex_kinds()
        # Write the real test certificate into both archive versions so its
        # expiry date drives the judgment.
        self.test_rc.update_all_links_to(12)
        with open(self.test_rc.cert, "w") as f:
            f.write(test_cert)
        self.test_rc.update_all_links_to(11)
        with open(self.test_rc.cert, "w") as f:
            f.write(test_cert)
        # Keep the real timedelta so interval arithmetic still works; only
        # utcnow() is faked below.
        mock_datetime.timedelta = datetime.timedelta
        for (current_time, interval, result) in [
                # 2014-12-13 12:00:00+00:00 (about 5 days prior to expiry)
                # Times that should result in autorenewal/autodeployment
                (1418472000, "2 months", True), (1418472000, "1 week", True),
                # Times that should not
                (1418472000, "4 days", False), (1418472000, "2 days", False),
                # 2009-05-01 12:00:00+00:00 (about 5 years prior to expiry)
                # Times that should result in autorenewal/autodeployment
                (1241179200, "7 years", True),
                (1241179200, "11 years 2 months", True),
                # Times that should not
                (1241179200, "8 hours", False), (1241179200, "2 days", False),
                (1241179200, "40 days", False), (1241179200, "9 months", False),
                # 2015-01-01 (after expiry has already happened, so all
                #            intervals should cause autorenewal/autodeployment)
                (1420070400, "0 seconds", True),
                (1420070400, "10 seconds", True),
                (1420070400, "10 minutes", True),
                (1420070400, "10 weeks", True), (1420070400, "10 months", True),
                (1420070400, "10 years", True), (1420070400, "99 months", True),
        ]:
            sometime = datetime.datetime.utcfromtimestamp(current_time)
            mock_datetime.datetime.utcnow.return_value = sometime
            self.test_rc.configuration["deploy_before_expiry"] = interval
            self.test_rc.configuration["renew_before_expiry"] = interval
            self.assertEqual(self.test_rc.should_autodeploy(), result)
            self.assertEqual(self.test_rc.should_autorenew(), result)
def test_autodeployment_is_enabled(self):
    """autodeployment_is_enabled() follows the "autodeploy" config flag."""
    config = self.test_rc.configuration
    # Enabled by default when the flag has not been set.
    self.assertTrue(self.test_rc.autodeployment_is_enabled())
    config["autodeploy"] = "1"
    self.assertTrue(self.test_rc.autodeployment_is_enabled())
    config["autodeploy"] = "0"
    self.assertFalse(self.test_rc.autodeployment_is_enabled())
def test_should_autodeploy(self):
    """Test should_autodeploy() on the basis of reasons other than
    expiry time window."""
    # pylint: disable=too-many-statements
    # Autodeployment turned off
    self.test_rc.configuration["autodeploy"] = "0"
    self.assertFalse(self.test_rc.should_autodeploy())
    self.test_rc.configuration["autodeploy"] = "1"
    # No pending deployment: after this loop the links point at the
    # newest version (5), so there is nothing newer to deploy.
    for ver in xrange(1, 6):
        for kind in ALL_FOUR:
            where = getattr(self.test_rc, kind)
            if os.path.islink(where):
                os.unlink(where)
            os.symlink(os.path.join("..", "..", "archive", "example.org",
                                    "{0}{1}.pem".format(kind, ver)), where)
            with open(where, "w") as f:
                f.write(kind)
    self.assertFalse(self.test_rc.should_autodeploy())
def test_autorenewal_is_enabled(self):
    """autorenewal_is_enabled() follows the "autorenew" config flag."""
    config = self.test_rc.configuration
    # Enabled by default when the flag has not been set.
    self.assertTrue(self.test_rc.autorenewal_is_enabled())
    config["autorenew"] = "1"
    self.assertTrue(self.test_rc.autorenewal_is_enabled())
    config["autorenew"] = "0"
    self.assertFalse(self.test_rc.autorenewal_is_enabled())
@mock.patch("letsencrypt.storage.RenewableCert.ocsp_revoked")
def test_should_autorenew(self, mock_ocsp):
    """Test should_autorenew on the basis of reasons other than
    expiry time window."""
    # pylint: disable=too-many-statements
    # Autorenewal turned off
    self.test_rc.configuration["autorenew"] = "0"
    self.assertFalse(self.test_rc.should_autorenew())
    self.test_rc.configuration["autorenew"] = "1"
    # Point every link at version 12 so current_version() resolves.
    for kind in ALL_FOUR:
        where = getattr(self.test_rc, kind)
        os.symlink(os.path.join("..", "..", "archive", "example.org",
                                "{0}12.pem".format(kind)), where)
        with open(where, "w") as f:
            f.write(kind)
    # Mandatory renewal on the basis of OCSP revocation
    mock_ocsp.return_value = True
    self.assertTrue(self.test_rc.should_autorenew())
    mock_ocsp.return_value = False
def test_save_successor(self):
    """save_successor() archives new data and re-links the private key."""
    # Build five complete versions of all four kinds.
    for ver in xrange(1, 6):
        for kind in ALL_FOUR:
            where = getattr(self.test_rc, kind)
            if os.path.islink(where):
                os.unlink(where)
            os.symlink(os.path.join("..", "..", "archive", "example.org",
                                    "{0}{1}.pem".format(kind, ver)), where)
            with open(where, "w") as f:
                f.write(kind)
    self.test_rc.update_all_links_to(3)
    # Saving a successor of version 3 creates version 6 (one past the
    # newest available version, not one past the predecessor).
    self.assertEqual(6, self.test_rc.save_successor(3, "new cert", None,
                                                    "new chain"))
    with open(self.test_rc.version("cert", 6)) as f:
        self.assertEqual(f.read(), "new cert")
    with open(self.test_rc.version("chain", 6)) as f:
        self.assertEqual(f.read(), "new chain")
    with open(self.test_rc.version("fullchain", 6)) as f:
        self.assertEqual(f.read(), "new cert" + "new chain")
    # version 6 of the key should be a link back to version 3
    self.assertFalse(os.path.islink(self.test_rc.version("privkey", 3)))
    self.assertTrue(os.path.islink(self.test_rc.version("privkey", 6)))
    # Let's try two more updates
    self.assertEqual(7, self.test_rc.save_successor(6, "again", None,
                                                    "newer chain"))
    self.assertEqual(8, self.test_rc.save_successor(7, "hello", None,
                                                    "other chain"))
    # All of the subsequent versions should link directly to the original
    # privkey.
    for i in (6, 7, 8):
        self.assertTrue(os.path.islink(self.test_rc.version("privkey", i)))
        self.assertEqual("privkey3.pem", os.path.basename(os.readlink(
            self.test_rc.version("privkey", i))))
    for kind in ALL_FOUR:
        self.assertEqual(self.test_rc.available_versions(kind), range(1, 9))
        self.assertEqual(self.test_rc.current_version(kind), 3)
    # Test updating from latest version rather than old version
    self.test_rc.update_all_links_to(8)
    self.assertEqual(9, self.test_rc.save_successor(8, "last", None,
                                                    "attempt"))
    for kind in ALL_FOUR:
        self.assertEqual(self.test_rc.available_versions(kind),
                         range(1, 10))
        self.assertEqual(self.test_rc.current_version(kind), 8)
    with open(self.test_rc.version("fullchain", 9)) as f:
        self.assertEqual(f.read(), "last" + "attempt")
    # Test updating when providing a new privkey. The key should
    # be saved in a new file rather than creating a new symlink.
    self.assertEqual(10, self.test_rc.save_successor(9, "with", "a",
                                                     "key"))
    self.assertTrue(os.path.exists(self.test_rc.version("privkey", 10)))
    self.assertFalse(os.path.islink(self.test_rc.version("privkey", 10)))
def test_new_lineage(self):
    """Test for new_lineage() class method."""
    from letsencrypt import storage
    result = storage.RenewableCert.new_lineage(
        "the-lineage.com", "cert", "privkey", "chain", None,
        self.defaults, self.cli_config)
    # This consistency check tests most relevant properties about the
    # newly created cert lineage.
    # pylint: disable=protected-access
    self.assertTrue(result._consistent())
    self.assertTrue(os.path.exists(os.path.join(
        self.cli_config.renewal_configs_dir, "the-lineage.com.conf")))
    with open(result.fullchain) as f:
        self.assertEqual(f.read(), "cert" + "chain")
    # Let's do it again and make sure it makes a different lineage
    result = storage.RenewableCert.new_lineage(
        "the-lineage.com", "cert2", "privkey2", "chain2", None,
        self.defaults, self.cli_config)
    # A name collision gets a -NNNN suffix instead of clobbering.
    self.assertTrue(os.path.exists(os.path.join(
        self.cli_config.renewal_configs_dir, "the-lineage.com-0001.conf")))
    # Now trigger the detection of already existing files
    os.mkdir(os.path.join(
        self.cli_config.live_dir, "the-lineage.com-0002"))
    self.assertRaises(errors.CertStorageError,
                      storage.RenewableCert.new_lineage,
                      "the-lineage.com", "cert3", "privkey3", "chain3",
                      None, self.defaults, self.cli_config)
    os.mkdir(os.path.join(self.cli_config.archive_dir, "other-example.com"))
    self.assertRaises(errors.CertStorageError,
                      storage.RenewableCert.new_lineage,
                      "other-example.com", "cert4", "privkey4", "chain4",
                      None, self.defaults, self.cli_config)
    # Make sure it can accept renewal parameters
    params = {"stuff": "properties of stuff", "great": "awesome"}
    result = storage.RenewableCert.new_lineage(
        "the-lineage.com", "cert2", "privkey2", "chain2",
        params, self.defaults, self.cli_config)
    # TODO: Conceivably we could test that the renewal parameters actually
    # got saved
def test_new_lineage_nonexistent_dirs(self):
    """Test that directories can be created if they don't exist."""
    from letsencrypt import storage
    # Remove all three storage directories; new_lineage() must recreate
    # them rather than fail.
    shutil.rmtree(self.cli_config.renewal_configs_dir)
    shutil.rmtree(self.cli_config.archive_dir)
    shutil.rmtree(self.cli_config.live_dir)
    storage.RenewableCert.new_lineage(
        "the-lineage.com", "cert2", "privkey2", "chain2",
        None, self.defaults, self.cli_config)
    self.assertTrue(os.path.exists(
        os.path.join(
            self.cli_config.renewal_configs_dir, "the-lineage.com.conf")))
    self.assertTrue(os.path.exists(os.path.join(
        self.cli_config.live_dir, "the-lineage.com", "privkey.pem")))
    self.assertTrue(os.path.exists(os.path.join(
        self.cli_config.archive_dir, "the-lineage.com", "privkey1.pem")))
@mock.patch("letsencrypt.storage.le_util.unique_lineage_name")
def test_invalid_config_filename(self, mock_uln):
    """new_lineage() rejects config filenames not ending in .conf."""
    from letsencrypt import storage
    mock_uln.return_value = "this_does_not_end_with_dot_conf", "yikes"
    self.assertRaises(errors.CertStorageError,
                      storage.RenewableCert.new_lineage,
                      "example.com", "cert", "privkey", "chain",
                      None, self.defaults, self.cli_config)
def test_bad_kind(self):
    """Every accessor rejects an unknown kind with CertStorageError."""
    # Single-argument accessors share the same failure mode.
    for accessor in (self.test_rc.current_target,
                     self.test_rc.current_version,
                     self.test_rc.available_versions,
                     self.test_rc.newest_available_version):
        self.assertRaises(errors.CertStorageError, accessor, "elephant")
    self.assertRaises(
        errors.CertStorageError, self.test_rc.version, "elephant", 17)
    # pylint: disable=protected-access
    self.assertRaises(
        errors.CertStorageError,
        self.test_rc._update_link_to, "elephant", 17)
def test_ocsp_revoked(self):
    """ocsp_revoked() is a stub that always reports False.

    XXX: hardcoded due to a lack of an OCSP server to test against.
    """
    self.assertFalse(self.test_rc.ocsp_revoked())
def test_add_time_interval(self):
    """add_time_interval() parses human-readable intervals correctly.

    Covers timezone-aware base times, month/year lengths that depend on
    the calendar, and a bare number (interpreted as days).
    """
    from letsencrypt import storage
    # this month has 30 days, and the next year is a leap year
    time_1 = pytz.UTC.fromutc(datetime.datetime(2003, 11, 20, 11, 59, 21))
    # this month has 31 days, and the next year is not a leap year
    time_2 = pytz.UTC.fromutc(datetime.datetime(2012, 10, 18, 21, 31, 16))
    # in different time zone (GMT+8)
    time_3 = pytz.timezone('Asia/Shanghai').fromutc(
        datetime.datetime(2015, 10, 26, 22, 25, 41))
    intended = {
        (time_1, ""): time_1,
        (time_2, ""): time_2,
        (time_3, ""): time_3,
        (time_1, "17 days"): time_1 + datetime.timedelta(17),
        (time_2, "17 days"): time_2 + datetime.timedelta(17),
        (time_1, "30"): time_1 + datetime.timedelta(30),
        (time_2, "30"): time_2 + datetime.timedelta(30),
        (time_1, "7 weeks"): time_1 + datetime.timedelta(49),
        (time_2, "7 weeks"): time_2 + datetime.timedelta(49),
        # 1 month is always 30 days, no matter which month it is
        (time_1, "1 month"): time_1 + datetime.timedelta(30),
        (time_2, "1 month"): time_2 + datetime.timedelta(31),
        # 1 year could be 365 or 366 days, depends on the year
        (time_1, "1 year"): time_1 + datetime.timedelta(366),
        (time_2, "1 year"): time_2 + datetime.timedelta(365),
        (time_1, "1 year 1 day"): time_1 + datetime.timedelta(367),
        (time_2, "1 year 1 day"): time_2 + datetime.timedelta(366),
        (time_1, "1 year-1 day"): time_1 + datetime.timedelta(365),
        (time_2, "1 year-1 day"): time_2 + datetime.timedelta(364),
        (time_1, "4 years"): time_1 + datetime.timedelta(1461),
        (time_2, "4 years"): time_2 + datetime.timedelta(1461),
    }
    for parameters, excepted in intended.items():
        base_time, interval = parameters
        self.assertEqual(storage.add_time_interval(base_time, interval),
                         excepted)
@mock.patch("letsencrypt.renewer.plugins_disco")
@mock.patch("letsencrypt.account.AccountFileStorage")
@mock.patch("letsencrypt.client.Client")
def test_renew(self, mock_c, mock_acc_storage, mock_pd):
    """renew() fails on incomplete config and succeeds once complete."""
    from letsencrypt import renewer
    test_cert = test_util.load_vector("cert-san.pem")
    for kind in ALL_FOUR:
        os.symlink(os.path.join("..", "..", "archive", "example.org",
                                kind + "1.pem"),
                   getattr(self.test_rc, kind))
    fill_with_sample_data(self.test_rc)
    with open(self.test_rc.cert, "w") as f:
        f.write(test_cert)
    # Fails because renewalparams are missing
    self.assertFalse(renewer.renew(self.test_rc, 1))
    self.test_rc.configfile["renewalparams"] = {"some": "stuff"}
    # Fails because there's no authenticator specified
    self.assertFalse(renewer.renew(self.test_rc, 1))
    self.test_rc.configfile["renewalparams"]["rsa_key_size"] = "2048"
    self.test_rc.configfile["renewalparams"]["server"] = "acme.example.com"
    self.test_rc.configfile["renewalparams"]["authenticator"] = "fake"
    self.test_rc.configfile["renewalparams"]["tls_sni_01_port"] = "4430"
    self.test_rc.configfile["renewalparams"]["http01_port"] = "1234"
    self.test_rc.configfile["renewalparams"]["account"] = "abcde"
    self.test_rc.configfile["renewalparams"]["domains"] = ["example.com"]
    self.test_rc.configfile["renewalparams"]["config_dir"] = "config"
    self.test_rc.configfile["renewalparams"]["work_dir"] = "work"
    self.test_rc.configfile["renewalparams"]["logs_dir"] = "logs"
    mock_auth = mock.MagicMock()
    mock_pd.PluginsRegistry.find_all.return_value = {"apache": mock_auth}
    # Fails because "fake" != "apache"
    self.assertFalse(renewer.renew(self.test_rc, 1))
    self.test_rc.configfile["renewalparams"]["authenticator"] = "apache"
    mock_client = mock.MagicMock()
    # pylint: disable=star-args
    mock_client.obtain_certificate.return_value = (
        mock.MagicMock(body=CERT), [CERT], mock.Mock(pem="key"),
        mock.sentinel.csr)
    mock_c.return_value = mock_client
    # A successful renewal of version 1 produces version 2.
    self.assertEqual(2, renewer.renew(self.test_rc, 1))
    # TODO: We could also make several assertions about calls that should
    # have been made to the mock functions here.
    mock_acc_storage().load.assert_called_once_with(account_id="abcde")
    mock_client.obtain_certificate.return_value = (
        mock.sentinel.certr, [], mock.sentinel.key, mock.sentinel.csr)
    # This should fail because the renewal itself appears to fail
    self.assertFalse(renewer.renew(self.test_rc, 1))
def _common_cli_args(self):
return [
"--config-dir", self.cli_config.config_dir,
"--work-dir", self.cli_config.work_dir,
"--logs-dir", self.cli_config.logs_dir,
]
@mock.patch("letsencrypt.renewer.notify")
@mock.patch("letsencrypt.storage.RenewableCert")
@mock.patch("letsencrypt.renewer.renew")
def test_main(self, mock_renew, mock_rc, mock_notify):
    """renewer.main() processes every config and skips happy lineages."""
    from letsencrypt import renewer
    mock_rc_instance = mock.MagicMock()
    mock_rc_instance.should_autodeploy.return_value = True
    mock_rc_instance.should_autorenew.return_value = True
    mock_rc_instance.latest_common_version.return_value = 10
    mock_rc.return_value = mock_rc_instance
    with open(os.path.join(self.cli_config.renewal_configs_dir,
                           "example.org.conf"), "w") as f:
        # This isn't actually parsed in this test; we have a separate
        # test_initialization that tests the initialization, assuming
        # that configobj can correctly parse the config file.
        f.write("cert = cert.pem\nprivkey = privkey.pem\n")
        f.write("chain = chain.pem\nfullchain = fullchain.pem\n")
    with open(os.path.join(self.cli_config.renewal_configs_dir,
                           "example.com.conf"), "w") as f:
        f.write("cert = cert.pem\nprivkey = privkey.pem\n")
        f.write("chain = chain.pem\nfullchain = fullchain.pem\n")
    renewer.main(cli_args=self._common_cli_args())
    # Two lineages, each deployed and renewed, each notifying twice.
    self.assertEqual(mock_rc.call_count, 2)
    self.assertEqual(mock_rc_instance.update_all_links_to.call_count, 2)
    self.assertEqual(mock_notify.notify.call_count, 4)
    self.assertEqual(mock_renew.call_count, 2)
    # If we have instances that don't need any work done, no work should
    # be done (call counts associated with processing deployments or
    # renewals should not increase).
    mock_happy_instance = mock.MagicMock()
    mock_happy_instance.should_autodeploy.return_value = False
    mock_happy_instance.should_autorenew.return_value = False
    mock_happy_instance.latest_common_version.return_value = 10
    mock_rc.return_value = mock_happy_instance
    renewer.main(cli_args=self._common_cli_args())
    self.assertEqual(mock_rc.call_count, 4)
    self.assertEqual(mock_happy_instance.update_all_links_to.call_count, 0)
    self.assertEqual(mock_notify.notify.call_count, 4)
    self.assertEqual(mock_renew.call_count, 2)
def test_bad_config_file(self):
    """An incomplete renewal config file must not crash main()."""
    from letsencrypt import renewer
    os.unlink(os.path.join(self.cli_config.renewal_configs_dir,
                           "example.org.conf"))
    with open(os.path.join(self.cli_config.renewal_configs_dir,
                           "bad.conf"), "w") as f:
        f.write("incomplete = configfile\n")
    renewer.main(cli_args=self._common_cli_args())
    # The errors.CertStorageError is caught inside and nothing happens.
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()  # pragma: no cover
|
|
"""
GAM datasets
"""
# -*- coding: utf-8 -*-
from os.path import dirname
import pandas as pd
import numpy as np
from pygam.utils import make_2d
# Directory containing this module; the bundled CSV files live beside it.
PATH = dirname(__file__)
def _clean_X_y(X, y):
    """Coerce X to a 2-D float array and y to a float array."""
    X_2d = make_2d(X, verbose=False)
    return X_2d.astype('float'), y.astype('float')
def mcycle(return_X_y=True):
    """motorcyle acceleration dataset

    Parameters
    ----------
    return_X_y : bool,
        if True, returns a model-ready tuple of data (X, y)
        otherwise, returns a Pandas DataFrame

    Returns
    -------
    model-ready tuple of data (X, y)
    OR
    Pandas DataFrame

    Notes
    -----
    X holds the time after impact; y holds the measured acceleration.

    Source:
    https://vincentarelbundock.github.io/Rdatasets/doc/MASS/mcycle.html
    """
    # y is real; recommend LinearGAM
    df = pd.read_csv(PATH + '/mcycle.csv', index_col=0)
    if not return_X_y:
        return df
    return _clean_X_y(df.times.values, df.accel)
def coal(return_X_y=True):
    """coal-mining accidents dataset

    Parameters
    ----------
    return_X_y : bool,
        if True, returns a model-ready tuple of data (X, y)
        otherwise, returns a Pandas DataFrame

    Returns
    -------
    model-ready tuple of data (X, y)
    OR
    Pandas DataFrame

    Notes
    -----
    The (X, y) tuple is a processed version of the otherwise raw DataFrame:
    a 150-bin histogram of accidents per year is computed, X holding the
    bin midpoints and y the count in each bin.

    Source:
    https://vincentarelbundock.github.io/Rdatasets/doc/boot/coal.html
    """
    # y is counts; recommend PoissonGAM
    df = pd.read_csv(PATH + '/coal.csv', index_col=0)
    if not return_X_y:
        return df
    counts, edges = np.histogram(df.values, bins=150)
    midpoints = edges[:-1] + np.diff(edges) / 2
    return _clean_X_y(midpoints, counts)
def faithful(return_X_y=True):
    """old-faithful dataset

    Parameters
    ----------
    return_X_y : bool,
        if True, returns a model-ready tuple of data (X, y)
        otherwise, returns a Pandas DataFrame

    Returns
    -------
    model-ready tuple of data (X, y)
    OR
    Pandas DataFrame

    Notes
    -----
    The (X, y) tuple is a processed version of the otherwise raw DataFrame:
    a 200-bin histogram of eruption durations is computed, X holding the
    bin midpoints and y the count in each bin.

    Source:
    https://vincentarelbundock.github.io/Rdatasets/doc/datasets/faithful.html
    """
    # y is counts; recommend PoissonGAM
    df = pd.read_csv(PATH + '/faithful.csv', index_col=0)
    if not return_X_y:
        return df
    counts, edges = np.histogram(df['eruptions'], bins=200)
    midpoints = edges[:-1] + np.diff(edges) / 2
    return _clean_X_y(midpoints, counts)
def wage(return_X_y=True):
    """wage dataset

    Parameters
    ----------
    return_X_y : bool,
        if True, returns a model-ready tuple of data (X, y)
        otherwise, returns a Pandas DataFrame

    Returns
    -------
    model-ready tuple of data (X, y)
    OR
    Pandas DataFrame

    Notes
    -----
    X contains the year, age and education of each sampled person; the
    education category is integer-encoded. y contains the wage.

    Source:
    https://github.com/JWarmenhoven/ISLR-python/blob/master/Notebooks/Data/Wage.csv
    """
    # y is real; recommend LinearGAM
    df = pd.read_csv(PATH + '/wage.csv', index_col=0)
    if not return_X_y:
        return df
    X = df[['year', 'age', 'education']].values
    # integer-encode the education categories
    X[:, -1] = np.unique(X[:, -1], return_inverse=True)[1]
    y = df['wage'].values
    return _clean_X_y(X, y)
def trees(return_X_y=True):
    """cherry trees dataset

    Parameters
    ----------
    return_X_y : bool,
        if True, returns a model-ready tuple of data (X, y)
        otherwise, returns a Pandas DataFrame

    Returns
    -------
    model-ready tuple of data (X, y)
    OR
    Pandas DataFrame

    Notes
    -----
    X contains the girth and the height of each tree; y contains the
    volume.

    Source:
    https://vincentarelbundock.github.io/Rdatasets/doc/datasets/trees.html
    """
    # y is real; recommend InvGaussGAM or GAM(distribution='gamma', link='log')
    df = pd.read_csv(PATH + '/trees.csv', index_col=0)
    if not return_X_y:
        return df
    return _clean_X_y(df[['Girth', 'Height']].values, df.Volume.values)
def default(return_X_y=True):
    """credit default dataset

    Parameters
    ----------
    return_X_y : bool,
        if True, returns a model-ready tuple of data (X, y)
        otherwise, returns a Pandas DataFrame

    Returns
    -------
    model-ready tuple of data (X, y)
    OR
    Pandas DataFrame

    Notes
    -----
    X contains the student category, credit card balance, and income.
    y contains the integer-encoded default outcome.

    Source:
    https://vincentarelbundock.github.io/Rdatasets/doc/ISLR/Default.html
    """
    # y is binary; recommend LogisticGAM
    df = pd.read_csv(PATH + '/default.csv', index_col=0)
    if not return_X_y:
        return df
    values = df.values
    # integer-encode the default outcome (col 0) and student flag (col 1)
    values[:, 0] = np.unique(values[:, 0], return_inverse=True)[1]
    values[:, 1] = np.unique(values[:, 1], return_inverse=True)[1]
    return _clean_X_y(values[:, 1:], values[:, 0])
def cake(return_X_y=True):
    """cake dataset

    Parameters
    ----------
    return_X_y : bool,
        if True, returns a model-ready tuple of data (X, y)
        otherwise, returns a Pandas DataFrame

    Returns
    -------
    model-ready tuple of data (X, y)
    OR
    Pandas DataFrame

    Notes
    -----
    X contains the integer-encoded recipe, the zero-based replicate, and
    the temperature. y contains the angle at which the cake broke.

    Source:
    https://vincentarelbundock.github.io/Rdatasets/doc/lme4/cake.html
    """
    # y is real; recommend LinearGAM
    df = pd.read_csv(PATH + '/cake.csv', index_col=0)
    if not return_X_y:
        return df
    X = df[['recipe', 'replicate', 'temperature']].values
    # integer-encode the recipe category (raw column 1 of the frame)
    X[:, 0] = np.unique(df.values[:, 1], return_inverse=True)[1]
    # make the replicate index zero-based
    X[:, 1] -= 1
    y = df['angle'].values
    return _clean_X_y(X, y)
def hepatitis(return_X_y=True):
    """hepatitis in Bulgaria dataset

    Parameters
    ----------
    return_X_y : bool,
        if True, returns a model-ready tuple of data (X, y)
        otherwise, returns a Pandas DataFrame

    Returns
    -------
    model-ready tuple of data (X, y)
    OR
    Pandas DataFrame

    Notes
    -----
    X contains the age of each patient group. y contains the ratio of
    HAV-positive patients to the total number in each age group; groups
    with 0 total patients are excluded.

    Source:
    Keiding, N. (1991) Age-specific incidence and prevalence: a statistical perspective
    """
    # y is real; recommend LinearGAM
    df = pd.read_csv(PATH + '/hepatitis_A_bulgaria.csv').astype(float)
    if not return_X_y:
        return df
    # eliminate 0/0 by dropping groups with no tested patients
    df = df[(df.total > 0).values]
    X = df.age.values
    y = df.hepatitis_A_positive.values / df.total.values
    return _clean_X_y(X, y)
def toy_classification(return_X_y=True, n=5000):
    """toy classification dataset with irrelevant features

    fitting a logistic model on this data and performing a model summary
    should reveal that features 2,3,4 are not significant.

    Parameters
    ----------
    return_X_y : bool,
        if True, returns a model-ready tuple of data (X, y)
        otherwise, returns a Pandas DataFrame
    n : int, default: 5000
        number of samples to generate

    Returns
    -------
    model-ready tuple of data (X, y)
    OR
    Pandas DataFrame

    Notes
    -----
    X contains 5 continuous variables (features 2-4 irrelevant) plus one
    categorical feature with levels {0, 1, 2, 3}; y contains binary
    labels. The data is randomly generated and will vary each time.
    """
    # make features: 5 continuous columns on [-5, 5) plus one categorical
    X = np.random.rand(n, 5) * 10 - 5
    cat = np.random.randint(0, 4, n)
    X = np.c_[X, cat]

    # make observations: only features 0, 1 and the parity of the
    # categorical column contribute to the log-odds
    log_odds = (-0.5*X[:, 0]**2) + 5 + (-0.5*X[:, 1]**2) + \
        np.mod(X[:, -1], 2)*-30
    p = 1/(1+np.exp(-log_odds)).squeeze()

    # NOTE: the deprecated alias np.int was removed in NumPy 1.24;
    # the builtin int is the documented replacement.
    y = (np.random.rand(n) < p).astype(int)

    if return_X_y:
        return X, y
    else:
        return pd.DataFrame(np.c_[X, y], columns=[['continuous0',
                                                   'continuous1',
                                                   'irrelevant0',
                                                   'irrelevant1',
                                                   'irrelevant2',
                                                   'categorical0',
                                                   'observations'
                                                   ]])
def head_circumference(return_X_y=True):
    """head circumference for dutch boys

    Parameters
    ----------
    return_X_y : bool,
        if True, returns a model-ready tuple of data (X, y)
        otherwise, returns a Pandas DataFrame

    Returns
    -------
    model-ready tuple of data (X, y)
    OR
    Pandas DataFrame

    Notes
    -----
    X contains the age in years of each patient; y contains the head
    circumference in centimeters.
    """
    # y is real; recommend ExpectileGAM
    df = pd.read_csv(PATH + '/head_circumference.csv', index_col=0).astype(float)
    if not return_X_y:
        return df
    return _clean_X_y(df[['age']].values, df['head'].values)
def chicago(return_X_y=True):
    """Chicago air pollution and death rate data

    Parameters
    ----------
    return_X_y : bool,
        if True, returns a model-ready tuple of data (X, y)
        otherwise, returns a Pandas DataFrame

    Returns
    -------
    model-ready tuple of data (X, y)
    OR
    Pandas DataFrame

    Notes
    -----
    X contains [['time', 'tmpd', 'pm10median', 'o3median']], with no NaNs.
    y contains 'death', the deaths per day, with no NaNs.

    Source:
    R gamair package, `data(chicago)`
    https://cran.r-project.org/web/packages/gamair/gamair.pdf
    https://rdrr.io/cran/gamair/man/chicago.html

    Columns:
        death : total deaths (per day).
        pm10median : median particles in 2.5-10 per cubic m
        pm25median : median particles < 2.5 mg per cubic m (more dangerous).
        o3median : Ozone in parts per billion
        so2median : Median Sulpher dioxide measurement
        time : time in days
        tmpd : temperature in fahrenheit
    """
    # recommend PoissonGAM
    df = pd.read_csv(PATH + '/chicago.csv', index_col=0).astype(float)
    if not return_X_y:
        return df
    # keep only the modeled columns and drop rows with missing values
    df = df[['time', 'tmpd', 'pm10median', 'o3median', 'death']].dropna()
    X = df[['time', 'tmpd', 'pm10median', 'o3median']].values
    y = df['death'].values
    return X, y
def toy_interaction(return_X_y=True, n=50000, stddev=0.1):
    """a sinusoid modulated by a linear function

    this is a simple dataset to test a model's capacity to fit interactions
    between features.

    a GAM with no interaction terms will have an R-squared close to 0,
    while a GAM with a tensor product will have R-squared close to 1.

    the data is random, and will vary on each invocation.

    Parameters
    ----------
    return_X_y : bool,
        if True, returns a model-ready tuple of data (X, y)
        otherwise, returns a Pandas DataFrame
    n : int, optional
        number of points to generate
    stddev : positive float, optional,
        standard deviation of irreducible error

    Returns
    -------
    model-ready tuple of data (X, y)
    OR
    Pandas DataFrame

    Notes
    -----
    X contains [['sinusoid', 'linear']]; y is formed by multiplying the
    sinusoid by the linear function, plus Gaussian noise.
    """
    X = np.random.uniform(-1, 1, size=(n, 2))
    # stretch the linear feature to [-5, 5)
    X[:, 1] *= 5

    noise = np.random.randn(len(X)) * stddev
    y = np.sin(X[:, 0] * 2 * np.pi * 1.5) * X[:, 1] + noise

    if not return_X_y:
        frame = pd.DataFrame(np.c_[X, y])
        frame.columns = [['sinusoid', 'linear', 'y']]
        return frame
    return X, y
|
|
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""RADOS Block Device Driver"""
from __future__ import absolute_import
import io
import json
import math
import os
import tempfile
import urllib
from oslo.config import cfg
from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common import fileutils
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import log as logging
from cinder.openstack.common import strutils
from cinder.openstack.common import units
from cinder.volume import driver
try:
import rados
import rbd
except ImportError:
rados = None
rbd = None
# Module-level logger for this driver.
LOG = logging.getLogger(__name__)

# Configuration options understood by the RBD volume driver; registered
# globally below and appended to each driver instance's configuration.
rbd_opts = [
    cfg.StrOpt('rbd_pool',
               default='rbd',
               help='The RADOS pool where rbd volumes are stored'),
    cfg.StrOpt('rbd_user',
               default=None,
               help='The RADOS client name for accessing rbd volumes '
                    '- only set when using cephx authentication'),
    cfg.StrOpt('rbd_ceph_conf',
               default='',  # default determined by librados
               help='Path to the ceph configuration file'),
    cfg.BoolOpt('rbd_flatten_volume_from_snapshot',
                default=False,
                help='Flatten volumes created from snapshots to remove '
                     'dependency from volume to snapshot'),
    cfg.StrOpt('rbd_secret_uuid',
               default=None,
               help='The libvirt uuid of the secret for the rbd_user '
                    'volumes'),
    cfg.StrOpt('volume_tmp_dir',
               default=None,
               help='Directory where temporary image files are stored '
                    'when the volume driver does not write them directly '
                    'to the volume.'),
    cfg.IntOpt('rbd_max_clone_depth',
               default=5,
               help='Maximum number of nested volume clones that are '
                    'taken before a flatten occurs. Set to 0 to disable '
                    'cloning.'),
    cfg.IntOpt('rbd_store_chunk_size', default=4,
               help=_('Volumes will be chunked into objects of this size '
                      '(in megabytes).')),
    cfg.IntOpt('rados_connect_timeout', default=-1,
               help=_('Timeout value (in seconds) used when connecting to '
                      'ceph cluster. If value < 0, no timeout is set and '
                      'default librados value is used.'))
]

CONF = cfg.CONF
CONF.register_opts(rbd_opts)
class RBDImageMetadata(object):
    """RBD image metadata to be used with RBDImageIOWrapper."""

    def __init__(self, image, pool, user, conf):
        self.image = image
        # String args handed to librbd must be None or utf-8, otherwise
        # librbd will break; encode them all up front.
        for attr, value in (('pool', pool), ('user', user), ('conf', conf)):
            setattr(self, attr, strutils.safe_encode(value))
class RBDImageIOWrapper(io.RawIOBase):
    """Enables LibRBD.Image objects to be treated as Python IO objects.

    Calling unimplemented interfaces will raise IOError.
    """

    def __init__(self, rbd_meta):
        super(RBDImageIOWrapper, self).__init__()
        self._rbd_meta = rbd_meta
        self._offset = 0

    def _inc_offset(self, length):
        # Advance the logical file position after a read/write.
        self._offset += length

    @property
    def rbd_image(self):
        return self._rbd_meta.image

    @property
    def rbd_user(self):
        return self._rbd_meta.user

    @property
    def rbd_pool(self):
        return self._rbd_meta.pool

    @property
    def rbd_conf(self):
        return self._rbd_meta.conf

    def read(self, length=None):
        """Read up to `length` bytes from the current offset.

        If `length` is None, read through the end of the image.
        """
        offset = self._offset
        total = self._rbd_meta.image.size()

        # NOTE(dosaboy): posix files do not barf if you read beyond their
        # length (they just return nothing) but rbd images do so we need to
        # return empty string if we have reached the end of the image.
        if (offset >= total):
            return ''

        if length is None:
            length = total

        # Clamp the request so we never read past the end of the image.
        if (offset + length) > total:
            length = total - offset

        self._inc_offset(length)
        return self._rbd_meta.image.read(int(offset), int(length))

    def write(self, data):
        self._rbd_meta.image.write(data, self._offset)
        self._inc_offset(len(data))

    def seekable(self):
        return True

    def seek(self, offset, whence=0):
        """Move the offset; return the new absolute position.

        :param offset: byte offset, interpreted relative to `whence`
        :param whence: 0 = start, 1 = current position, 2 = end of image
        :raises IOError: on an unsupported `whence` or a negative result
        """
        if whence == 0:
            new_offset = offset
        elif whence == 1:
            new_offset = self._offset + offset
        elif whence == 2:
            new_offset = self._rbd_meta.image.size()
            new_offset += offset
        else:
            raise IOError(_("Invalid argument - whence=%s not supported") %
                          (whence))

        if (new_offset < 0):
            raise IOError(_("Invalid argument"))

        self._offset = new_offset
        # The io.IOBase contract requires seek() to return the new absolute
        # position; previously this returned None, which breaks callers
        # that rely on e.g. `pos = f.seek(0, 2)` for sizing.
        return self._offset

    def tell(self):
        return self._offset

    def flush(self):
        try:
            self._rbd_meta.image.flush()
        except AttributeError:
            LOG.warning(_("flush() not supported in this version of librbd"))

    def fileno(self):
        """RBD does not have support for fileno() so we raise IOError.

        Raising IOError is recommended way to notify caller that interface is
        not supported - see http://docs.python.org/2/library/io.html#io.IOBase
        """
        raise IOError(_("fileno() not supported by RBD()"))

    # NOTE(dosaboy): if IO object is not closed explicitly, Python auto closes
    # it which, if this is not overridden, calls flush() prior to close which
    # in this case is unwanted since the rbd image may have been closed prior
    # to the autoclean - currently triggering a segfault in librbd.
    def close(self):
        pass
class RBDVolumeProxy(object):
    """Context manager for dealing with an existing rbd volume.

    This handles connecting to rados and opening an ioctx automatically, and
    otherwise acts like a librbd Image object.

    The underlying librados client and ioctx can be accessed as the attributes
    'client' and 'ioctx'.
    """
    def __init__(self, driver, name, pool=None, snapshot=None,
                 read_only=False):
        client, ioctx = driver._connect_to_rados(pool)
        # librbd requires utf-8 (or None) string arguments.
        if snapshot is not None:
            snapshot = strutils.safe_encode(snapshot)
        try:
            self.volume = driver.rbd.Image(ioctx, strutils.safe_encode(name),
                                           snapshot=snapshot,
                                           read_only=read_only)
        except driver.rbd.Error:
            # Don't leak the rados connection if the image cannot be opened.
            LOG.exception(_("error opening rbd image %s"), name)
            driver._disconnect_from_rados(client, ioctx)
            raise
        self.driver = driver
        self.client = client
        self.ioctx = ioctx
    def __enter__(self):
        return self
    def __exit__(self, type_, value, traceback):
        try:
            self.volume.close()
        finally:
            # Always tear down the rados connection, even if close() raised.
            self.driver._disconnect_from_rados(self.client, self.ioctx)
    def __getattr__(self, attrib):
        # Delegate everything else to the underlying librbd Image.
        return getattr(self.volume, attrib)
class RADOSClient(object):
    """Context manager to simplify error handling for connecting to ceph.

    Exposes the librados client as 'cluster' and the pool context as 'ioctx';
    both are cleaned up on exit.
    """
    def __init__(self, driver, pool=None):
        self.driver = driver
        self.cluster, self.ioctx = driver._connect_to_rados(pool)
    def __enter__(self):
        return self
    def __exit__(self, type_, value, traceback):
        self.driver._disconnect_from_rados(self.cluster, self.ioctx)
class RBDDriver(driver.VolumeDriver):
    """Implements RADOS block device (RBD) volume commands."""
    VERSION = '1.1.0'
    def __init__(self, *args, **kwargs):
        """Initialize driver state and normalize string config for librbd."""
        super(RBDDriver, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(rbd_opts)
        self._stats = {}
        # allow overrides for testing
        self.rados = kwargs.get('rados', rados)
        self.rbd = kwargs.get('rbd', rbd)
        # All string args used with librbd must be None or utf-8 otherwise
        # librbd will break.
        for attr in ['rbd_user', 'rbd_ceph_conf', 'rbd_pool']:
            val = getattr(self.configuration, attr)
            if val is not None:
                setattr(self.configuration, attr, strutils.safe_encode(val))
    def check_for_setup_error(self):
        """Returns an error if prerequisites aren't met."""
        if rados is None:
            msg = _('rados and rbd python libraries not found')
            raise exception.VolumeBackendAPIException(data=msg)
        # Verify the cluster is actually reachable before accepting requests.
        try:
            with RADOSClient(self):
                pass
        except self.rados.Error:
            msg = _('error connecting to ceph cluster')
            LOG.exception(msg)
            raise exception.VolumeBackendAPIException(data=msg)
def _ceph_args(self):
args = []
if self.configuration.rbd_user:
args.extend(['--id', self.configuration.rbd_user])
if self.configuration.rbd_ceph_conf:
args.extend(['--conf', self.configuration.rbd_ceph_conf])
return args
    def _connect_to_rados(self, pool=None):
        """Connect to rados and open an ioctx on the given (or default) pool.

        Returns a (client, ioctx) pair.  Raises VolumeBackendAPIException if
        the cluster cannot be reached; the client is shut down first.
        """
        LOG.debug("opening connection to ceph cluster (timeout=%s)." %
                  (self.configuration.rados_connect_timeout))
        client = self.rados.Rados(rados_id=self.configuration.rbd_user,
                                  conffile=self.configuration.rbd_ceph_conf)
        if pool is not None:
            pool = strutils.safe_encode(pool)
        else:
            pool = self.configuration.rbd_pool
        try:
            # A negative configured timeout means wait indefinitely.
            if self.configuration.rados_connect_timeout >= 0:
                client.connect(timeout=
                               self.configuration.rados_connect_timeout)
            else:
                client.connect()
            ioctx = client.open_ioctx(pool)
            return client, ioctx
        except self.rados.Error as exc:
            LOG.error("error connecting to ceph cluster.")
            # shutdown cannot raise an exception
            client.shutdown()
            raise exception.VolumeBackendAPIException(data=str(exc))
    def _disconnect_from_rados(self, client, ioctx):
        """Close the ioctx and shut the client down."""
        # closing an ioctx cannot raise an exception
        ioctx.close()
        client.shutdown()
    def _get_backup_snaps(self, rbd_image):
        """Get list of any backup snapshots that exist on this volume.

        There should only ever be one but accept all since they need to be
        deleted before the volume can be.
        """
        # NOTE(dosaboy): we do the import here otherwise we get import conflict
        # issues between the rbd driver and the ceph backup driver. These
        # issues only seem to occur when NOT using them together and are
        # triggered when the ceph backup driver imports the rbd volume driver.
        from cinder.backup.drivers import ceph
        return ceph.CephBackupDriver.get_backup_snaps(rbd_image)
def _get_mon_addrs(self):
args = ['ceph', 'mon', 'dump', '--format=json']
args.extend(self._ceph_args())
out, _ = self._execute(*args)
lines = out.split('\n')
if lines[0].startswith('dumped monmap epoch'):
lines = lines[1:]
monmap = json.loads('\n'.join(lines))
addrs = [mon['addr'] for mon in monmap['mons']]
hosts = []
ports = []
for addr in addrs:
host_port = addr[:addr.rindex('/')]
host, port = host_port.rsplit(':', 1)
hosts.append(host.strip('[]'))
ports.append(port)
return hosts, ports
    def _update_volume_stats(self):
        """Refresh self._stats with cluster capacity information.

        On connection failure the capacities remain 'unknown' rather than
        failing the stats update.
        """
        stats = {
            'vendor_name': 'Open Source',
            'driver_version': self.VERSION,
            'storage_protocol': 'ceph',
            'total_capacity_gb': 'unknown',
            'free_capacity_gb': 'unknown',
            'reserved_percentage': 0,
        }
        backend_name = self.configuration.safe_get('volume_backend_name')
        stats['volume_backend_name'] = backend_name or 'RBD'
        try:
            with RADOSClient(self) as client:
                new_stats = client.cluster.get_cluster_stats()
                # Cluster stats are reported in KiB; KiB / 2^20 == GiB.
                stats['total_capacity_gb'] = new_stats['kb'] / units.Mi
                stats['free_capacity_gb'] = new_stats['kb_avail'] / units.Mi
        except self.rados.Error:
            # just log and return unknown capacities
            LOG.exception(_('error refreshing volume stats'))
        self._stats = stats
    def get_volume_stats(self, refresh=False):
        """Return the current state of the volume service.

        If 'refresh' is True, run the update first.
        """
        if refresh:
            self._update_volume_stats()
        return self._stats
    def _supports_layering(self):
        """True if the installed librbd supports copy-on-write layering."""
        return hasattr(self.rbd, 'RBD_FEATURE_LAYERING')
    def _get_clone_depth(self, client, volume_name, depth=0):
        """Returns the number of ancestral clones (if any) of the given volume.
        """
        parent_volume = self.rbd.Image(client.ioctx, volume_name)
        try:
            pool, parent, snap = self._get_clone_info(parent_volume,
                                                      volume_name)
        finally:
            parent_volume.close()
        if not parent:
            return depth
        # If clone depth was reached, flatten should have occurred so if it has
        # been exceeded then something has gone wrong.
        if depth > CONF.rbd_max_clone_depth:
            raise Exception(_("clone depth exceeds limit of %s") %
                            (CONF.rbd_max_clone_depth))
        # Recurse up the chain, counting this ancestor.
        return self._get_clone_depth(client, parent, depth + 1)
def create_cloned_volume(self, volume, src_vref):
"""Create a cloned volume from another volume.
Since we are cloning from a volume and not a snapshot, we must first
create a snapshot of the source volume.
The user has the option to limit how long a volume's clone chain can be
by setting rbd_max_clone_depth. If a clone is made of another clone
and that clone has rbd_max_clone_depth clones behind it, the source
volume will be flattened.
"""
src_name = strutils.safe_encode(src_vref['name'])
dest_name = strutils.safe_encode(volume['name'])
flatten_parent = False
# Do full copy if requested
if CONF.rbd_max_clone_depth <= 0:
with RBDVolumeProxy(self, src_name, read_only=True) as vol:
vol.copy(vol.ioctx, dest_name)
return
# Otherwise do COW clone.
with RADOSClient(self) as client:
depth = self._get_clone_depth(client, src_name)
# If source volume is a clone and rbd_max_clone_depth reached,
# flatten the source before cloning. Zero rbd_max_clone_depth means
# infinite is allowed.
if depth == CONF.rbd_max_clone_depth:
LOG.debug("maximum clone depth (%d) has been reached - "
"flattening source volume" %
(CONF.rbd_max_clone_depth))
flatten_parent = True
src_volume = self.rbd.Image(client.ioctx, src_name)
try:
# First flatten source volume if required.
if flatten_parent:
pool, parent, snap = self._get_clone_info(src_volume,
src_name)
# Flatten source volume
LOG.debug("flattening source volume %s" % (src_name))
src_volume.flatten()
# Delete parent clone snap
parent_volume = self.rbd.Image(client.ioctx, parent)
try:
parent_volume.unprotect_snap(snap)
parent_volume.remove_snap(snap)
finally:
parent_volume.close()
# Create new snapshot of source volume
clone_snap = "%s.clone_snap" % dest_name
LOG.debug("creating snapshot='%s'" % (clone_snap))
src_volume.create_snap(clone_snap)
src_volume.protect_snap(clone_snap)
except Exception as exc:
# Only close if exception since we still need it.
src_volume.close()
raise exc
# Now clone source volume snapshot
try:
LOG.debug("cloning '%(src_vol)s@%(src_snap)s' to "
"'%(dest)s'" %
{'src_vol': src_name, 'src_snap': clone_snap,
'dest': dest_name})
self.rbd.RBD().clone(client.ioctx, src_name, clone_snap,
client.ioctx, dest_name,
features=self.rbd.RBD_FEATURE_LAYERING)
except Exception as exc:
src_volume.unprotect_snap(clone_snap)
src_volume.remove_snap(clone_snap)
raise exc
finally:
src_volume.close()
LOG.debug("clone created successfully")
    def create_volume(self, volume):
        """Creates a logical volume."""
        # A zero-sized request still needs a real image; use 100 MiB.
        if int(volume['size']) == 0:
            size = 100 * units.Mi
        else:
            size = int(volume['size']) * units.Gi
        LOG.debug("creating volume '%s'" % (volume['name']))
        old_format = True
        features = 0
        # rbd object size is 2^order bytes; derive order from the chunk size.
        chunk_size = CONF.rbd_store_chunk_size * units.Mi
        order = int(math.log(chunk_size, 2))
        if self._supports_layering():
            # The new image format is required for copy-on-write cloning.
            old_format = False
            features = self.rbd.RBD_FEATURE_LAYERING
        with RADOSClient(self) as client:
            self.rbd.RBD().create(client.ioctx,
                                  strutils.safe_encode(volume['name']),
                                  size,
                                  order,
                                  old_format=old_format,
                                  features=features)
    def _flatten(self, pool, volume_name):
        """Detach volume_name from its parent by copying in all data."""
        LOG.debug('flattening %(pool)s/%(img)s' %
                  dict(pool=pool, img=volume_name))
        with RBDVolumeProxy(self, volume_name, pool) as vol:
            vol.flatten()
    def _clone(self, volume, src_pool, src_image, src_snap):
        """COW-clone src_pool/src_image@src_snap into the new volume."""
        LOG.debug('cloning %(pool)s/%(img)s@%(snap)s to %(dst)s' %
                  dict(pool=src_pool, img=src_image, snap=src_snap,
                       dst=volume['name']))
        with RADOSClient(self, src_pool) as src_client:
            with RADOSClient(self) as dest_client:
                self.rbd.RBD().clone(src_client.ioctx,
                                     strutils.safe_encode(src_image),
                                     strutils.safe_encode(src_snap),
                                     dest_client.ioctx,
                                     strutils.safe_encode(volume['name']),
                                     features=self.rbd.RBD_FEATURE_LAYERING)
def _resize(self, volume, **kwargs):
size = kwargs.get('size', None)
if not size:
size = int(volume['size']) * units.Gi
with RBDVolumeProxy(self, volume['name']) as vol:
vol.resize(size)
    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot."""
        self._clone(volume, self.configuration.rbd_pool,
                    snapshot['volume_name'], snapshot['name'])
        # Optionally detach the new volume from the snapshot immediately.
        if self.configuration.rbd_flatten_volume_from_snapshot:
            self._flatten(self.configuration.rbd_pool, volume['name'])
        # Grow the clone if a non-zero size was requested.
        if int(volume['size']):
            self._resize(volume)
def _delete_backup_snaps(self, rbd_image):
backup_snaps = self._get_backup_snaps(rbd_image)
if backup_snaps:
for snap in backup_snaps:
rbd_image.remove_snap(snap['name'])
else:
LOG.debug("volume has no backup snaps")
    def _get_clone_info(self, volume, volume_name, snap=None):
        """If volume is a clone, return its parent info.

        Returns a tuple of (pool, parent, snap). A snapshot may optionally be
        provided for the case where a cloned volume has been flattened but it's
        snapshot still depends on the parent.
        """
        try:
            # Temporarily switch to the snapshot context if one was given.
            snap and volume.set_snap(snap)
            pool, parent, parent_snap = tuple(volume.parent_info())
            snap and volume.set_snap(None)
            # Strip the tag off the end of the volume name since it will not be
            # in the snap name.
            if volume_name.endswith('.deleted'):
                volume_name = volume_name[:-len('.deleted')]
            # Now check the snap name matches.
            if parent_snap == "%s.clone_snap" % volume_name:
                return pool, parent, parent_snap
        except self.rbd.ImageNotFound:
            # parent_info() raises ImageNotFound when there is no parent.
            LOG.debug("volume %s is not a clone" % volume_name)
            volume.set_snap(None)
        return (None, None, None)
    def _delete_clone_parent_refs(self, client, parent_name, parent_snap):
        """Walk back up the clone chain and delete references.

        Deletes references i.e. deleted parent volumes and snapshots.
        """
        parent_rbd = self.rbd.Image(client.ioctx, parent_name)
        parent_has_snaps = False
        try:
            # Check for grandparent
            _pool, g_parent, g_parent_snap = self._get_clone_info(parent_rbd,
                                                                  parent_name,
                                                                  parent_snap)
            LOG.debug("deleting parent snapshot %s" % (parent_snap))
            parent_rbd.unprotect_snap(parent_snap)
            parent_rbd.remove_snap(parent_snap)
            parent_has_snaps = bool(list(parent_rbd.list_snaps()))
        finally:
            parent_rbd.close()
        # If parent has been deleted in Cinder, delete the silent reference and
        # keep walking up the chain if it is itself a clone.
        if (not parent_has_snaps) and parent_name.endswith('.deleted'):
            LOG.debug("deleting parent %s" % (parent_name))
            self.rbd.RBD().remove(client.ioctx, parent_name)
            # Now move up to grandparent if there is one
            if g_parent:
                self._delete_clone_parent_refs(client, g_parent, g_parent_snap)
    def delete_volume(self, volume):
        """Deletes a logical volume.

        A volume with non-clone snapshots raises VolumeIsBusy.  A volume with
        COW clones is renamed to '<name>.deleted' instead of being removed, to
        be cleaned up later when its clones are deleted.
        """
        # NOTE(dosaboy): this was broken by commit cbe1d5f. Ensure names are
        # utf-8 otherwise librbd will barf.
        volume_name = strutils.safe_encode(volume['name'])
        with RADOSClient(self) as client:
            try:
                rbd_image = self.rbd.Image(client.ioctx, volume_name)
            except self.rbd.ImageNotFound:
                # Already gone - treat delete as a success.
                LOG.info(_("volume %s no longer exists in backend")
                         % (volume_name))
                return
            clone_snap = None
            parent = None
            # Ensure any backup snapshots are deleted
            self._delete_backup_snaps(rbd_image)
            # If the volume has non-clone snapshots this delete is expected to
            # raise VolumeIsBusy so do so straight away.
            try:
                snaps = rbd_image.list_snaps()
                for snap in snaps:
                    if snap['name'].endswith('.clone_snap'):
                        LOG.debug("volume has clone snapshot(s)")
                        # We grab one of these and use it when fetching parent
                        # info in case the volume has been flattened.
                        clone_snap = snap['name']
                        break
                    raise exception.VolumeIsBusy(volume_name=volume_name)
                # Determine if this volume is itself a clone
                pool, parent, parent_snap = self._get_clone_info(rbd_image,
                                                                 volume_name,
                                                                 clone_snap)
            finally:
                rbd_image.close()
            if clone_snap is None:
                LOG.debug("deleting rbd volume %s" % (volume_name))
                try:
                    self.rbd.RBD().remove(client.ioctx, volume_name)
                except self.rbd.ImageBusy:
                    msg = (_("ImageBusy error raised while deleting rbd "
                             "volume. This may have been caused by a "
                             "connection from a client that has crashed and, "
                             "if so, may be resolved by retrying the delete "
                             "after 30 seconds has elapsed."))
                    LOG.warn(msg)
                    # Now raise this so that volume stays available so that we
                    # delete can be retried.
                    raise exception.VolumeIsBusy(msg, volume_name=volume_name)
                # If it is a clone, walk back up the parent chain deleting
                # references.
                if parent:
                    LOG.debug("volume is a clone so cleaning references")
                    self._delete_clone_parent_refs(client, parent, parent_snap)
            else:
                # If the volume has copy-on-write clones we will not be able to
                # delete it. Instead we will keep it as a silent volume which
                # will be deleted when it's snapshot and clones are deleted.
                new_name = "%s.deleted" % (volume_name)
                self.rbd.RBD().rename(client.ioctx, volume_name, new_name)
    def create_snapshot(self, snapshot):
        """Creates an rbd snapshot."""
        with RBDVolumeProxy(self, snapshot['volume_name']) as volume:
            snap = strutils.safe_encode(snapshot['name'])
            volume.create_snap(snap)
            # Protect the snap so copy-on-write clones can be made from it.
            if self._supports_layering():
                volume.protect_snap(snap)
    def delete_snapshot(self, snapshot):
        """Deletes an rbd snapshot."""
        # NOTE(dosaboy): this was broken by commit cbe1d5f. Ensure names are
        # utf-8 otherwise librbd will barf.
        volume_name = strutils.safe_encode(snapshot['volume_name'])
        snap_name = strutils.safe_encode(snapshot['name'])
        with RBDVolumeProxy(self, volume_name) as volume:
            if self._supports_layering():
                try:
                    # Protected snaps must be unprotected before removal.
                    volume.unprotect_snap(snap_name)
                except self.rbd.ImageBusy:
                    raise exception.SnapshotIsBusy(snapshot_name=snap_name)
            volume.remove_snap(snap_name)
    def ensure_export(self, context, volume):
        """Synchronously recreates an export for a logical volume."""
        # No-op: rbd volumes are accessed directly via RADOS, no export layer.
        pass
    def create_export(self, context, volume):
        """Exports the volume."""
        # No-op: rbd volumes are accessed directly via RADOS, no export layer.
        pass
    def remove_export(self, context, volume):
        """Removes an export for a logical volume."""
        # No-op: rbd volumes are accessed directly via RADOS, no export layer.
        pass
    def initialize_connection(self, volume, connector):
        """Return the connection info needed to attach this rbd volume."""
        hosts, ports = self._get_mon_addrs()
        data = {
            'driver_volume_type': 'rbd',
            'data': {
                'name': '%s/%s' % (self.configuration.rbd_pool,
                                   volume['name']),
                'hosts': hosts,
                'ports': ports,
                'auth_enabled': (self.configuration.rbd_user is not None),
                'auth_username': self.configuration.rbd_user,
                'secret_type': 'ceph',
                'secret_uuid': self.configuration.rbd_secret_uuid, }
        }
        LOG.debug('connection data: %s', data)
        return data
    def terminate_connection(self, volume, connector, **kwargs):
        """No-op: nothing to tear down for rbd connections."""
        pass
def _parse_location(self, location):
prefix = 'rbd://'
if not location.startswith(prefix):
reason = _('Not stored in rbd')
raise exception.ImageUnacceptable(image_id=location, reason=reason)
pieces = map(urllib.unquote, location[len(prefix):].split('/'))
if any(map(lambda p: p == '', pieces)):
reason = _('Blank components')
raise exception.ImageUnacceptable(image_id=location, reason=reason)
if len(pieces) != 4:
reason = _('Not an rbd snapshot')
raise exception.ImageUnacceptable(image_id=location, reason=reason)
return pieces
    def _get_fsid(self):
        """Return the ceph cluster's fsid (unique cluster id)."""
        with RADOSClient(self) as client:
            return client.cluster.get_fsid()
    def _is_cloneable(self, image_location, image_meta):
        """Return True if the glance image can be COW-cloned directly.

        Requires the image to live in this ceph cluster, be in raw format,
        and be readable via librbd.
        """
        try:
            fsid, pool, image, snapshot = self._parse_location(image_location)
        except exception.ImageUnacceptable as e:
            LOG.debug('not cloneable: %s', e)
            return False
        if self._get_fsid() != fsid:
            reason = _('%s is in a different ceph cluster') % image_location
            LOG.debug(reason)
            return False
        if image_meta['disk_format'] != 'raw':
            reason = _("rbd image clone requires image format to be "
                       "'raw' but image {0} is '{1}'").format(
                           image_location, image_meta['disk_format'])
            LOG.debug(reason)
            return False
        # check that we can read the image
        try:
            with RBDVolumeProxy(self, image,
                                pool=pool,
                                snapshot=snapshot,
                                read_only=True):
                return True
        except self.rbd.Error as e:
            LOG.debug('Unable to open image %(loc)s: %(err)s' %
                      dict(loc=image_location, err=e))
            return False
    def clone_image(self, volume, image_location, image_id, image_meta):
        """COW-clone a glance-backed rbd image into the volume if possible.

        Returns ({}, False) when the image is not cloneable so the caller
        falls back to a full copy.
        """
        image_location = image_location[0] if image_location else None
        if image_location is None or not self._is_cloneable(
                image_location, image_meta):
            return ({}, False)
        prefix, pool, image, snapshot = self._parse_location(image_location)
        self._clone(volume, pool, image, snapshot)
        self._resize(volume)
        return {'provider_location': None}, True
def _ensure_tmp_exists(self):
tmp_dir = self.configuration.volume_tmp_dir
if tmp_dir and not os.path.exists(tmp_dir):
os.makedirs(tmp_dir)
    def copy_image_to_volume(self, context, volume, image_service, image_id):
        """Fetch a glance image to a temp file and import it as the volume."""
        self._ensure_tmp_exists()
        tmp_dir = self.configuration.volume_tmp_dir
        with tempfile.NamedTemporaryFile(dir=tmp_dir) as tmp:
            image_utils.fetch_to_raw(context, image_service, image_id,
                                     tmp.name,
                                     self.configuration.volume_dd_blocksize,
                                     size=volume['size'])
            # Replace the empty volume created earlier with the imported data.
            self.delete_volume(volume)
            chunk_size = CONF.rbd_store_chunk_size * units.Mi
            order = int(math.log(chunk_size, 2))
            # keep using the command line import instead of librbd since it
            # detects zeroes to preserve sparseness in the image
            args = ['rbd', 'import',
                    '--pool', self.configuration.rbd_pool,
                    '--order', order,
                    tmp.name, volume['name']]
            if self._supports_layering():
                args.append('--new-format')
            args.extend(self._ceph_args())
            self._try_execute(*args)
        self._resize(volume)
    def copy_volume_to_image(self, context, volume, image_service, image_meta):
        """Export the volume to a temp file and upload it to glance."""
        self._ensure_tmp_exists()
        tmp_dir = self.configuration.volume_tmp_dir or '/tmp'
        tmp_file = os.path.join(tmp_dir,
                                volume['name'] + '-' + image_meta['id'])
        # remove_path_on_error cleans up the temp file if anything below fails.
        with fileutils.remove_path_on_error(tmp_file):
            args = ['rbd', 'export',
                    '--pool', self.configuration.rbd_pool,
                    volume['name'], tmp_file]
            args.extend(self._ceph_args())
            self._try_execute(*args)
            image_utils.upload_volume(context, image_service,
                                      image_meta, tmp_file)
        os.unlink(tmp_file)
    def backup_volume(self, context, backup, backup_service):
        """Create a new backup from an existing volume."""
        volume = self.db.volume_get(context, backup['volume_id'])
        with RBDVolumeProxy(self, volume['name'],
                            self.configuration.rbd_pool) as rbd_image:
            # Wrap the image in a file-like object for the backup service.
            rbd_meta = RBDImageMetadata(rbd_image, self.configuration.rbd_pool,
                                        self.configuration.rbd_user,
                                        self.configuration.rbd_ceph_conf)
            rbd_fd = RBDImageIOWrapper(rbd_meta)
            backup_service.backup(backup, rbd_fd)
        LOG.debug("volume backup complete.")
    def restore_backup(self, context, backup, volume, backup_service):
        """Restore an existing backup to a new or existing volume."""
        with RBDVolumeProxy(self, volume['name'],
                            self.configuration.rbd_pool) as rbd_image:
            # Wrap the image in a file-like object for the backup service.
            rbd_meta = RBDImageMetadata(rbd_image, self.configuration.rbd_pool,
                                        self.configuration.rbd_user,
                                        self.configuration.rbd_ceph_conf)
            rbd_fd = RBDImageIOWrapper(rbd_meta)
            backup_service.restore(backup, volume['id'], rbd_fd)
        LOG.debug("volume restore complete.")
    def extend_volume(self, volume, new_size):
        """Extend an existing volume.

        Raises VolumeBackendAPIException if the resize fails for any reason.
        """
        old_size = volume['size']
        try:
            size = int(new_size) * units.Gi
            self._resize(volume, size=size)
        except Exception:
            msg = _('Failed to Extend Volume '
                    '%(volname)s') % {'volname': volume['name']}
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
        LOG.debug("Extend volume from %(old_size)s GB to %(new_size)s GB.",
                  {'old_size': old_size, 'new_size': new_size})
    def manage_existing(self, volume, existing_ref):
        """Manages an existing image.

        Renames the image name to match the expected name for the volume.
        Error checking done by manage_existing_get_size is not repeated.

        :param volume:
            volume ref info to be set
        :param existing_ref:
            existing_ref is a dictionary of the form:
            {'source-name': <name of rbd image>}
        """
        # Raise an exception if we didn't find a suitable rbd image.
        with RADOSClient(self) as client:
            rbd_name = existing_ref['source-name']
            self.rbd.RBD().rename(client.ioctx, strutils.safe_encode(rbd_name),
                                  strutils.safe_encode(volume['name']))
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of an existing image for manage_existing.
:param volume:
volume ref info to be set
:param existing_ref:
existing_ref is a dictionary of the form:
{'source-name': <name of rbd image>}
"""
# Check that the reference is valid
if 'source-name' not in existing_ref:
reason = _('Reference must contain source-name element.')
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=reason)
rbd_name = strutils.safe_encode(existing_ref['source-name'])
with RADOSClient(self) as client:
# Raise an exception if we didn't find a suitable rbd image.
try:
rbd_image = self.rbd.Image(client.ioctx, rbd_name)
image_size = rbd_image.size()
except self.rbd.ImageNotFound:
kwargs = {'existing_ref': rbd_name,
'reason': 'Specified rbd image does not exist.'}
raise exception.ManageExistingInvalidReference(**kwargs)
finally:
rbd_image.close()
# RBD image size is returned in bytes. Attempt to parse
# size as a float and round up to the next integer.
try:
convert_size = int(math.ceil(int(image_size))) / units.Gi
return convert_size
except ValueError:
exception_message = (_("Failed to manage existing volume "
"%(name)s, because reported size "
"%(size)s was not a floating-point"
" number.")
% {'name': rbd_name,
'size': image_size})
raise exception.VolumeBackendAPIException(
data=exception_message)
|
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import logging
import os
import pickle
import random
import sys
from contextlib import contextmanager
from tempfile import TemporaryDirectory
from typing import List, Optional, Tuple
from unittest.mock import patch
import mxnet as mx
import numpy as np
import sockeye.average
import sockeye.constants as C
import sockeye.image_captioning.captioner
import sockeye.image_captioning.extract_features
import sockeye.image_captioning.train
from sockeye.evaluate import raw_corpus_bleu, raw_corpus_chrf
try: # Try to import pillow
from PIL import Image # pylint: disable=import-error
except ImportError as e:
raise RuntimeError("Please install pillow.")
logger = logging.getLogger(__name__)
# Vocabulary used to build random target captions.
_DIGITS = "0123456789"
# (height, width, channels) of the random source images written to disk.
_IMAGE_SHAPE = (100, 100, 3)
# (channels, height, width) input shape recorded for the CNN image encoder.
_CNN_INPUT_IMAGE_SHAPE = (3, 224, 224)
# Shape of the precomputed random image features.
_FEATURE_SHAPE = (2048, 7, 7)
def generate_img_or_feat(filename, use_features):
    """Write either a random feature array (.npy) or a random JPEG image."""
    if use_features:
        np.save(filename, np.random.rand(*_FEATURE_SHAPE))
    else:
        pixels = (np.random.rand(*_IMAGE_SHAPE) * 255).astype('uint8')
        Image.fromarray(pixels).save(filename)
def generate_img_text_experiment_files(
        source_list: List[str],
        work_dir: str,
        source_path: str,
        target_path: str,
        line_length: int = 9,
        use_features: bool = False,
        seed=13):
    """Write parallel source/target files plus random images or features.

    For each name in source_list a filename line goes to source_path and a
    random digit sequence (1..line_length tokens) to target_path; the matching
    random image (.jpg) or feature array (.npy) is created under work_dir.
    The image/feature shapes are pickled to work_dir/image_feature_sizes.pkl.
    """
    random_gen = random.Random(seed)
    with open(source_path, "w") as source_out, open(target_path, "w") as target_out:
        source_list_img = []
        for s in source_list:
            if not use_features:
                filename = s + ".jpg"
            else:
                filename = s + ".npy"
            source_list_img.append(os.path.join(work_dir, filename))
            print(filename, file=source_out)
            digits = [random_gen.choice(_DIGITS) for _ in range(random_gen.randint(1, line_length))]
            print(" ".join(digits), file=target_out)
    # Create random images/features
    for s in source_list_img:
        # NOTE(review): 's' was already joined with work_dir above; this
        # second join is a no-op only while work_dir is absolute - confirm.
        filename = os.path.join(work_dir, s)
        generate_img_or_feat(filename, use_features)
    # Generate and save the image size and feature size
    size_out_file = os.path.join(work_dir, "image_feature_sizes.pkl")
    with open(size_out_file, "wb") as fout:
        pickle.dump({"image_shape": _CNN_INPUT_IMAGE_SHAPE,
                     "features_shape": _FEATURE_SHAPE}, fout)
@contextmanager
def tmp_img_captioning_dataset(
        source_list: List[str],
        prefix: str,
        train_max_length: int,
        dev_max_length: int,
        test_max_length: int,
        use_features: bool = False,
        seed_train: int = 13,
        seed_dev: int = 13):
    """Yield a dict of paths for a throwaway train/dev/test captioning dataset.

    All files live in a TemporaryDirectory that is removed when the context
    exits; 'work_dir' in the yielded dict is that directory.
    """
    with TemporaryDirectory(prefix=prefix) as work_dir:
        # Simple digits files for train/dev data
        train_source_path = os.path.join(work_dir, "train.src")
        train_target_path = os.path.join(work_dir, "train.tgt")
        dev_source_path = os.path.join(work_dir, "dev.src")
        dev_target_path = os.path.join(work_dir, "dev.tgt")
        test_source_path = os.path.join(work_dir, "test.src")
        test_target_path = os.path.join(work_dir, "test.tgt")
        generate_img_text_experiment_files(source_list, work_dir, train_source_path, train_target_path,
                                           train_max_length, use_features, seed=seed_train)
        generate_img_text_experiment_files(source_list, work_dir, dev_source_path, dev_target_path,
                                           dev_max_length, use_features, seed=seed_dev)
        generate_img_text_experiment_files(source_list, work_dir, test_source_path, test_target_path,
                                           test_max_length, use_features, seed=seed_dev)
        data = {'work_dir': work_dir,
                'source': train_source_path,
                'target': train_target_path,
                'validation_source': dev_source_path,
                'validation_target': dev_target_path,
                'test_source': test_source_path,
                'test_target': test_target_path}
        yield data
# Command-line template shared by captioning training invocations.
_CAPTION_TRAIN_PARAMS_COMMON = \
    "--use-cpu --max-seq-len 72,868 --source-root {source_root} --source {train_source} --target {train_target}" \
    " --validation-source-root {dev_root} --validation-source {dev_source} --validation-target {dev_target} --output {model} {quiet}" \
    " --seed {seed}"
# Command-line template shared by captioner (inference) invocations.
_CAPTIONER_PARAMS_COMMON = "--use-cpu --models {model} --source-root {source_root} --input {input} --output {output} {quiet}"
def run_train_captioning(train_params: str,
                         translate_params: str,
                         translate_params_equiv: Optional[str],
                         train_source_path: str,
                         train_target_path: str,
                         dev_source_path: str,
                         dev_target_path: str,
                         test_source_path: str,
                         test_target_path: str,
                         max_seq_len: int = 10,
                         work_dir: Optional[str] = None,
                         seed: int = 13,
                         quiet: bool = False) -> Tuple[float, float, float]:
    """
    Train a model and caption a dev set. Report validation perplexity and BLEU.

    :param train_params: Command line args for model training.
    :param translate_params: First command line args for translation.
    :param translate_params_equiv: Second command line args for captioning. Should produce the same outputs
    :param train_source_path: Path to the source file.
    :param train_target_path: Path to the target file.
    :param dev_source_path: Path to the development source file.
    :param dev_target_path: Path to the development target file.
    :param test_source_path: Path to the test source file.
    :param test_target_path: Path to the test target file.
    :param max_seq_len: The maximum sequence length.
    :param work_dir: The directory to store the model and other outputs in.
    :param seed: The seed used for training.
    :param quiet: Suppress the console output of training and decoding.
    :return: A tuple of (best validation perplexity, BLEU score, chrF score).
    """
    source_root = work_dir
    if quiet:
        quiet_arg = "--quiet"
    else:
        quiet_arg = ""
    with TemporaryDirectory(dir=work_dir, prefix="test_train_translate.") as work_dir:
        # Train model
        model_path = os.path.join(work_dir, "model")
        params = "{} {} {}".format(sockeye.image_captioning.train.__file__,
                                   _CAPTION_TRAIN_PARAMS_COMMON.format(
                                       source_root=source_root,
                                       train_source=train_source_path,
                                       train_target=train_target_path,
                                       dev_root=source_root,
                                       dev_source=dev_source_path,
                                       dev_target=dev_target_path,
                                       model=model_path,
                                       max_len=max_seq_len,
                                       seed=seed,
                                       quiet=quiet_arg),
                                   train_params)
        logger.info("Starting training with parameters %s.", train_params)
        with patch.object(sys, "argv", params.split()):
            sockeye.image_captioning.train.main()
        logger.info("Translating with parameters %s.", translate_params)
        # Translate corpus with the 1st params
        out_path = os.path.join(work_dir, "out.txt")
        params = "{} {} {}".format(sockeye.image_captioning.captioner.__file__,
                                   _CAPTIONER_PARAMS_COMMON.format(model=model_path,
                                                                   source_root=source_root,
                                                                   input=test_source_path,
                                                                   output=out_path,
                                                                   quiet=quiet_arg),
                                   translate_params)
        with patch.object(sys, "argv", params.split()):
            sockeye.image_captioning.captioner.main()
        # Translate corpus with the 2nd params
        if translate_params_equiv is not None:
            out_path_equiv = os.path.join(work_dir, "out_equiv.txt")
            params = "{} {} {}".format(sockeye.image_captioning.captioner.__file__,
                                       _CAPTIONER_PARAMS_COMMON.format(model=model_path,
                                                                       source_root=source_root,
                                                                       input=test_source_path,
                                                                       output=out_path_equiv,
                                                                       quiet=quiet_arg),
                                       translate_params_equiv)
            with patch.object(sys, "argv", params.split()):
                sockeye.image_captioning.captioner.main()
            # read-in both outputs, ensure they are the same
            with open(out_path, 'rt') as f:
                lines = f.readlines()
            with open(out_path_equiv, 'rt') as f:
                lines_equiv = f.readlines()
            assert all(a == b for a, b in zip(lines, lines_equiv))
        # test averaging
        points = sockeye.average.find_checkpoints(model_path=model_path,
                                                  size=1,
                                                  strategy='best',
                                                  metric=C.PERPLEXITY)
        assert len(points) > 0
        averaged_params = sockeye.average.average(points)
        assert averaged_params
        # get best validation perplexity
        # NOTE(review): sockeye.utils is not imported directly in this module;
        # this relies on it being reachable via the sockeye.average import -
        # confirm.
        metrics = sockeye.utils.read_metrics_file(path=os.path.join(model_path, C.METRICS_NAME))
        perplexity = min(m[C.PERPLEXITY + '-val'] for m in metrics)
        hypotheses = open(out_path, "r").readlines()
        references = open(test_target_path, "r").readlines()
        assert len(hypotheses) == len(references)
        # compute metrics
        bleu = raw_corpus_bleu(hypotheses=hypotheses, references=references, offset=0.01)
        chrf = raw_corpus_chrf(hypotheses=hypotheses, references=references)
        return perplexity, bleu, chrf
# Command-line template shared by feature-extraction invocations.
_EXTRACT_FEATURES_PARAMS_COMMON = \
    "--use-cpu --image-root {image_root} --input {source_file} --output-root {output_root} " \
    "--output {output_file} --image-encoder-model-path {image_encoder_model_path}"
def run_extract_features_captioning(source_image_size: tuple,
                                    batch_size: int,
                                    extract_params: str,
                                    source_files: List[str],
                                    image_root: str) -> None:
    """Create a small CNN checkpoint and run the feature-extraction CLI on it.

    :param source_image_size: Input image shape (channels, height, width).
    :param batch_size: Batch size used to bind the extraction network.
    :param extract_params: Extra command line args for the extraction CLI.
    :param source_files: Image-list files to extract features for.
    :param image_root: Directory containing the source images.
    """
    with TemporaryDirectory(dir=image_root, prefix="test_extract_feats") as work_dir:
        model_path = os.path.join(work_dir, '2-conv-layer')
        epoch = 0
        # Create net and save to disk
        create_simple_and_save_to_disk(model_path, epoch, source_image_size, batch_size)
        # Extract features
        for s in source_files:
            with TemporaryDirectory(dir=work_dir, prefix="extracted_feats") as local_work_dir:
                output_root = local_work_dir
                output_file = os.path.join(local_work_dir, "random.features")
                params = "{} {} {}".format(sockeye.image_captioning.extract_features.__file__,
                                           _EXTRACT_FEATURES_PARAMS_COMMON.format(
                                               image_root=image_root,
                                               source_file=s,
                                               output_root=output_root,
                                               output_file=output_file,
                                               image_encoder_model_path=model_path
                                           ),
                                           extract_params)
                # Message typo fixed ("extractopm" -> "extraction").
                logger.info("Starting feature extraction with parameters %s.", extract_params)
                with patch.object(sys, "argv", params.split()):
                    sockeye.image_captioning.extract_features.main()
def create_simple_and_save_to_disk(prefix, iteration, source_image_size, batch_size):
    """Builds the toy two-conv-layer net, initializes it, and checkpoints it.

    :param prefix: Checkpoint path prefix handed to MXNet.
    :param iteration: Epoch number encoded into the checkpoint filename.
    :param source_image_size: Input image shape (channels, height, width).
    :param batch_size: Batch size used when binding the module.
    """
    network = mx.mod.Module(get_2convnet_symbol())
    data_shape = ('data', (batch_size,) + source_image_size)
    label_shape = ('softmax_label', (batch_size, 1))
    network.bind(data_shapes=[data_shape], label_shapes=[label_shape])
    network.init_params()
    # Persist symbol + randomly-initialized weights to disk.
    network.save_checkpoint(prefix, iteration)
def get_2convnet_symbol():
    """Returns the symbol graph for a tiny conv net (two conv/tanh/max-pool
    stages, a flatten, a 1-unit FC layer, and a softmax output) used only as a
    stand-in image encoder in tests."""
    net = mx.symbol.Variable('data')
    # Two identical conv -> tanh -> max-pool stages with growing filter counts.
    for layer_name, num_filter in (('conv1', 20), ('conv2', 50)):
        net = mx.symbol.Convolution(data=net, kernel=(5, 5),
                                    num_filter=num_filter, name=layer_name)
        net = mx.symbol.Activation(data=net, act_type="tanh")
        net = mx.symbol.Pooling(data=net, pool_type="max",
                                kernel=(2, 2), stride=(2, 2))
    net = mx.symbol.Flatten(data=net)
    net = mx.symbol.FullyConnected(data=net, num_hidden=1)
    # Loss layer; the name 'softmax' matches the label variable 'softmax_label'.
    return mx.symbol.SoftmaxOutput(data=net, name='softmax')
|
|
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A program for analyzing the Shaka compiled sources.
This can be used to find areas that can be removed if not needed. This uses
the source map (i.e. shaka-player.compiled.debug.map) to find the compiled code
size, see:
https://github.com/mattrobenolt/python-sourcemap
http://www.html5rocks.com/en/tutorials/developertools/sourcemaps/
This script can output four different stats in two different formats:
- The size of functions and namespaces.
- The dependencies between types (in plain or DOT format).
- The dependencies between functions (in plain or DOT format).
- All tokens in the source map.
The dependencies can be outputted in DOT format which can be used with graph
programs to display a visual layout of the dependencies.
"""
import json
import logging
import math
import os
import string
import sys
import shakaBuildHelpers
# A Base64 VLQ digit can represent 5 bits of payload, so it is Base32.
VLQ_BASE_SHIFT = 5
VLQ_BASE = 1 << VLQ_BASE_SHIFT
# A mask of the payload bits of a VLQ digit (11111), 31 decimal.
VLQ_BASE_MASK = VLQ_BASE - 1
# The continuation bit is the 6th bit; when set, another digit follows.
VLQ_CONTINUATION_BIT = VLQ_BASE
# Don't use the Base64 lib since this is not a real Base64 string; it simply
# maps each character to a single Base64 digit value (0-63).
B64 = dict((c, i) for i, c in
           enumerate('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
                     '0123456789+/'))
def from_vlq_signed(value):
  """Converts a VLQ number to a normal signed number.

  Args:
    value: A number decoded from a VLQ string.

  Returns:
    an integer.
  """
  # The least-significant bit of a VLQ value stores the sign; the rest is
  # the magnitude.
  magnitude = value >> 1
  return -magnitude if value & 1 else magnitude
class Segment(object):
  """Defines one entry of a source-map 'mappings' line.

  Members:
    dst_col_offset - The offset of the destination column relative to the
      previous segment.
    name_offset - The offset of the name index relative to the previous
      segment, or None when the segment carries no name field.
  """

  def __init__(self, data):
    self.dst_col_offset = data[0]
    # Only 5-field segments carry a name index offset (field 5).
    self.name_offset = None if len(data) <= 4 else data[4]
def decode_segment(segment):
  """Decodes VLQ values from the given segment.

  Args:
    segment: A string containing the encoded segment text.

  Returns:
    the parsed Segment.
  """
  decoded = []
  accum = 0
  bit_pos = 0
  for ch in segment:
    digit = B64[ch]
    # Accumulate the 5 payload bits; the continuation bit says whether the
    # current value extends into the next digit.
    accum += (digit & VLQ_BASE_MASK) << bit_pos
    bit_pos += VLQ_BASE_SHIFT
    if not digit & VLQ_CONTINUATION_BIT:
      decoded.append(from_vlq_signed(accum))
      accum = 0
      bit_pos = 0
  # A valid VLQ string should not have dangling bits.
  assert accum == 0
  assert bit_pos == 0
  return Segment(decoded)
class Token(object):
  """A Token represents one JavaScript symbol.

  For example, this can be a variable or an equals sign.  If this is a
  variable or the keyword 'function' it will usually have a name indicating
  what it was originally.  Tokens such as ; and ( also appear in the map but
  carry no explicit name (see is_function).

  Members:
    dst_line - Line index in compiled code
    dst_col - Column index in compiled code
    name - Name of the token; or None
  """

  def __init__(self, dst_line, dst_col, name=None):
    self.dst_line, self.dst_col, self.name = dst_line, dst_col, name

  def __str__(self):
    return str(self.name)
def decode_mappings(line_data, names):
  """Decodes a mappings line of text.

  Args:
    line_data: A string containing the mapping line.
    names: An array of strings containing the names of the objects.

  Returns:
    a list of Tokens
  """
  tokens = []
  # The name index accumulates across the whole mappings string; the column
  # resets at every compiled line (';' separator).
  name_id = 0
  for dst_line, line in enumerate(line_data.split(';')):
    dst_col = 0
    for raw_segment in line.split(','):
      if not raw_segment:
        continue
      segment = decode_segment(raw_segment)
      dst_col += segment.dst_col_offset
      # Offsets may be negative, but the running totals must stay positive.
      assert dst_col >= 0
      name = None
      if segment.name_offset is not None:
        name_id += segment.name_offset
        assert name_id >= 0
        name = names[name_id]
      tokens.append(Token(dst_line, dst_col, name))
  return tokens
def is_function(token, lines):
  """Determines if the given token is the start of a function.

  Function definitions are assumed to have a name, and the compiled text at
  the token is the keyword 'function'.  Sometimes the token sits on the ';'
  ending the previous statement, and that ';' can even be alone at the end of
  the previous line.

  Args:
    token: The Token to check.
    lines: An array of compiled code lines.

  Returns:
    whether the token is a function.
  """
  # Tokens without a name can never start a function.
  if not token.name:
    return False

  rest = lines[token.dst_line][token.dst_col:]
  if rest == ';\n':
    # The ';' is alone at the end of its line; the function keyword, if any,
    # starts the next line.
    next_line = token.dst_line + 1
    if next_line == len(lines):
      return False
    return lines[next_line].startswith('function')

  return rest.startswith('function') or rest.startswith(';function')
def read_function(token_iter, prev, prev_index, lines, callback):
  """Reads a function from the token stream.

  The function token should already be consumed.

  Args:
    token_iter: An iterator of the tokens.
    prev: The token containing the function definition.
    prev_index: The index of the previous token.
    lines: An array of compiled code lines.
    callback: A callback type used to create the data. See traverse_tokens.

  Returns:
    an array of State objects in a format controlled by the callback (see
    traverse_tokens).
  """
  brackets = 0
  read = False
  ret = []
  # NOTE(review): this assignment is overwritten on the first loop iteration;
  # it only matters if the token stream is already exhausted.
  partial_line = lines[prev.dst_line][prev.dst_col:]
  state = callback(prev, prev_index)
  try:
    # Loop until the opening brace has been seen ('read') and every brace
    # opened since then has been matched again.
    while not read or brackets > 0:
      index, token = next(token_iter)
      partial_line = lines[token.dst_line][token.dst_col:]
      # Recursively read functions. Sometimes functions are defined nested.
      # This doesn't happen that often, and never for Shaka methods, so it does
      # not count it twice since the size of this method includes the nested
      # function.
      if is_function(token, lines):
        ret += read_function(token_iter, token, index, lines, callback)
      else:
        state.add(token, index)
      # '{}' is an empty body: it opens and closes at once, so mark the
      # function as read without changing the bracket depth.
      if partial_line.startswith('{}'):
        read = True
      elif partial_line[0] == '{':
        brackets += 1
        read = True
      elif partial_line[0] == '}':
        brackets -= 1
  # When we run out of tokens, simply ignore it. A parent call will not see
  # this error; but it will continue and the next call to 'next' will fail
  # with another StopIteration. This ensures that the last State object
  # is included for invalid content.
  except StopIteration:
    pass
  temp = state.build()
  if temp:
    ret.append(temp)
  return ret
def traverse_tokens(tokens, lines, callback):
  """Traverses a list of tokens to identify functions.

  A new State object (created via `callback`) is used per function seen, plus
  a single State constructed with None for code outside any function.  Every
  token is fed to the State's 'add' method; 'build' is called at the end of
  each function to produce a result object (or None to produce nothing).

  Args:
    tokens: An array of Tokens.
    lines: An array of compiled code lines.
    callback: A constructor that returns a state object.  It takes a start
      token or None if outside a function.  It has two member functions
        add - accepts the current token and the token's index.
        build - returns an object to be added to the results.

  Returns:
    an array of State objects in a format controlled by the callback.
  """
  results = []
  global_state = callback(None, None)

  # Use an explicit iterator rather than a for-loop so read_function can
  # consume tokens from the same stream as this function.
  token_iter = enumerate(tokens)
  try:
    while True:
      index, token = next(token_iter)
      if is_function(token, lines):
        results += read_function(token_iter, token, index, lines, callback)
      else:
        global_state.add(token, index)
  except StopIteration:
    pass

  built = global_state.build()
  if built:
    results.append(built)
  return results
class FunctionSize(object):
  """Holds the compiled size (in characters) of one named function."""

  def __init__(self, name, size):
    self.name, self.size = name, size
def print_tokens(tokens, lines, funcs):
  """Prints the given tokens.

  One line per token: a prefix character (see print_help), destination
  line/column, a snippet of compiled text, and the token's name.  A final
  'X name size' line is printed at the end of each function.

  Args:
    tokens: An array of Tokens.
    lines: An array of compiled code lines.
    funcs: An array of FunctionSize.
  """
  class State(object):
    """Defines the current parser state."""

    def __init__(self, token, index):
      # The start of a function, or the global start.
      self.name = token.name if token else None
      if token:
        self._print_token('>', token, index)

    def _print_token(self, prefix, token, index):
      # Trim the snippet at the next token's column when it is on the same
      # line, so each token prints only its own text.
      partial_line = lines[token.dst_line][token.dst_col:]
      if len(tokens) > index + 1:
        next_ = tokens[index + 1]
        if next_.dst_line == token.dst_line:
          partial_line = lines[token.dst_line][token.dst_col:next_.dst_col]
      token_text = partial_line[:10].replace('\n', '').rjust(12)
      print '%s %4d %4d %12s %s' % (prefix, token.dst_line, token.dst_col,
                                    token_text, token.name)

    def add(self, token, index):
      """Parses the given token.

      Args:
        token: The token to add.
        index: The index of the token in the original array.
      """
      # Choose the prefix: '!' outside any function, '+'/'-' for braces,
      # ' ' otherwise ('{}' counts as a plain token).
      prefix = None
      if not self.name:
        prefix = '!'
      elif lines[token.dst_line][token.dst_col:token.dst_col+2] == '{}':
        prefix = ' '
      elif lines[token.dst_line][token.dst_col] == '{':
        prefix = '+'
      elif lines[token.dst_line][token.dst_col] == '}':
        prefix = '-'
      else:
        prefix = ' '
      self._print_token(prefix, token, index)

    def build(self):
      if not self.name:
        return
      # The end of a function. Print the size of this function.
      size = 0
      this_func = [t for t in funcs if t.name == self.name]
      if this_func:
        size = this_func[0].size
      print 'X', self.name, size

  traverse_tokens(tokens, lines, State)
class FunctionDependencies(object):
  """Holds the list of names a function (or class) depends on."""

  def __init__(self, name, deps):
    self.name, self.deps = name, deps
def process_deps(tokens, lines, is_class):
  """Processes the tokens into function or class dependencies.

  Args:
    tokens: An array of Tokens.
    lines: An array of compiled code lines.
    is_class: Whether to create a class graph instead of a function graph.

  Returns:
    an array of FunctionDependencies, sorted on name, with duplicates merged.
  """
  class State(object):
    """Defines the current parser state."""

    def __init__(self, token, _):
      self.deps = []
      self.name, self.parts = self._create_parts(token)

    def _create_parts(self, token):
      """Creates an array of name parts.

      Args:
        token: The token to create the name from.

      Returns:
        A tuple of the name and the array of name parts.
      """
      if not token or not token.name:
        return (None, None)
      parts = token.name.split('.')
      name = token.name
      # Instance methods are the same as static methods.
      if len(parts) > 2 and parts[-2] == 'prototype':
        del parts[-2]
      # Strip function names if class graph; also remove it from the name.
      # NOTE(review): string.lowercase is Python 2 only (string.ascii_lowercase
      # in Python 3) — this script assumes a Python 2 interpreter.
      if is_class:
        if parts[-1][0] in string.lowercase:
          del parts[-1]
        name = '.'.join(parts)
      return (name, parts)

    def add(self, token, _):
      """Parses the given token.

      Args:
        token: The token to parse.
      """
      # Ignore symbols outside a function. Only care about function
      # references and only those that reference our code.
      if not self.name or not token.name or not token.name.startswith('shaka.'):
        return
      name, other_parts = self._create_parts(token)
      # Get the index of the first different namespace.
      count = min(len(self.parts), len(other_parts))
      i = 0
      while i < count and self.parts[i] == other_parts[i]:
        i += 1
      # Ignore use of members of the same object:
      # OfflineVideoSource.configure and OfflineVideoSource.store
      if (i == count - 1 or i == count) and len(self.parts) == len(other_parts):
        return
      # Ignore use of the constructor of the same type:
      # OfflineVideoSource and OfflineVideoSource.store
      if i == count and abs(len(self.parts) - len(other_parts)) == 1:
        return
      # Add the dependency.
      if name not in self.deps:
        self.deps.append(name)

    def build(self):
      return FunctionDependencies(self.name, self.deps) if self.name else None

  ret = traverse_tokens(tokens, lines, State)
  assert ret
  ret = sorted(ret, key=lambda key: key.name)
  # We need to collapse duplicates (same name appears once per definition
  # site); merge their dependency lists without introducing repeats.
  i = 0
  while i + 1 < len(ret):
    if ret[i].name == ret[i + 1].name:
      for dep in ret[i + 1].deps:
        if dep not in ret[i].deps:
          ret[i].deps.append(dep)
      del ret[i + 1]
    else:
      i += 1
  return ret
def process_sizes(tokens, lines):
  """Processes an array of tokens into function lengths.

  Args:
    tokens: An array of Tokens.
    lines: An array of compiled code lines.

  Returns:
    an array of FunctionSizes sorted on name.
  """
  class State(object):
    """Defines the current parser state."""

    def __init__(self, token, _):
      self.name = token.name if token else None
      # Running character count plus the position of the last counted token.
      self.size = 0
      self.start = token.dst_col if token else None
      self.line = token.dst_line if token else None

    def add(self, token, _):
      """Parses the given token.

      Args:
        token: The token to parse.
      """
      # Ignore outside a function
      if not self.name:
        return
      # If we skipped to the next line, include the code to the end of the line.
      # If we skipped multiple lines, include the whole line. This will most
      # likely never happen since the compiled code usually has new lines on
      # function boundaries.
      assert token.dst_line >= self.line
      while token.dst_line != self.line:
        self.size += len(lines[self.line]) - self.start
        self.line += 1
        self.start = 0
      # Keep increasing the size. We can't simply keep the start and measure
      # at the end since we are not given the end token in build().
      self.size += token.dst_col - self.start
      self.start = token.dst_col

    def build(self):
      return FunctionSize(self.name, self.size) if self.name else None

  ret = traverse_tokens(tokens, lines, State)
  assert ret
  # Only keep our own code (Shaka and Closure namespaces).
  ret = [k for k in ret if k.name and
         (k.name.startswith('shaka.') or k.name.startswith('goog.'))]
  ret = sorted(ret, key=lambda key: key.name)
  # We need to collapse duplicates; sizes of same-named entries are summed.
  i = 0
  while i + 1 < len(ret):
    if ret[i].name == ret[i + 1].name:
      ret[i].size += ret[i + 1].size
      del ret[i + 1]
    else:
      i += 1
  return ret
def print_tree(results, indent, callback, end_callback):
  """Prints the results in an indented format.

  Args:
    results: An array of FunctionSizes sorted on name.
    indent: A number to indent.
    callback: A callback function to print the data. Accepts a title, an
      indentation, and a sublist of the items in that group.
    end_callback: A callback function called after a group; can be None.
  """
  # This is used both when printing sizes and when printing dependencies in
  # DOT format. This recursively creates groups of items with the same prefix.
  # e.g.
  #   shaka
  #     shaka.util
  #       shaka.util.FailoverUri
  #       shaka.util.TypedBind
  #     shaka.player
  #     ...
  if len(results) <= 1:
    callback(None, indent, results)
    return
  # We want to group-by prefixes for the elements. Since it is sorted, we
  # can find the overall prefix length by comparing first and last names only.
  first = results[0].name.split('.')
  last = results[-1].name.split('.')
  prefix = 0
  while (prefix < len(first) and prefix < len(last)
         and first[prefix] == last[prefix]):
    prefix += 1
  group = 0
  group_items = first
  if prefix == len(first):
    # This happens when the group has a first element of a class name and the
    # remaining are member functions. Remove the first element from this
    # group.
    group_items = results[1].name.split('.')
    group = 1
  # Start with second element, and go one more so we make sure to process the
  # last group (the sentinel [''] * (prefix + 1) can never match a real name).
  for i in range(1, len(results) + 1):
    if i == len(results):
      items = [''] * (prefix + 1)
    else:
      items = results[i].name.split('.')
    if items[prefix] != group_items[prefix]:
      title = '.'.join(group_items[:(prefix + 1)])
      callback(title, indent, results[group:i])
      print_tree(results[group:i], indent + 1, callback, end_callback)
      # Set the start of the next group to the current element.
      group = i
      group_items = items
  if end_callback:
    end_callback(indent)
def print_sizes(sizes):
  """Prints the sizes in an indented format.

  Args:
    sizes: An array of FunctionSizes sorted on name.
  """
  # This callback is used to print the total sizes of each of the sub-groups.
  # Using the indent as padding allows to print a tree-like structure to
  # show how big each section is.
  def callback_factory(padding):
    # Use a factory so we capture the padding.
    def callback(title, indent, results):
      if title:
        size = sum([k.size for k in results])
        print '%s %*d %s' % (indent * ' ', padding, size, title)
    return callback

  total = sum([k.size for k in sizes])
  # NOTE(review): math.log10 raises if total is 0 — assumes at least one
  # non-empty function; confirm against callers.
  padding = int(math.ceil(math.log10(total)))
  print '%*d %s' % (padding, total, 'TOTAL')
  print_tree(sizes, 0, callback_factory(padding), None)
def print_deps(results, in_dot):
"""Prints the dependencies.
Arguments:
results: A sorted array of FunctionDependencies.
in_dot: Whether to print in DOT format.
"""
if not in_dot:
for func in results:
name, deps = func.name, func.deps
# Ignore items with no dependencies.
if deps:
print name
for dep in deps:
print ' ', dep
return
dep_map = dict()
# Use the printTree to produce clusters for each namespace and type. This
# will print boxes around each class and show dependencies between types.
print 'digraph {'
def callback_factory(dep_map, temp):
"""Creates a callback function."""
def callback(title, indent, results):
if title:
if len(results) > 1:
print '\t' * indent, 'subgraph', 'cluster' + str(len(temp)), '{'
temp.append(1)
else:
print('\t' * indent, len(dep_map), '[',
'label="' + results[0].name + '"', ']', ';')
dep_map[results[0].name] = len(dep_map)
return callback
def end_callback(indent):
if indent > 1:
print '\t' * (indent - 1), '}'
print_tree(results, 1, callback_factory(dep_map, []), end_callback)
for func in results:
name, deps = func.name, func.deps
# Ignore items with no dependencies.
if deps:
if name not in dep_map:
dep_map[name] = len(dep_map)
print '\t', dep_map[name], '[', 'label="' + name + '"', ']', ';'
for dep in deps:
if dep not in dep_map:
dep_map[dep] = len(dep_map)
print '\t', dep_map[dep], '[', 'label="' + dep + '"', ']', ';'
print '\t', dep_map[name], '->', dep_map[dep], ';'
print '}'
class Options(object):
  """Defines options to the script."""

  def __init__(self):
    # Output modes; exactly one must be selected (validated in main).
    self.print_deps = self.print_sizes = self.print_tokens = False
    # Modifiers: DOT output format and class-graph mode.
    self.in_dot = self.is_class = False
def process(text, options):
  """Decodes a JSON string containing source map data.

  Args:
    text: A JSON string containing source map data.
    options: An object containing the command-line options.
  """
  # The spec allows a map file to start with )]} to prevent javascript from
  # including it.
  if text.startswith(')]}\'\n') or text.startswith(')]}\n'):
    _, text = text.split('\n', 1)
  # Decode the JSON data and get the parts we need.
  data = json.loads(text)
  # Paths are relative to the output directory.
  base = os.path.join(shakaBuildHelpers.get_source_base(), 'dist')
  # Use a context manager so the compiled-code file is closed deterministically
  # instead of leaking until garbage collection.
  with open(os.path.join(base, data['file'])) as compiled_file:
    file_lines = compiled_file.readlines()
  names = data['names']
  mappings = data['mappings']
  tokens = decode_mappings(mappings, names)
  sizes = process_sizes(tokens, file_lines)
  # Print out one of the results.
  if options.print_tokens:
    print_tokens(tokens, file_lines, sizes)
  elif options.print_sizes:
    print_sizes(sizes)
  elif options.print_deps or options.is_class:
    temp = process_deps(tokens, file_lines, options.is_class)
    print_deps(temp, options.in_dot)
def print_help():
  """Prints the usage documentation for this script to stdout."""
  print 'Usage:', sys.argv[0], """[options] [--] [source_map]
source_map must be either the path to the source map, or the name of the build
type.  You must build Shaka first.
Types(must include exactly one):
 -c --class-deps : Prints the class dependencies
 -f --function-deps : Prints the function dependencies
 -s --function-sizes : Prints the function sizes (in number of characters)
 -t --all-tokens : Prints all tokens in the source map
Options:
 -d --dot-format : Prints in DOT format; only valid with \
--function-deps or --class-dep
 -h --help : Prints this help page
Token Format:
 prefix line col token name => Token
 X functionName size => end function
Prefixes:
 > - start a function
 ! - not in a function
 - - end curly brace
 + - start curly brace
   - other token
DOT Format:
 This can print the dependency graph in DOT format.  This can be used with
 graph programs to display a visual graph of dependencies.  For example
 using graphviz:
""", sys.argv[0], """-c -d | fdp -Goverlap=prism | neato -n2 -Tsvg > out.svg"""
def main(args):
  """Entry point: parses arguments, locates the map file, runs one analysis.

  Args:
    args: Command-line arguments, excluding the program name.

  Returns:
    0 on success; 1 on bad usage or when the map file cannot be found.
  """
  options = Options()
  done_args = False
  # Default map produced by the standard compiled build.
  name = 'shaka-player.compiled.debug.map'
  # Process the command-line arguments.
  for arg in args:
    if done_args or arg[0] != '-':
      name = arg
    elif arg == '-f' or arg == '--function-deps':
      options.print_deps = True
    elif arg == '-t' or arg == '--all-tokens':
      options.print_tokens = True
    elif arg == '-s' or arg == '--function-sizes':
      options.print_sizes = True
    elif arg == '-c' or arg == '--class-deps':
      options.is_class = True
    elif arg == '-d' or arg == '--dot-format':
      options.in_dot = True
    elif arg == '--':
      # Everything after '--' is treated as the source map name.
      done_args = True
    elif arg == '-h' or arg == '--help':
      print_help()
      return 0
    else:
      logging.error('Unrecognized argument: %s', arg)
      print_help()
      return 1
  # Try to find the file
  if not os.path.isfile(name):
    # Get the source code base directory
    base = shakaBuildHelpers.get_source_base()
    # Supports the following searches:
    # * File name given, map in dist/
    # * Type given, map in working directory
    # * Type given, map in dist/
    if os.path.isfile(os.path.join(base, 'dist', name)):
      name = os.path.join(base, 'dist', name)
    elif os.path.isfile(
        os.path.join('shaka-player.' + name + '.debug.map')):
      name = os.path.join('shaka-player.' + name + '.debug.map')
    elif os.path.isfile(
        os.path.join(base, 'dist', 'shaka-player.' + name + '.debug.map')):
      name = os.path.join(base, 'dist', 'shaka-player.' + name + '.debug.map')
    else:
      logging.error('"%s" not found; build Shaka first.', name)
      return 1
  # Verify arguments are correct.  The booleans sum as integers, so exactly
  # one of the four mode flags must be set.
  if (options.print_sizes + options.print_deps + options.print_tokens +
      options.is_class) != 1:
    logging.error('Must include exactly one output type.')
    print_help()
    return 1
  elif options.in_dot and not options.print_deps and not options.is_class:
    logging.error('--dot-format only valid with --function-deps or '
                  '--class-deps.')
    return 1
  else:
    # NOTE(review): the map file handle is not explicitly closed here —
    # consider a 'with' block.
    process(open(name).read(), options)
    return 0
# Delegate to the shared build-helper runner when executed as a script.
if __name__ == '__main__':
  shakaBuildHelpers.run_main(main)
|
|
# Copyright 2018 The trfl Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Ops for computing v-trace learning targets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
# Dependency imports
import tensorflow.compat.v1 as tf
# Return type of vtrace_from_logits: V-trace targets plus the log-probability
# diagnostics computed along the way.
VTraceFromLogitsReturns = collections.namedtuple(
    'VTraceFromLogitsReturns',
    ['vs', 'pg_advantages', 'log_rhos',
     'behaviour_action_log_probs', 'target_action_log_probs'])
# Return type of vtrace_from_importance_weights: baseline targets (vs) and
# policy-gradient advantages.
VTraceReturns = collections.namedtuple('VTraceReturns', 'vs pg_advantages')
def log_probs_from_logits_and_actions(policy_logits, actions):
  """Computes action log-probs from policy logits and actions.

  In the notation used throughout documentation and comments, T refers to the
  time dimension ranging from 0 to T-1. B refers to the batch size and
  NUM_ACTIONS refers to the number of actions.

  Args:
    policy_logits: A float32 tensor of shape `[T, B, NUM_ACTIONS]` with
      un-normalized log-probabilities parameterizing a softmax policy.
    actions: An int32 tensor of shape `[T, B]` with actions.

  Returns:
    A float32 tensor of shape `[T, B]` corresponding to the sampling log
    probability of the chosen action w.r.t. the policy.
  """
  policy_logits = tf.convert_to_tensor(policy_logits, dtype=tf.float32)
  actions = tf.convert_to_tensor(actions, dtype=tf.int32)

  policy_logits.shape.assert_has_rank(3)
  actions.shape.assert_has_rank(2)

  # The sparse softmax cross-entropy is exactly -log pi(a|x), so negating it
  # yields the log-probability of the chosen actions.
  neg_log_probs = tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits=policy_logits, labels=actions)
  return -neg_log_probs
def vtrace_from_logits(
    behaviour_policy_logits, target_policy_logits, actions,
    discounts, rewards, values, bootstrap_value,
    clip_rho_threshold=1.0, clip_pg_rho_threshold=1.0,
    name='vtrace_from_logits'):
  r"""V-trace for softmax policies.

  Calculates V-trace actor critic targets for softmax polices as described in

  "IMPALA: Scalable Distributed Deep-RL with
  Importance Weighted Actor-Learner Architectures"
  by Espeholt, Soyer, Munos et al.

  Target policy refers to the policy we are interested in improving and
  behaviour policy refers to the policy that generated the given
  rewards and actions.

  In the notation used throughout documentation and comments, `T` refers to the
  time dimension ranging from `0` to `T-1`. `B` refers to the batch size and
  `NUM_ACTIONS` refers to the number of actions.

  Args:
    behaviour_policy_logits: A float32 tensor of shape `[T, B, NUM_ACTIONS]`
      with un-normalized log-probabilities parametrizing the softmax behaviour
      policy.
    target_policy_logits: A float32 tensor of shape `[T, B, NUM_ACTIONS]` with
      un-normalized log-probabilities parametrizing the softmax target policy.
    actions: An int32 tensor of shape `[T, B]` of actions sampled from the
      behaviour policy.
    discounts: A float32 tensor of shape `[T, B]` with the discount encountered
      when following the behaviour policy.
    rewards: A float32 tensor of shape `[T, B]` with the rewards generated by
      following the behaviour policy.
    values: A float32 tensor of shape `[T, B]` with the value function estimates
      wrt. the target policy.
    bootstrap_value: A float32 of shape `[B]` with the value function estimate
      at time T.
    clip_rho_threshold: A scalar float32 tensor with the clipping threshold for
      importance weights (rho) when calculating the baseline targets (vs).
      rho^bar in the paper.
    clip_pg_rho_threshold: A scalar float32 tensor with the clipping threshold
      on rho_s in \rho_s \delta log \pi(a|x) (r + \gamma v_{s+1} - V(x_s)).
    name: The name scope that all V-trace operations will be created in.

  Returns:
    A `VTraceFromLogitsReturns` namedtuple with the following fields:
      vs: A float32 tensor of shape `[T, B]`. Can be used as target to train a
          baseline (V(x_t) - vs_t)^2.
      pg_advantages: A float 32 tensor of shape `[T, B]`. Can be used as an
        estimate of the advantage in the calculation of policy gradients.
      log_rhos: A float32 tensor of shape `[T, B]` containing the log importance
        sampling weights (log rhos).
      behaviour_action_log_probs: A float32 tensor of shape `[T, B]` containing
        behaviour policy action log probabilities (log \mu(a_t)).
      target_action_log_probs: A float32 tensor of shape `[T, B]` containing
        target policy action probabilities (log \pi(a_t)).
  """
  behaviour_policy_logits = tf.convert_to_tensor(
      behaviour_policy_logits, dtype=tf.float32)
  target_policy_logits = tf.convert_to_tensor(
      target_policy_logits, dtype=tf.float32)
  actions = tf.convert_to_tensor(actions, dtype=tf.int32)
  # Make sure tensor ranks are as expected.
  # The rest will be checked by from_action_log_probs.
  behaviour_policy_logits.shape.assert_has_rank(3)
  target_policy_logits.shape.assert_has_rank(3)
  actions.shape.assert_has_rank(2)
  with tf.name_scope(name, values=[
      behaviour_policy_logits, target_policy_logits, actions,
      discounts, rewards, values, bootstrap_value]):
    target_action_log_probs = log_probs_from_logits_and_actions(
        target_policy_logits, actions)
    behaviour_action_log_probs = log_probs_from_logits_and_actions(
        behaviour_policy_logits, actions)
    # log rho_t = log pi(a_t|x_t) - log mu(a_t|x_t): importance weights kept
    # in log-space for numerical stability.
    log_rhos = target_action_log_probs - behaviour_action_log_probs
    vtrace_returns = vtrace_from_importance_weights(
        log_rhos=log_rhos,
        discounts=discounts,
        rewards=rewards,
        values=values,
        bootstrap_value=bootstrap_value,
        clip_rho_threshold=clip_rho_threshold,
        clip_pg_rho_threshold=clip_pg_rho_threshold)
    # Bundle the diagnostics together with the (vs, pg_advantages) results.
    return VTraceFromLogitsReturns(
        log_rhos=log_rhos,
        behaviour_action_log_probs=behaviour_action_log_probs,
        target_action_log_probs=target_action_log_probs,
        **vtrace_returns._asdict()
    )
def vtrace_from_importance_weights(
    log_rhos, discounts, rewards, values, bootstrap_value,
    clip_rho_threshold=1.0, clip_pg_rho_threshold=1.0,
    name='vtrace_from_importance_weights'):
  r"""V-trace from log importance weights.

  Calculates V-trace actor critic targets as described in

  "IMPALA: Scalable Distributed Deep-RL with
  Importance Weighted Actor-Learner Architectures"
  by Espeholt, Soyer, Munos et al.

  In the notation used throughout documentation and comments, T refers to the
  time dimension ranging from 0 to T-1. B refers to the batch size. This code
  also supports the case where all tensors have the same number of additional
  dimensions, e.g., `rewards` is `[T, B, C]`, `values` is `[T, B, C]`,
  `bootstrap_value` is `[B, C]`.

  Args:
    log_rhos: A float32 tensor of shape `[T, B]` representing the
      log importance sampling weights, i.e.
      log(target_policy(a) / behaviour_policy(a)). V-trace performs operations
      on rhos in log-space for numerical stability.
    discounts: A float32 tensor of shape `[T, B]` with discounts encountered
      when following the behaviour policy.
    rewards: A float32 tensor of shape `[T, B]` containing rewards generated by
      following the behaviour policy.
    values: A float32 tensor of shape `[T, B]` with the value function estimates
      wrt. the target policy.
    bootstrap_value: A float32 of shape `[B]` with the value function estimate
      at time T.
    clip_rho_threshold: A scalar float32 tensor with the clipping threshold for
      importance weights (rho) when calculating the baseline targets (vs).
      rho^bar in the paper. If None, no clipping is applied.
    clip_pg_rho_threshold: A scalar float32 tensor with the clipping threshold
      on rho_s in \rho_s \delta log \pi(a|x) (r + \gamma v_{s+1} - V(x_s)). If
      None, no clipping is applied.
    name: The name scope that all V-trace operations will be created in.

  Returns:
    A VTraceReturns namedtuple (vs, pg_advantages) where:
      vs: A float32 tensor of shape `[T, B]`. Can be used as target to
        train a baseline (V(x_t) - vs_t)^2.
      pg_advantages: A float32 tensor of shape `[T, B]`. Can be used as the
        advantage in the calculation of policy gradients.
  """
  log_rhos = tf.convert_to_tensor(log_rhos, dtype=tf.float32)
  discounts = tf.convert_to_tensor(discounts, dtype=tf.float32)
  rewards = tf.convert_to_tensor(rewards, dtype=tf.float32)
  values = tf.convert_to_tensor(values, dtype=tf.float32)
  bootstrap_value = tf.convert_to_tensor(bootstrap_value, dtype=tf.float32)
  if clip_rho_threshold is not None:
    clip_rho_threshold = tf.convert_to_tensor(clip_rho_threshold,
                                              dtype=tf.float32)
  if clip_pg_rho_threshold is not None:
    clip_pg_rho_threshold = tf.convert_to_tensor(clip_pg_rho_threshold,
                                                 dtype=tf.float32)
  # Make sure tensor ranks are consistent.
  rho_rank = log_rhos.shape.ndims  # Usually 2.
  values.shape.assert_has_rank(rho_rank)
  bootstrap_value.shape.assert_has_rank(rho_rank - 1)
  discounts.shape.assert_has_rank(rho_rank)
  rewards.shape.assert_has_rank(rho_rank)
  if clip_rho_threshold is not None:
    clip_rho_threshold.shape.assert_has_rank(0)
  if clip_pg_rho_threshold is not None:
    clip_pg_rho_threshold.shape.assert_has_rank(0)
  with tf.name_scope(name, values=[
      log_rhos, discounts, rewards, values, bootstrap_value]):
    rhos = tf.exp(log_rhos)
    # rho^bar clipping from the paper (applied to the baseline targets).
    if clip_rho_threshold is not None:
      clipped_rhos = tf.minimum(clip_rho_threshold, rhos, name='clipped_rhos')
    else:
      clipped_rhos = rhos
    # c_t = min(1, rho_t): the trace-cutting coefficients.
    cs = tf.minimum(1.0, rhos, name='cs')
    # Append bootstrapped value to get [v1, ..., v_t+1]
    values_t_plus_1 = tf.concat(
        [values[1:], tf.expand_dims(bootstrap_value, 0)], axis=0)
    # delta_t = rho_t (r_t + gamma V(x_{t+1}) - V(x_t)): clipped TD errors.
    deltas = clipped_rhos * (rewards + discounts * values_t_plus_1 - values)
    # Note that all sequences are reversed, computation starts from the back.
    sequences = (
        tf.reverse(discounts, axis=[0]),
        tf.reverse(cs, axis=[0]),
        tf.reverse(deltas, axis=[0]),
    )
    # V-trace vs are calculated through a scan from the back to the beginning
    # of the given trajectory.
    def scanfunc(acc, sequence_item):
      # Implements the recursion (vs - V)_t = delta_t + gamma_t c_t (vs - V)_{t+1}.
      discount_t, c_t, delta_t = sequence_item
      return delta_t + discount_t * c_t * acc

    initial_values = tf.zeros_like(bootstrap_value)
    vs_minus_v_xs = tf.scan(
        fn=scanfunc,
        elems=sequences,
        initializer=initial_values,
        parallel_iterations=1,
        back_prop=False,
        name='scan')
    # Reverse the results back to original order.
    vs_minus_v_xs = tf.reverse(vs_minus_v_xs, [0], name='vs_minus_v_xs')
    # Add V(x_s) to get v_s.
    vs = tf.add(vs_minus_v_xs, values, name='vs')
    # Advantage for policy gradient.
    vs_t_plus_1 = tf.concat([
        vs[1:], tf.expand_dims(bootstrap_value, 0)], axis=0)
    if clip_pg_rho_threshold is not None:
      clipped_pg_rhos = tf.minimum(clip_pg_rho_threshold, rhos,
                                   name='clipped_pg_rhos')
    else:
      clipped_pg_rhos = rhos
    pg_advantages = (
        clipped_pg_rhos * (rewards + discounts * vs_t_plus_1 - values))
    # Make sure no gradients backpropagated through the returned values.
    return VTraceReturns(vs=tf.stop_gradient(vs),
                         pg_advantages=tf.stop_gradient(pg_advantages))
|
|
from django.db import models
from django.db.migrations.operations.base import Operation
from django.db.migrations.state import ModelState
from django.db.models.options import normalize_together
from django.utils.functional import cached_property
from .fields import (
AddField, AlterField, FieldOperation, RemoveField, RenameField,
)
from .utils import ModelTuple, field_references_model
def _check_for_duplicates(arg_name, objs):
used_vals = set()
for val in objs:
if val in used_vals:
raise ValueError(
"Found duplicate value %s in CreateModel %s argument." % (val, arg_name)
)
used_vals.add(val)
class ModelOperation(Operation):
    """Common base for migration operations that act on a single model."""

    def __init__(self, name):
        self.name = name

    @cached_property
    def name_lower(self):
        # Cached lowercase form; model-name comparisons are case-insensitive.
        return self.name.lower()

    def references_model(self, name, app_label=None):
        return self.name_lower == name.lower()

    def reduce(self, operation, app_label=None):
        reduced = super().reduce(operation, app_label=app_label)
        if reduced:
            return reduced
        # An operation on an unrelated model can be optimized "through".
        return not operation.references_model(self.name, app_label)
class CreateModel(ModelOperation):
    """Create a model's table."""
    # Tell the migration writer to expand these kwargs over multiple lines.
    serialization_expand_args = ['fields', 'options', 'managers']
    def __init__(self, name, fields, options=None, bases=None, managers=None):
        # fields: list of (field_name, field_instance) pairs.
        self.fields = fields
        self.options = options or {}
        # With no explicit bases, the model inherits from models.Model.
        self.bases = bases or (models.Model,)
        self.managers = managers or []
        super().__init__(name)
        # Sanity-check that there are no duplicated field names, bases, or
        # manager names
        _check_for_duplicates('fields', (name for name, _ in self.fields))
        _check_for_duplicates('bases', (
            base._meta.label_lower if hasattr(base, '_meta') else
            base.lower() if isinstance(base, str) else base
            for base in self.bases
        ))
        _check_for_duplicates('managers', (name for name, _ in self.managers))
    def deconstruct(self):
        """Return (qualname, args, kwargs) so this operation can be serialized."""
        kwargs = {
            'name': self.name,
            'fields': self.fields,
        }
        # Only emit non-default values to keep written migrations minimal.
        if self.options:
            kwargs['options'] = self.options
        if self.bases and self.bases != (models.Model,):
            kwargs['bases'] = self.bases
        if self.managers and self.managers != [('objects', models.Manager())]:
            kwargs['managers'] = self.managers
        return (
            self.__class__.__qualname__,
            [],
            kwargs
        )
    def state_forwards(self, app_label, state):
        """Register the new model in the in-memory project state."""
        state.add_model(ModelState(
            app_label,
            self.name,
            list(self.fields),
            dict(self.options),
            tuple(self.bases),
            list(self.managers),
        ))
    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        """Create the table, if the router allows migrating this model."""
        model = to_state.apps.get_model(app_label, self.name)
        if self.allow_migrate_model(schema_editor.connection.alias, model):
            schema_editor.create_model(model)
    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        """Reverse of forwards: drop the model's table."""
        model = from_state.apps.get_model(app_label, self.name)
        if self.allow_migrate_model(schema_editor.connection.alias, model):
            schema_editor.delete_model(model)
    def describe(self):
        return "Create %smodel %s" % ("proxy " if self.options.get("proxy", False) else "", self.name)
    def references_model(self, name, app_label=None):
        """True if this model is *name*, inherits from it, or relates to it."""
        name_lower = name.lower()
        if name_lower == self.name_lower:
            return True
        # Check we didn't inherit from the model
        model_tuple = ModelTuple(app_label, name_lower)
        for base in self.bases:
            if (base is not models.Model and isinstance(base, (models.base.ModelBase, str)) and
                    ModelTuple.from_model(base) == model_tuple):
                return True
        # Check we have no FKs/M2Ms with it
        for _name, field in self.fields:
            if field_references_model(field, model_tuple):
                return True
        return False
    def reduce(self, operation, app_label=None):
        """Fold a later operation on the same model into this CreateModel."""
        # Create followed by delete (of a non-proxy) cancels out entirely.
        if (isinstance(operation, DeleteModel) and
                self.name_lower == operation.name_lower and
                not self.options.get("proxy", False)):
            return []
        # Create followed by rename: create directly under the new name.
        elif isinstance(operation, RenameModel) and self.name_lower == operation.old_name_lower:
            return [
                CreateModel(
                    operation.new_name,
                    fields=self.fields,
                    options=self.options,
                    bases=self.bases,
                    managers=self.managers,
                ),
            ]
        # Fold later Meta-option changes into the initial options dict.
        elif isinstance(operation, AlterModelOptions) and self.name_lower == operation.name_lower:
            return [
                CreateModel(
                    self.name,
                    fields=self.fields,
                    options={**self.options, **operation.options},
                    bases=self.bases,
                    managers=self.managers,
                ),
            ]
        # Fold unique_together/index_together changes into options.
        elif isinstance(operation, AlterTogetherOptionOperation) and self.name_lower == operation.name_lower:
            return [
                CreateModel(
                    self.name,
                    fields=self.fields,
                    options={**self.options, **{operation.option_name: operation.option_value}},
                    bases=self.bases,
                    managers=self.managers,
                ),
            ]
        # Fold order_with_respect_to changes into options.
        elif isinstance(operation, AlterOrderWithRespectTo) and self.name_lower == operation.name_lower:
            return [
                CreateModel(
                    self.name,
                    fields=self.fields,
                    options={**self.options, 'order_with_respect_to': operation.order_with_respect_to},
                    bases=self.bases,
                    managers=self.managers,
                ),
            ]
        # Field-level operations on this model fold into the `fields` list.
        elif isinstance(operation, FieldOperation) and self.name_lower == operation.model_name_lower:
            if isinstance(operation, AddField):
                return [
                    CreateModel(
                        self.name,
                        fields=self.fields + [(operation.name, operation.field)],
                        options=self.options,
                        bases=self.bases,
                        managers=self.managers,
                    ),
                ]
            elif isinstance(operation, AlterField):
                return [
                    CreateModel(
                        self.name,
                        fields=[
                            (n, operation.field if n == operation.name else v)
                            for n, v in self.fields
                        ],
                        options=self.options,
                        bases=self.bases,
                        managers=self.managers,
                    ),
                ]
            elif isinstance(operation, RemoveField):
                options = self.options.copy()
                # Strip the removed field from *_together options, dropping
                # any tuple (and then the whole option) that becomes empty.
                for option_name in ('unique_together', 'index_together'):
                    option = options.pop(option_name, None)
                    if option:
                        option = set(filter(bool, (
                            tuple(f for f in fields if f != operation.name_lower) for fields in option
                        )))
                        if option:
                            options[option_name] = option
                # Removing the ordering field clears order_with_respect_to.
                order_with_respect_to = options.get('order_with_respect_to')
                if order_with_respect_to == operation.name_lower:
                    del options['order_with_respect_to']
                return [
                    CreateModel(
                        self.name,
                        fields=[
                            (n, v)
                            for n, v in self.fields
                            if n.lower() != operation.name_lower
                        ],
                        options=options,
                        bases=self.bases,
                        managers=self.managers,
                    ),
                ]
            elif isinstance(operation, RenameField):
                options = self.options.copy()
                # Rename the field inside any *_together options as well.
                for option_name in ('unique_together', 'index_together'):
                    option = options.get(option_name)
                    if option:
                        options[option_name] = {
                            tuple(operation.new_name if f == operation.old_name else f for f in fields)
                            for fields in option
                        }
                # Keep order_with_respect_to pointing at the renamed field.
                order_with_respect_to = options.get('order_with_respect_to')
                if order_with_respect_to == operation.old_name:
                    options['order_with_respect_to'] = operation.new_name
                return [
                    CreateModel(
                        self.name,
                        fields=[
                            (operation.new_name if n == operation.old_name else n, v)
                            for n, v in self.fields
                        ],
                        options=options,
                        bases=self.bases,
                        managers=self.managers,
                    ),
                ]
        return super().reduce(operation, app_label=app_label)
class DeleteModel(ModelOperation):
    """Drop a model's table."""

    def deconstruct(self):
        """Return (qualname, args, kwargs) for serialization."""
        return (self.__class__.__qualname__, [], {'name': self.name})

    def state_forwards(self, app_label, state):
        state.remove_model(app_label, self.name_lower)

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        # The model only exists in the pre-migration state.
        target = from_state.apps.get_model(app_label, self.name)
        if self.allow_migrate_model(schema_editor.connection.alias, target):
            schema_editor.delete_model(target)

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        # Reversing a delete means recreating the table.
        target = to_state.apps.get_model(app_label, self.name)
        if self.allow_migrate_model(schema_editor.connection.alias, target):
            schema_editor.create_model(target)

    def references_model(self, name, app_label=None):
        # The deleted model could be referencing the specified model through
        # related fields.
        return True

    def describe(self):
        return "Delete model %s" % self.name
class RenameModel(ModelOperation):
    """Rename a model."""
    def __init__(self, old_name, new_name):
        self.old_name = old_name
        self.new_name = new_name
        super().__init__(old_name)
    @cached_property
    def old_name_lower(self):
        # Cached lowercase old name for case-insensitive comparisons.
        return self.old_name.lower()
    @cached_property
    def new_name_lower(self):
        # Cached lowercase new name for case-insensitive comparisons.
        return self.new_name.lower()
    def deconstruct(self):
        """Return (qualname, args, kwargs) so this operation can be serialized."""
        kwargs = {
            'old_name': self.old_name,
            'new_name': self.new_name,
        }
        return (
            self.__class__.__qualname__,
            [],
            kwargs
        )
    def state_forwards(self, app_label, state):
        """Rename the model in the project state and repoint references to it."""
        # Add a new model.
        renamed_model = state.models[app_label, self.old_name_lower].clone()
        renamed_model.name = self.new_name
        state.models[app_label, self.new_name_lower] = renamed_model
        # Repoint all fields pointing to the old model to the new one.
        old_model_tuple = ModelTuple(app_label, self.old_name_lower)
        new_remote_model = '%s.%s' % (app_label, self.new_name)
        to_reload = []
        for (model_app_label, model_name), model_state in state.models.items():
            model_changed = False
            for index, (name, field) in enumerate(model_state.fields):
                changed_field = None
                remote_field = field.remote_field
                if remote_field:
                    remote_model_tuple = ModelTuple.from_model(
                        remote_field.model, model_app_label, model_name
                    )
                    if remote_model_tuple == old_model_tuple:
                        # Clone before mutating so shared state isn't affected.
                        changed_field = field.clone()
                        changed_field.remote_field.model = new_remote_model
                    through_model = getattr(remote_field, 'through', None)
                    if through_model:
                        through_model_tuple = ModelTuple.from_model(
                            through_model, model_app_label, model_name
                        )
                        if through_model_tuple == old_model_tuple:
                            if changed_field is None:
                                changed_field = field.clone()
                            changed_field.remote_field.through = new_remote_model
                if changed_field:
                    model_state.fields[index] = name, changed_field
                    model_changed = True
            if model_changed:
                to_reload.append((model_app_label, model_name))
        # Reload models related to old model before removing the old model.
        state.reload_models(to_reload, delay=True)
        # Remove the old model.
        state.remove_model(app_label, self.old_name_lower)
        state.reload_model(app_label, self.new_name_lower, delay=True)
    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        """Rename the table, then fix up inbound FKs and auto-created M2M tables."""
        new_model = to_state.apps.get_model(app_label, self.new_name)
        if self.allow_migrate_model(schema_editor.connection.alias, new_model):
            old_model = from_state.apps.get_model(app_label, self.old_name)
            # Move the main table
            schema_editor.alter_db_table(
                new_model,
                old_model._meta.db_table,
                new_model._meta.db_table,
            )
            # Alter the fields pointing to us
            for related_object in old_model._meta.related_objects:
                if related_object.related_model == old_model:
                    # Self-referential relation: target the renamed model.
                    model = new_model
                    related_key = (app_label, self.new_name_lower)
                else:
                    model = related_object.related_model
                    related_key = (
                        related_object.related_model._meta.app_label,
                        related_object.related_model._meta.model_name,
                    )
                to_field = to_state.apps.get_model(
                    *related_key
                )._meta.get_field(related_object.field.name)
                schema_editor.alter_field(
                    model,
                    related_object.field,
                    to_field,
                )
            # Rename M2M fields whose name is based on this model's name.
            fields = zip(old_model._meta.local_many_to_many, new_model._meta.local_many_to_many)
            for (old_field, new_field) in fields:
                # Skip self-referential fields as these are renamed above.
                if new_field.model == new_field.related_model or not new_field.remote_field.through._meta.auto_created:
                    continue
                # Rename the M2M table that's based on this model's name.
                old_m2m_model = old_field.remote_field.through
                new_m2m_model = new_field.remote_field.through
                schema_editor.alter_db_table(
                    new_m2m_model,
                    old_m2m_model._meta.db_table,
                    new_m2m_model._meta.db_table,
                )
                # Rename the column in the M2M table that's based on this
                # model's name.
                schema_editor.alter_field(
                    new_m2m_model,
                    old_m2m_model._meta.get_field(old_model._meta.model_name),
                    new_m2m_model._meta.get_field(new_model._meta.model_name),
                )
    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        # Swap old/new names, run forwards, then swap back to restore state.
        self.new_name_lower, self.old_name_lower = self.old_name_lower, self.new_name_lower
        self.new_name, self.old_name = self.old_name, self.new_name
        self.database_forwards(app_label, schema_editor, from_state, to_state)
        self.new_name_lower, self.old_name_lower = self.old_name_lower, self.new_name_lower
        self.new_name, self.old_name = self.old_name, self.new_name
    def references_model(self, name, app_label=None):
        # Both the old and the new name count as references to this model.
        return (
            name.lower() == self.old_name_lower or
            name.lower() == self.new_name_lower
        )
    def describe(self):
        return "Rename model %s to %s" % (self.old_name, self.new_name)
    def reduce(self, operation, app_label=None):
        # Two consecutive renames collapse into one old_name -> final name.
        if (isinstance(operation, RenameModel) and
                self.new_name_lower == operation.old_name_lower):
            return [
                RenameModel(
                    self.old_name,
                    operation.new_name,
                ),
            ]
        # Skip `ModelOperation.reduce` as we want to run `references_model`
        # against self.new_name.
        return (
            super(ModelOperation, self).reduce(operation, app_label=app_label) or
            not operation.references_model(self.new_name, app_label)
        )
class ModelOptionOperation(ModelOperation):
    """Base for operations that only touch a model's Meta options."""

    def reduce(self, operation, app_label=None):
        # A later operation of the same kind on the same model (or deleting
        # the model outright) makes this operation redundant.
        if isinstance(operation, (self.__class__, DeleteModel)):
            if self.name_lower == operation.name_lower:
                return [operation]
        return super().reduce(operation, app_label=app_label)
class AlterModelTable(ModelOptionOperation):
    """Rename a model's table."""

    def __init__(self, name, table):
        self.table = table
        super().__init__(name)

    def deconstruct(self):
        """Return (qualname, args, kwargs) for serialization."""
        return (
            self.__class__.__qualname__,
            [],
            {'name': self.name, 'table': self.table},
        )

    def state_forwards(self, app_label, state):
        # Update the in-memory option, then lazily rebuild the model.
        state.models[app_label, self.name_lower].options["db_table"] = self.table
        state.reload_model(app_label, self.name_lower, delay=True)

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        new_model = to_state.apps.get_model(app_label, self.name)
        if not self.allow_migrate_model(schema_editor.connection.alias, new_model):
            return
        old_model = from_state.apps.get_model(app_label, self.name)
        schema_editor.alter_db_table(
            new_model,
            old_model._meta.db_table,
            new_model._meta.db_table,
        )
        # Auto-created M2M through tables derive their name from this
        # model's table, so they must be renamed alongside it.
        m2m_pairs = zip(old_model._meta.local_many_to_many,
                        new_model._meta.local_many_to_many)
        for old_field, new_field in m2m_pairs:
            if new_field.remote_field.through._meta.auto_created:
                schema_editor.alter_db_table(
                    new_field.remote_field.through,
                    old_field.remote_field.through._meta.db_table,
                    new_field.remote_field.through._meta.db_table,
                )

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        # Symmetric: swapping states performs the reverse rename.
        return self.database_forwards(app_label, schema_editor, from_state, to_state)

    def describe(self):
        table_desc = self.table if self.table is not None else "(default)"
        return "Rename table for %s to %s" % (self.name, table_desc)
class AlterTogetherOptionOperation(ModelOptionOperation):
    """Shared base for AlterUniqueTogether/AlterIndexTogether."""
    # Subclasses set this to the Meta option they manage
    # ('unique_together' or 'index_together').
    option_name = None
    def __init__(self, name, option_value):
        if option_value:
            # Normalize via normalize_together() and store as a set of tuples.
            option_value = set(normalize_together(option_value))
        # Store under the subclass-specific attribute name so deconstruct()
        # emits the right keyword argument.
        setattr(self, self.option_name, option_value)
        super().__init__(name)
    @cached_property
    def option_value(self):
        # Generic accessor for the subclass-specific attribute set above.
        return getattr(self, self.option_name)
    def deconstruct(self):
        """Return (qualname, args, kwargs) so this operation can be serialized."""
        kwargs = {
            'name': self.name,
            self.option_name: self.option_value,
        }
        return (
            self.__class__.__qualname__,
            [],
            kwargs
        )
    def state_forwards(self, app_label, state):
        model_state = state.models[app_label, self.name_lower]
        model_state.options[self.option_name] = self.option_value
        state.reload_model(app_label, self.name_lower, delay=True)
    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        new_model = to_state.apps.get_model(app_label, self.name)
        if self.allow_migrate_model(schema_editor.connection.alias, new_model):
            old_model = from_state.apps.get_model(app_label, self.name)
            # Dispatch to schema_editor.alter_unique_together or
            # schema_editor.alter_index_together.
            alter_together = getattr(schema_editor, 'alter_%s' % self.option_name)
            alter_together(
                new_model,
                getattr(old_model._meta, self.option_name, set()),
                getattr(new_model._meta, self.option_name, set()),
            )
    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        # Symmetric: running forwards with swapped states reverses the change.
        return self.database_forwards(app_label, schema_editor, from_state, to_state)
    def references_field(self, model_name, name, app_label=None):
        # An empty/cleared option conservatively references every field.
        return (
            self.references_model(model_name, app_label) and
            (
                not self.option_value or
                any((name in fields) for fields in self.option_value)
            )
        )
    def describe(self):
        # len(... or '') evaluates to 0 when the option is empty or None.
        return "Alter %s for %s (%s constraint(s))" % (self.option_name, self.name, len(self.option_value or ''))
class AlterUniqueTogether(AlterTogetherOptionOperation):
    """
    Set a model's unique_together option to the target value,
    given as a set of tuples of field names.
    """
    option_name = 'unique_together'

    def __init__(self, name, unique_together):
        # Explicit signature so deconstruct() serializes the option under
        # the keyword 'unique_together'.
        super().__init__(name, unique_together)
class AlterIndexTogether(AlterTogetherOptionOperation):
    """
    Set a model's index_together option to the target value,
    given as a set of tuples of field names.
    """
    option_name = "index_together"

    def __init__(self, name, index_together):
        # Explicit signature so deconstruct() serializes the option under
        # the keyword 'index_together'.
        super().__init__(name, index_together)
class AlterOrderWithRespectTo(ModelOptionOperation):
    """Represent a change with the order_with_respect_to option."""
    option_name = 'order_with_respect_to'

    def __init__(self, name, order_with_respect_to):
        self.order_with_respect_to = order_with_respect_to
        super().__init__(name)

    def deconstruct(self):
        """Return (qualname, args, kwargs) for serialization."""
        return (
            self.__class__.__qualname__,
            [],
            {'name': self.name, 'order_with_respect_to': self.order_with_respect_to},
        )

    def state_forwards(self, app_label, state):
        target_state = state.models[app_label, self.name_lower]
        target_state.options['order_with_respect_to'] = self.order_with_respect_to
        state.reload_model(app_label, self.name_lower, delay=True)

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        to_model = to_state.apps.get_model(app_label, self.name)
        if not self.allow_migrate_model(schema_editor.connection.alias, to_model):
            return
        from_model = from_state.apps.get_model(app_label, self.name)
        had_order = from_model._meta.order_with_respect_to
        wants_order = to_model._meta.order_with_respect_to
        if had_order and not wants_order:
            # The implicit "_order" column is no longer needed: drop it.
            schema_editor.remove_field(from_model, from_model._meta.get_field("_order"))
        elif wants_order and not had_order:
            # Add the implicit "_order" column. A change of the target field
            # (likely a rename) leaves the column untouched.
            field = to_model._meta.get_field("_order")
            if not field.has_default():
                field.default = 0
            schema_editor.add_field(from_model, field)

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        # Symmetric: running forwards with swapped states reverses the change.
        self.database_forwards(app_label, schema_editor, from_state, to_state)

    def references_field(self, model_name, name, app_label=None):
        if not self.references_model(model_name, app_label):
            return False
        # A cleared option (None) conservatively references every field.
        return self.order_with_respect_to is None or name == self.order_with_respect_to

    def describe(self):
        return "Set order_with_respect_to on %s to %s" % (self.name, self.order_with_respect_to)
class AlterModelOptions(ModelOptionOperation):
    """
    Set new model options that don't directly affect the database schema
    (like verbose_name, permissions, ordering). Python code in migrations
    may still need them.
    """
    # Model options we want to compare and preserve in an AlterModelOptions op
    ALTER_OPTION_KEYS = [
        "base_manager_name",
        "default_manager_name",
        "default_related_name",
        "get_latest_by",
        "managed",
        "ordering",
        "permissions",
        "default_permissions",
        "select_on_save",
        "verbose_name",
        "verbose_name_plural",
    ]

    def __init__(self, name, options):
        self.options = options
        super().__init__(name)

    def deconstruct(self):
        """Return (qualname, args, kwargs) for serialization."""
        return (
            self.__class__.__qualname__,
            [],
            {'name': self.name, 'options': self.options},
        )

    def state_forwards(self, app_label, state):
        model_state = state.models[app_label, self.name_lower]
        # Overlay the new options, then drop every tracked key that this
        # operation no longer specifies.
        merged = dict(model_state.options)
        merged.update(self.options)
        model_state.options = merged
        for key in self.ALTER_OPTION_KEYS:
            if key not in self.options:
                model_state.options.pop(key, False)
        state.reload_model(app_label, self.name_lower, delay=True)

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        # These options never touch the schema.
        pass

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        pass

    def describe(self):
        return "Change Meta options on %s" % self.name
class AlterModelManagers(ModelOptionOperation):
    """Alter the model's managers."""
    serialization_expand_args = ['managers']

    def __init__(self, name, managers):
        self.managers = managers
        super().__init__(name)

    def deconstruct(self):
        # Managers are serialized positionally, unlike most operations.
        return (self.__class__.__qualname__, [self.name, self.managers], {})

    def state_forwards(self, app_label, state):
        target_state = state.models[app_label, self.name_lower]
        target_state.managers = list(self.managers)
        state.reload_model(app_label, self.name_lower, delay=True)

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        # Managers are a pure-Python concern; the database is unaffected.
        pass

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        pass

    def describe(self):
        return "Change managers on %s" % self.name
class IndexOperation(Operation):
    """Common base for operations managed through a model-state option list."""
    option_name = 'indexes'

    @cached_property
    def model_name_lower(self):
        """Lowercased model name, cached for repeated state lookups."""
        return self.model_name.lower()
class AddIndex(IndexOperation):
    """Add an index on a model."""

    def __init__(self, model_name, index):
        self.model_name = model_name
        # Unnamed indexes cannot be reversed later, so reject them up front.
        if not index.name:
            raise ValueError(
                "Indexes passed to AddIndex operations require a name "
                "argument. %r doesn't have one." % index
            )
        self.index = index

    def state_forwards(self, app_label, state):
        model_state = state.models[app_label, self.model_name_lower]
        current = model_state.options[self.option_name]
        # Clone so the state copy is independent of this operation's index.
        model_state.options[self.option_name] = list(current) + [self.index.clone()]
        state.reload_model(app_label, self.model_name_lower, delay=True)

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        target = to_state.apps.get_model(app_label, self.model_name)
        if self.allow_migrate_model(schema_editor.connection.alias, target):
            schema_editor.add_index(target, self.index)

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        target = from_state.apps.get_model(app_label, self.model_name)
        if self.allow_migrate_model(schema_editor.connection.alias, target):
            schema_editor.remove_index(target, self.index)

    def deconstruct(self):
        """Return (qualname, args, kwargs) for serialization."""
        return (
            self.__class__.__qualname__,
            [],
            {'model_name': self.model_name, 'index': self.index},
        )

    def describe(self):
        field_list = ', '.join(self.index.fields)
        return 'Create index %s on field(s) %s of model %s' % (
            self.index.name, field_list, self.model_name,
        )
class RemoveIndex(IndexOperation):
    """Remove an index from a model."""

    def __init__(self, model_name, name):
        self.model_name = model_name
        self.name = name

    def state_forwards(self, app_label, state):
        model_state = state.models[app_label, self.model_name_lower]
        kept = [idx for idx in model_state.options[self.option_name] if idx.name != self.name]
        model_state.options[self.option_name] = kept
        state.reload_model(app_label, self.model_name_lower, delay=True)

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        target = from_state.apps.get_model(app_label, self.model_name)
        if self.allow_migrate_model(schema_editor.connection.alias, target):
            # The index object only exists in the pre-migration state.
            from_model_state = from_state.models[app_label, self.model_name_lower]
            schema_editor.remove_index(
                target, from_model_state.get_index_by_name(self.name))

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        target = to_state.apps.get_model(app_label, self.model_name)
        if self.allow_migrate_model(schema_editor.connection.alias, target):
            to_model_state = to_state.models[app_label, self.model_name_lower]
            schema_editor.add_index(
                target, to_model_state.get_index_by_name(self.name))

    def deconstruct(self):
        """Return (qualname, args, kwargs) for serialization."""
        return (
            self.__class__.__qualname__,
            [],
            {'model_name': self.model_name, 'name': self.name},
        )

    def describe(self):
        return 'Remove index %s from %s' % (self.name, self.model_name)
class AddConstraint(IndexOperation):
    """Add a constraint to a model."""
    option_name = 'constraints'

    def __init__(self, model_name, constraint):
        self.model_name = model_name
        self.constraint = constraint

    def state_forwards(self, app_label, state):
        model_state = state.models[app_label, self.model_name_lower]
        current = model_state.options[self.option_name]
        model_state.options[self.option_name] = list(current) + [self.constraint]
        state.reload_model(app_label, self.model_name_lower, delay=True)

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        target = to_state.apps.get_model(app_label, self.model_name)
        if self.allow_migrate_model(schema_editor.connection.alias, target):
            schema_editor.add_constraint(target, self.constraint)

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        target = to_state.apps.get_model(app_label, self.model_name)
        if self.allow_migrate_model(schema_editor.connection.alias, target):
            schema_editor.remove_constraint(target, self.constraint)

    def deconstruct(self):
        # NOTE: uses __name__ (not __qualname__), matching the original.
        kwargs = {'model_name': self.model_name, 'constraint': self.constraint}
        return self.__class__.__name__, [], kwargs

    def describe(self):
        return 'Create constraint %s on model %s' % (self.constraint.name, self.model_name)
class RemoveConstraint(IndexOperation):
    """Remove a named constraint from a model."""
    option_name = 'constraints'

    def __init__(self, model_name, name):
        self.model_name = model_name
        self.name = name

    def state_forwards(self, app_label, state):
        model_state = state.models[app_label, self.model_name_lower]
        remaining = [c for c in model_state.options[self.option_name] if c.name != self.name]
        model_state.options[self.option_name] = remaining
        state.reload_model(app_label, self.model_name_lower, delay=True)

    def database_forwards(self, app_label, schema_editor, from_state, to_state):
        target = to_state.apps.get_model(app_label, self.model_name)
        if self.allow_migrate_model(schema_editor.connection.alias, target):
            # The constraint object only exists in the pre-migration state.
            from_model_state = from_state.models[app_label, self.model_name_lower]
            schema_editor.remove_constraint(
                target, from_model_state.get_constraint_by_name(self.name))

    def database_backwards(self, app_label, schema_editor, from_state, to_state):
        target = to_state.apps.get_model(app_label, self.model_name)
        if self.allow_migrate_model(schema_editor.connection.alias, target):
            to_model_state = to_state.models[app_label, self.model_name_lower]
            schema_editor.add_constraint(
                target, to_model_state.get_constraint_by_name(self.name))

    def deconstruct(self):
        # NOTE: uses __name__ (not __qualname__), matching the original.
        kwargs = {'model_name': self.model_name, 'name': self.name}
        return self.__class__.__name__, [], kwargs

    def describe(self):
        return 'Remove constraint %s from model %s' % (self.name, self.model_name)
|
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.text
~~~~~~~~~~~~~~~~~~~~
Lexers for non-source code file types.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
try:
set
except NameError:
from sets import Set as set
from bisect import bisect
from pygments.lexer import Lexer, LexerContext, RegexLexer, ExtendedRegexLexer, \
bygroups, include, using, this, do_insertions
from pygments.token import Punctuation, Text, Comment, Keyword, Name, String, \
Generic, Operator, Number, Whitespace, Literal
from pygments.util import get_bool_opt
from pygments.lexers.other import BashLexer
__all__ = ['IniLexer', 'SourcesListLexer', 'BaseMakefileLexer',
'MakefileLexer', 'DiffLexer', 'IrcLogsLexer', 'TexLexer',
'GroffLexer', 'ApacheConfLexer', 'BBCodeLexer', 'MoinWikiLexer',
'RstLexer', 'VimLexer', 'GettextLexer', 'SquidConfLexer',
'DebianControlLexer', 'DarcsPatchLexer', 'YamlLexer',
'LighttpdConfLexer', 'NginxConfLexer', 'CMakeLexer']
class IniLexer(RegexLexer):
    """
    Lexer for configuration files in INI style.
    """
    name = 'INI'
    aliases = ['ini', 'cfg']
    filenames = ['*.ini', '*.cfg', '*.properties']
    mimetypes = ['text/x-ini']
    tokens = {
        'root': [
            (r'\s+', Text),
            # ';' or '#' introduce a comment to end of line.
            (r'[;#].*?$', Comment),
            # [section] headers.
            (r'\[.*?\]$', Keyword),
            # key = value assignments.
            (r'(.*?)([ \t]*)(=)([ \t]*)(.*?)$',
             bygroups(Name.Attribute, Text, Operator, Text, String))
        ]
    }
    def analyse_text(text):
        # Guess INI when the first line looks like a [section] header.
        # NOTE(review): no @staticmethod decorator -- Pygments calls
        # analyse_text unbound on the class, which is its convention.
        npos = text.find('\n')
        if npos < 3:
            return False
        return text[0] == '[' and text[npos-1] == ']'
class SourcesListLexer(RegexLexer):
    """
    Lexer that highlights debian sources.list files.
    *New in Pygments 0.7.*
    """
    name = 'Debian Sourcelist'
    aliases = ['sourceslist', 'sources.list']
    filenames = ['sources.list']
    # Fixed: previously assigned to a nonexistent singular `mimetype`
    # attribute, so the Lexer API (which reads the plural `mimetypes`)
    # never saw this MIME type registration.
    mimetypes = ['application/x-debian-sourceslist']
    tokens = {
        'root': [
            (r'\s+', Text),
            (r'#.*?$', Comment),
            # "deb"/"deb-src" entry type, then the distribution field.
            (r'^(deb(?:-src)?)(\s+)',
             bygroups(Keyword, Text), 'distribution')
        ],
        'distribution': [
            (r'#.*?$', Comment, '#pop'),
            (r'\$\(ARCH\)', Name.Variable),
            (r'[^\s$[]+', String),
            (r'\[', String.Other, 'escaped-distribution'),
            (r'\$', String),
            # Whitespace ends the distribution field; components follow.
            (r'\s+', Text, 'components')
        ],
        'escaped-distribution': [
            (r'\]', String.Other, '#pop'),
            (r'\$\(ARCH\)', Name.Variable),
            (r'[^\]$]+', String.Other),
            (r'\$', String.Other)
        ],
        'components': [
            # '#pop:2' returns all the way to 'root' for the next line.
            (r'#.*?$', Comment, '#pop:2'),
            (r'$', Text, '#pop:2'),
            (r'\s+', Text),
            (r'\S+', Keyword.Pseudo),
        ]
    }
    def analyse_text(text):
        # Accept only if every non-empty line is a comment or deb/deb-src
        # entry. NOTE: called unbound by Pygments (library convention).
        for line in text.split('\n'):
            line = line.strip()
            if not (line.startswith('#') or line.startswith('deb ') or
                    line.startswith('deb-src ') or not line):
                return False
        return True
class MakefileLexer(Lexer):
    """
    Lexer for BSD and GNU make extensions (lenient enough to handle both in
    the same file even).
    *Rewritten in Pygments 0.10.*
    """
    name = 'Makefile'
    aliases = ['make', 'makefile', 'mf', 'bsdmake']
    filenames = ['*.mak', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile']
    mimetypes = ['text/x-makefile']
    # Lines that are preprocessor-style directives (BSD or GNU flavor).
    r_special = re.compile(r'^(?:'
        # BSD Make
        r'\.\s*(include|undef|error|warning|if|else|elif|endif|for|endfor)|'
        # GNU Make
        r'\s*(ifeq|ifneq|ifdef|ifndef|else|endif|-?include|define|endef|:))(?=\s)')
    # Comment lines, optionally preceded by whitespace and '@'.
    r_comment = re.compile(r'^\s*@?#')
    def get_tokens_unprocessed(self, text):
        # Strategy: lex only the "plain" lines with BaseMakefileLexer and
        # re-insert directive/comment lines at their original offsets via
        # do_insertions().
        ins = []
        lines = text.splitlines(True)
        done = ''
        lex = BaseMakefileLexer(**self.options)
        backslashflag = False
        for line in lines:
            if self.r_special.match(line) or backslashflag:
                # Directive line, or the continuation of one (previous line
                # ended with a backslash).
                ins.append((len(done), [(0, Comment.Preproc, line)]))
                backslashflag = line.strip().endswith('\\')
            elif self.r_comment.match(line):
                ins.append((len(done), [(0, Comment, line)]))
            else:
                # Accumulate plain makefile text for the base lexer.
                done += line
        for item in do_insertions(ins, lex.get_tokens_unprocessed(done)):
            yield item
class BaseMakefileLexer(RegexLexer):
    """
    Lexer for simple Makefiles (no preprocessing).
    *New in Pygments 0.10.*
    """
    name = 'Makefile'
    aliases = ['basemake']
    # Not registered for any filenames/mimetypes: only used as a helper
    # by MakefileLexer above.
    filenames = []
    mimetypes = []
    tokens = {
        'root': [
            # Indented recipe lines are shell code: delegate to BashLexer.
            (r'^(?:[\t ]+.*\n|\n)+', using(BashLexer)),
            # $( ... ) spanning one or more (possibly continued) lines.
            (r'\$\((?:.*\\\n|.*\n)+', using(BashLexer)),
            (r'\s+', Text),
            (r'#.*?\n', Comment),
            (r'(export)(\s+)(?=[a-zA-Z0-9_${}\t -]+\n)',
             bygroups(Keyword, Text), 'export'),
            (r'export\s+', Keyword),
            # assignment
            (r'([a-zA-Z0-9_${}.-]+)(\s*)([!?:+]?=)([ \t]*)((?:.*\\\n|.*\n)+)',
             bygroups(Name.Variable, Text, Operator, Text, using(BashLexer))),
            # strings
            (r'(?s)"(\\\\|\\.|[^"\\])*"', String.Double),
            (r"(?s)'(\\\\|\\.|[^'\\])*'", String.Single),
            # targets
            (r'([^\n:]+)(:+)([ \t]*)', bygroups(Name.Function, Operator, Text),
             'block-header'),
            # TODO: add paren handling (grr)
        ],
        'export': [
            (r'[a-zA-Z0-9_${}-]+', Name.Variable),
            (r'\n', Text, '#pop'),
            (r'\s+', Text),
        ],
        'block-header': [
            # Prerequisite list following a target's colon.
            (r'[^,\\\n#]+', Number),
            (r',', Punctuation),
            (r'#.*?\n', Comment),
            (r'\\\n', Text), # line continuation
            (r'\\.', Text),
            # Recipe body ends the header; hand it to Bash and pop.
            (r'(?:[\t ]+.*\n|\n)+', using(BashLexer), '#pop'),
        ],
    }
class DiffLexer(RegexLexer):
    """
    Lexer for unified or context-style diffs or patches.
    """
    name = 'Diff'
    aliases = ['diff', 'udiff']
    filenames = ['*.diff', '*.patch']
    mimetypes = ['text/x-diff', 'text/x-patch']
    tokens = {
        'root': [
            # Leading space: unchanged context line.
            (r' .*\n', Text),
            (r'\+.*\n', Generic.Inserted),
            (r'-.*\n', Generic.Deleted),
            # '!' marks changed lines in context-style diffs.
            (r'!.*\n', Generic.Strong),
            # '@@ ... @@' hunk headers.
            (r'@.*\n', Generic.Subheading),
            (r'([Ii]ndex|diff).*\n', Generic.Heading),
            (r'=.*\n', Generic.Heading),
            # Fallback for anything else (e.g. '---'/'+++' file headers).
            (r'.*\n', Text),
        ]
    }
    def analyse_text(text):
        # Called unbound by Pygments (library convention, no staticmethod).
        if text[:7] == 'Index: ':
            return True
        if text[:5] == 'diff ':
            return True
        if text[:4] == '--- ':
            return 0.9
        # Falls through returning None, which Pygments treats as no match.
# Primitive patch commands recognized inside a darcs patch body
# (interpolated into DarcsPatchLexer's keyword rule).
DPATCH_KEYWORDS = [
    'hunk',
    'addfile',
    'adddir',
    'rmfile',
    'rmdir',
    'move',
    'replace',
]
class DarcsPatchLexer(RegexLexer):
    """
    DarcsPatchLexer is a lexer for the various versions of the darcs patch
    format. Examples of this format are derived by commands such as
    ``darcs annotate --patch`` and ``darcs send``.
    *New in Pygments 0.10.*
    """
    name = 'Darcs Patch'
    aliases = ['dpatch']
    filenames = ['*.dpatch', '*.darcspatch']
    tokens = {
        'root': [
            (r'<', Operator),
            (r'>', Operator),
            (r'{', Operator),
            (r'}', Operator),
            # patch header with closing ']' on the same line
            (r'(\[)((?:TAG )?)(.*)(\n)(.*)(\*\*)(\d+)(\s?)(\])',
             bygroups(Operator, Keyword, Name, Text, Name, Operator,
                      Literal.Date, Text, Operator)),
            # patch header followed by a long comment block (ends at ']')
            (r'(\[)((?:TAG )?)(.*)(\n)(.*)(\*\*)(\d+)(\s?)',
             bygroups(Operator, Keyword, Name, Text, Name, Operator,
                      Literal.Date, Text), 'comment'),
            (r'New patches:', Generic.Heading),
            (r'Context:', Generic.Heading),
            (r'Patch bundle hash:', Generic.Heading),
            (r'(\s*)(%s)(.*\n)' % '|'.join(DPATCH_KEYWORDS),
             bygroups(Text, Keyword, Text)),
            (r'\+', Generic.Inserted, "insert"),
            (r'-', Generic.Deleted, "delete"),
            (r'.*\n', Text),
        ],
        'comment': [
            (r'[^\]].*\n', Comment),
            (r'\]', Operator, "#pop"),
        ],
        'specialText': [ # darcs add [_CODE_] special operators for clarity
            (r'\n', Text, "#pop"), # line-based
            (r'\[_[^_]*_]', Operator),
        ],
        'insert': [
            include('specialText'),
            (r'\[', Generic.Inserted),
            (r'[^\n\[]*', Generic.Inserted),
        ],
        'delete': [
            include('specialText'),
            (r'\[', Generic.Deleted),
            (r'[^\n\[]*', Generic.Deleted),
        ],
    }
class IrcLogsLexer(RegexLexer):
    """
    Lexer for IRC logs in *irssi*, *xchat* or *weechat* style.
    """
    name = 'IRC logs'
    aliases = ['irc']
    filenames = ['*.weechatlog']
    mimetypes = ['text/x-irclog']
    flags = re.VERBOSE | re.MULTILINE
    # Optional timestamp prefix shared by all line rules below.  This is a
    # verbose-mode regex fragment (whitespace and '#' comments inside it are
    # ignored by re.VERBOSE), concatenated into the patterns in `tokens`.
    timestamp = r"""
        (
          # irssi / xchat and others
          (?: \[|\()?                  # Opening bracket or paren for the timestamp
            (?:                        # Timestamp
                (?: (?:\d{1,4} [-/]?)+ # Date as - or /-separated groups of digits
                 [T ])?                # Date/time separator: T or space
                (?: \d?\d [:.]?)+      # Time as :/.-separated groups of 1 or 2 digits
            )
          (?: \]|\))?\s+               # Closing bracket or paren for the timestamp
        |
          # weechat
          \d{4}\s\w{3}\s\d{2}\s        # Date
          \d{2}:\d{2}:\d{2}\s+         # Time + Whitespace
        |
          # xchat
          \w{3}\s\d{2}\s               # Date
          \d{2}:\d{2}:\d{2}\s+         # Time + Whitespace
        )?
    """
    tokens = {
        'root': [
            # log start/end
            (r'^\*\*\*\*(.*)\*\*\*\*$', Comment),
            # hack
            ("^" + timestamp + r'(\s*<[^>]*>\s*)$', bygroups(Comment.Preproc, Name.Tag)),
            # normal msgs
            ("^" + timestamp + r"""
                (\s*<.*?>\s*)          # Nick """,
             bygroups(Comment.Preproc, Name.Tag), 'msg'),
            # /me msgs
            ("^" + timestamp + r"""
                (\s*[*]\s+)            # Star
                ([^\s]+\s+.*?\n)       # Nick + rest of message """,
             bygroups(Comment.Preproc, Keyword, Generic.Inserted)),
            # join/part msgs
            ("^" + timestamp + r"""
                (\s*(?:\*{3}|<?-[!@=P]?->?)\s*)  # Star(s) or symbols
                ([^\s]+\s+)                      # Nick + Space
                (.*?\n)                          # Rest of message """,
             bygroups(Comment.Preproc, Keyword, String, Comment)),
            (r"^.*?\n", Text),
        ],
        'msg': [
            (r"[^\s]+:(?!//)", Name.Attribute),  # Prefix
            (r".*\n", Text, '#pop'),
        ],
    }
class BBCodeLexer(RegexLexer):
    """
    A lexer that highlights BBCode(-like) syntax.
    *New in Pygments 0.6.*
    """
    name = 'BBCode'
    aliases = ['bbcode']
    mimetypes = ['text/x-bbcode']
    tokens = {
        'root': [
            # everything up to the next '[' is plain text
            (r'[^[]+', Text),
            # tag/end tag begin
            (r'\[/?\w+', Keyword, 'tag'),
            # stray bracket
            (r'\[', Text),
        ],
        'tag': [
            (r'\s+', Text),
            # attribute with value
            (r'(\w+)(=)("?[^\s"\]]+"?)',
             bygroups(Name.Attribute, Operator, String)),
            # tag argument (a la [color=green])
            (r'(=)("?[^\s"\]]+"?)',
             bygroups(Operator, String)),
            # tag end
            (r'\]', Keyword, '#pop'),
        ],
    }
class TexLexer(RegexLexer):
    """
    Lexer for the TeX and LaTeX typesetting languages.
    """
    name = 'TeX'
    aliases = ['tex', 'latex']
    filenames = ['*.tex', '*.aux', '*.toc']
    mimetypes = ['text/x-tex', 'text/x-latex']
    tokens = {
        # shared rules included by both text and math states
        'general': [
            (r'%.*?\n', Comment),
            (r'[{}]', Name.Builtin),
            (r'[&_^]', Name.Builtin),
        ],
        'root': [
            # math-mode delimiters: \[ \] and $$ for display math,
            # \( \) and $ for inline math
            (r'\\\[', String.Backtick, 'displaymath'),
            (r'\\\(', String, 'inlinemath'),
            (r'\$\$', String.Backtick, 'displaymath'),
            (r'\$', String, 'inlinemath'),
            (r'\\([a-zA-Z]+|.)', Keyword, 'command'),
            include('general'),
            (r'[^\\$%&_^{}]+', Text),
        ],
        'math': [
            (r'\\([a-zA-Z]+|.)', Name.Variable),
            include('general'),
            (r'[0-9]+', Number),
            (r'[-=!+*/()\[\]]', Operator),
            (r'[^=!+*/()\[\]\\$%&_^{}0-9-]+', Name.Builtin),
        ],
        'inlinemath': [
            (r'\\\)', String, '#pop'),
            (r'\$', String, '#pop'),
            include('math'),
        ],
        'displaymath': [
            (r'\\\]', String, '#pop'),
            (r'\$\$', String, '#pop'),
            (r'\$', Name.Builtin),
            include('math'),
        ],
        'command': [
            (r'\[.*?\]', Name.Attribute),
            (r'\*', Keyword),
            # empty pattern always matches: leave 'command' once the
            # optional star/bracket argument has been consumed
            (r'', Text, '#pop'),
        ],
    }
    def analyse_text(text):
        # Confidence hook for pygments' guess_lexer(); implicitly returns
        # None when no known TeX prologue command is found.
        for start in ("\\documentclass", "\\input", "\\documentstyle",
                      "\\relax"):
            if text[:len(start)] == start:
                return True
class GroffLexer(RegexLexer):
    """
    Lexer for the (g)roff typesetting language, supporting groff
    extensions. Mainly useful for highlighting manpage sources.
    *New in Pygments 0.6.*
    """
    name = 'Groff'
    aliases = ['groff', 'nroff', 'man']
    filenames = ['*.[1234567]', '*.man']
    mimetypes = ['application/x-troff', 'text/troff']
    tokens = {
        'root': [
            # a request: '.' followed by a request name, e.g. ".TH"
            (r'(?i)(\.)(\w+)', bygroups(Text, Keyword), 'request'),
            (r'\.', Punctuation, 'request'),
            # Regular characters, slurp till we find a backslash or newline
            (r'[^\\\n]*', Text, 'textline'),
        ],
        'textline': [
            include('escapes'),
            (r'[^\\\n]+', Text),
            (r'\n', Text, '#pop'),
        ],
        'escapes': [
            # groff has many ways to write escapes.
            (r'\\"[^\n]*', Comment),
            (r'\\[fn]\w', String.Escape),
            (r'\\\(..', String.Escape),
            (r'\\.\[.*\]', String.Escape),
            (r'\\.', String.Escape),
            (r'\\\n', Text, 'request'),
        ],
        'request': [
            (r'\n', Text, '#pop'),
            include('escapes'),
            (r'"[^\n"]+"', String.Double),
            (r'\d+', Number),
            (r'\S+', String),
            (r'\s+', Text),
        ],
    }
    def analyse_text(text):
        # Confidence hook for pygments' guess_lexer().  Returns False for
        # non-groff input, True/0.9 for recognized groff prologues, and
        # implicitly None otherwise.
        if text[:1] != '.':
            return False
        if text[:3] == '.\\"':
            return True
        if text[:4] == '.TH ':
            return True
        # Use a slice (text[3:4]) instead of indexing so that inputs
        # shorter than four characters (e.g. ".ab") yield '' -- which is
        # not whitespace -- rather than raising IndexError.
        if text[1:3].isalnum() and text[3:4].isspace():
            return 0.9
class ApacheConfLexer(RegexLexer):
    """
    Lexer for configuration files following the Apache config file
    format.
    *New in Pygments 0.6.*
    """
    name = 'ApacheConf'
    aliases = ['apacheconf', 'aconf', 'apache']
    filenames = ['.htaccess', 'apache.conf', 'apache2.conf']
    mimetypes = ['text/x-apacheconf']
    flags = re.MULTILINE | re.IGNORECASE
    tokens = {
        'root': [
            (r'\s+', Text),
            (r'(#.*?)$', Comment),
            # section tags like <Directory /var/www> ... </Directory>
            (r'(<[^\s>]+)(?:(\s+)(.*?))?(>)',
             bygroups(Name.Tag, Text, String, Name.Tag)),
            # a directive name followed by its arguments
            (r'([a-zA-Z][a-zA-Z0-9]*)(\s+)',
             bygroups(Name.Builtin, Text), 'value'),
            (r'\.+', Text),
        ],
        'value': [
            # end of line ends the directive's argument list
            (r'$', Text, '#pop'),
            (r'[^\S\n]+', Text),
            (r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number),
            (r'\d+', Number),
            (r'/([a-zA-Z0-9][a-zA-Z0-9_./-]+)', String.Other),
            (r'(on|off|none|any|all|double|email|dns|min|minimal|'
             r'os|productonly|full|emerg|alert|crit|error|warn|'
             r'notice|info|debug|registry|script|inetd|standalone|'
             r'user|group)\b', Keyword),
            (r'"([^"\\]*(?:\\.[^"\\]*)*)"', String.Double),
            (r'[^\s"]+', Text)
        ]
    }
class MoinWikiLexer(RegexLexer):
    """
    For MoinMoin (and Trac) Wiki markup.
    *New in Pygments 0.7.*
    """
    name = 'MoinMoin/Trac Wiki markup'
    aliases = ['trac-wiki', 'moin']
    filenames = []
    mimetypes = ['text/x-trac-wiki']
    flags = re.MULTILINE | re.IGNORECASE
    tokens = {
        'root': [
            (r'^#.*$', Comment),
            (r'(!)(\S+)', bygroups(Keyword, Text)), # Ignore-next
            # Titles
            (r'^(=+)([^=]+)(=+)(\s*#.+)?$',
             bygroups(Generic.Heading, using(this), Generic.Heading, String)),
            # Literal code blocks, with optional shebang
            (r'({{{)(\n#!.+)?', bygroups(Name.Builtin, Name.Namespace), 'codeblock'),
            (r'(\'\'\'?|\|\||`|__|~~|\^|,,|::)', Comment), # Formatting
            # Lists
            (r'^( +)([.*-])( )', bygroups(Text, Name.Builtin, Text)),
            (r'^( +)([a-zivx]{1,5}\.)( )', bygroups(Text, Name.Builtin, Text)),
            # Other Formatting
            (r'\[\[\w+.*?\]\]', Keyword), # Macro
            (r'(\[[^\s\]]+)(\s+[^\]]+?)?(\])',
             bygroups(Keyword, String, Keyword)), # Link
            (r'^----+$', Keyword), # Horizontal rules
            (r'[^\n\'\[{!_~^,|]+', Text),
            (r'\n', Text),
            (r'.', Text),
        ],
        'codeblock': [
            (r'}}}', Name.Builtin, '#pop'),
            # these blocks are allowed to be nested in Trac, but not MoinMoin
            (r'{{{', Text, '#push'),
            (r'[^{}]+', Comment.Preproc), # slurp boring text
            (r'.', Comment.Preproc), # allow loose { or }
        ],
    }
class RstLexer(RegexLexer):
    """
    For `reStructuredText <http://docutils.sf.net/rst.html>`_ markup.
    *New in Pygments 0.7.*
    Additional options accepted:
    `handlecodeblocks`
        Highlight the contents of ``.. sourcecode:: langauge`` and
        ``.. code:: language`` directives with a lexer for the given
        language (default: ``True``). *New in Pygments 0.8.*
    """
    name = 'reStructuredText'
    aliases = ['rst', 'rest', 'restructuredtext']
    filenames = ['*.rst', '*.rest']
    mimetypes = ["text/x-rst", "text/prs.fallenstein.rst"]
    flags = re.MULTILINE
    def _handle_sourcecode(self, match):
        # Callback for the "Sourcecode directives" rule below: re-emits
        # the directive header tokens, then (if enabled and a matching
        # lexer exists) delegates the indented code body to that lexer.
        from pygments.lexers import get_lexer_by_name
        from pygments.util import ClassNotFound
        # section header
        yield match.start(1), Punctuation, match.group(1)
        yield match.start(2), Text, match.group(2)
        yield match.start(3), Operator.Word, match.group(3)
        yield match.start(4), Punctuation, match.group(4)
        yield match.start(5), Text, match.group(5)
        yield match.start(6), Keyword, match.group(6)
        yield match.start(7), Text, match.group(7)
        # lookup lexer if wanted and existing
        lexer = None
        if self.handlecodeblocks:
            try:
                lexer = get_lexer_by_name(match.group(6).strip())
            except ClassNotFound:
                pass
        indention = match.group(8)
        indention_size = len(indention)
        code = (indention + match.group(9) + match.group(10) + match.group(11))
        # no lexer for this language. handle it like it was a code block
        if lexer is None:
            yield match.start(8), String, code
            return
        # highlight the lines with the lexer.
        # Strip the common indentation off each line, remember where it
        # was (as insertions) and lex only the dedented code.
        ins = []
        codelines = code.splitlines(True)
        code = ''
        for line in codelines:
            if len(line) > indention_size:
                ins.append((len(code), [(0, Text, line[:indention_size])]))
                code += line[indention_size:]
            else:
                code += line
        for item in do_insertions(ins, lexer.get_tokens_unprocessed(code)):
            yield item
    tokens = {
        'root': [
            # Heading with overline
            (r'^(=+|-+|`+|:+|\.+|\'+|"+|~+|\^+|_+|\*+|\++|#+)([ \t]*\n)(.+)(\n)(\1)(\n)',
             bygroups(Generic.Heading, Text, Generic.Heading,
                      Text, Generic.Heading, Text)),
            # Plain heading
            (r'^(\S.*)(\n)(={3,}|-{3,}|`{3,}|:{3,}|\.{3,}|\'{3,}|"{3,}|'
             r'~{3,}|\^{3,}|_{3,}|\*{3,}|\+{3,}|#{3,})(\n)',
             bygroups(Generic.Heading, Text, Generic.Heading, Text)),
            # Bulleted lists
            (r'^(\s*)([-*+])( .+\n(?:\1  .+\n)*)',
             bygroups(Text, Number, using(this, state='inline'))),
            # Numbered lists
            (r'^(\s*)([0-9#ivxlcmIVXLCM]+\.)( .+\n(?:\1  .+\n)*)',
             bygroups(Text, Number, using(this, state='inline'))),
            (r'^(\s*)(\(?[0-9#ivxlcmIVXLCM]+\))( .+\n(?:\1  .+\n)*)',
             bygroups(Text, Number, using(this, state='inline'))),
            # Numbered, but keep words at BOL from becoming lists
            (r'^(\s*)([A-Z]+\.)( .+\n(?:\1  .+\n)+)',
             bygroups(Text, Number, using(this, state='inline'))),
            (r'^(\s*)(\(?[A-Za-z]+\))( .+\n(?:\1  .+\n)+)',
             bygroups(Text, Number, using(this, state='inline'))),
            # Sourcecode directives
            (r'^( *\.\.)(\s*)((?:source)?code)(::)([ \t]*)([^\n]+)'
             r'(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\8.*|)\n)+)',
             _handle_sourcecode),
            # A directive
            (r'^( *\.\.)(\s*)([\w-]+)(::)(?:([ \t]*)(.+))?',
             bygroups(Punctuation, Text, Operator.Word, Punctuation, Text, Keyword)),
            # A reference target
            (r'^( *\.\.)(\s*)([\w\t ]+:)(.*?)$',
             bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))),
            # A footnote target
            (r'^( *\.\.)(\s*)(\[.+\])(.*?)$',
             bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))),
            # Comments
            (r'^ *\.\..*(\n( +.*\n|\n)+)?', Comment.Preproc),
            # Field list
            (r'^( *)(:.*?:)([ \t]+)(.*?)$', bygroups(Text, Name.Class, Text,
                                                     Name.Function)),
            # Definition list
            (r'^([^ ].*(?<!::)\n)((?:(?: +.*)\n)+)',
             bygroups(using(this, state='inline'), using(this, state='inline'))),
            # Code blocks
            (r'(::)(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\3.*|)\n)+)',
             bygroups(String.Escape, Text, String, String, Text, String)),
            include('inline'),
        ],
        'inline': [
            (r'\\.', Text), # escape
            (r'``', String, 'literal'), # code
            (r'(`)(.+?)(`__?)',
             bygroups(Punctuation, using(this), Punctuation)), # reference
            (r'(`.+?`)(:[a-zA-Z0-9-]+?:)?',
             bygroups(Name.Variable, Name.Attribute)), # role
            (r'(:[a-zA-Z0-9-]+?:)(`.+?`)',
             bygroups(Name.Attribute, Name.Variable)), # user-defined role
            (r'\*\*.+?\*\*', Generic.Strong), # Strong emphasis
            (r'\*.+?\*', Generic.Emph), # Emphasis
            (r'\[.*?\]_', String), # Footnote or citation
            (r'<.+?>', Name.Tag), # Hyperlink
            (r'[^\\\n\[*`:]+', Text),
            (r'.', Text),
        ],
        'literal': [
            (r'[^`\\]+', String),
            (r'\\.', String),
            (r'``', String, '#pop'),
            (r'[`\\]', String),
        ]
    }
    def __init__(self, **options):
        # See the class docstring for the `handlecodeblocks` option.
        self.handlecodeblocks = get_bool_opt(options, 'handlecodeblocks', True)
        RegexLexer.__init__(self, **options)
    def analyse_text(text):
        # Confidence hook for pygments' guess_lexer().
        if text[:2] == '..' and text[2:3] != '.':
            return 0.3
        p1 = text.find("\n")
        p2 = text.find("\n", p1 + 1)
        if (p2 > -1 and              # has two lines
            p1 * 2 + 1 == p2 and     # they are the same length
            text[p1+1] in '-=' and   # the next line both starts and ends with
            text[p1+1] == text[p2-1]): # ...a sufficiently high header
            return 0.5
class VimLexer(RegexLexer):
    """
    Lexer for VimL script files.
    *New in Pygments 0.8.*
    """
    name = 'VimL'
    aliases = ['vim']
    filenames = ['*.vim', '.vimrc']
    mimetypes = ['text/x-vim']
    flags = re.MULTILINE
    tokens = {
        'root': [
            # Who decided that doublequote was a good comment character??
            (r'^\s*".*', Comment),
            (r'(?<=\s)"[^\-:.%#=*].*', Comment),
            (r'[ \t]+', Text),
            # TODO: regexes can have other delims
            (r'/(\\\\|\\/|[^\n/])*/', String.Regex),
            (r'"(\\\\|\\"|[^\n"])*"', String.Double),
            (r"'(\\\\|\\'|[^\n'])*'", String.Single),
            (r'-?\d+', Number),
            (r'#[0-9a-f]{6}', Number.Hex),
            (r'^:', Punctuation),
            (r'[()<>+=!|,~-]', Punctuation), # Inexact list.  Looks decent.
            (r'\b(let|if|else|endif|elseif|fun|function|endfunction)\b',
             Keyword),
            (r'\b(NONE|bold|italic|underline|dark|light)\b', Name.Builtin),
            (r'\b\w+\b', Name.Other), # These are postprocessed below
            (r'.', Text),
        ],
    }
    def __init__(self, **options):
        # Sorted (abbreviation, full-name) pair lists generated by
        # scripts/get_vimkw.py; used by is_in() below.
        from pygments.lexers._vimbuiltins import command, option, auto
        self._cmd = command
        self._opt = option
        self._aut = auto
        RegexLexer.__init__(self, **options)
    def is_in(self, w, mapping):
        r"""
        It's kind of difficult to decide if something might be a keyword
        in VimL because it allows you to abbreviate them.  In fact,
        'ab[breviate]' is a good example.  :ab, :abbre, or :abbreviate are
        valid ways to call it so rather than making really awful regexps
        like::
            \bab(?:b(?:r(?:e(?:v(?:i(?:a(?:t(?:e)?)?)?)?)?)?)?)?\b
        we match `\b\w+\b` and then call is_in() on those tokens.  See
        `scripts/get_vimkw.py` for how the lists are extracted.
        """
        # NOTE(review): `bisect` here is presumably `from bisect import
        # bisect` at module level -- the import is outside this chunk.
        # Binary-search the sorted mapping for an entry whose abbreviation
        # is a prefix of `w` and whose full name `w` is a prefix of.
        p = bisect(mapping, (w,))
        if p > 0:
            if mapping[p-1][0] == w[:len(mapping[p-1][0])] and \
               mapping[p-1][1][:len(w)] == w: return True
        if p < len(mapping):
            return mapping[p][0] == w[:len(mapping[p][0])] and \
                   mapping[p][1][:len(w)] == w
        return False
    def get_tokens_unprocessed(self, text):
        # Reclassify generic Name.Other tokens as commands/options using
        # the is_in() abbreviation lookup.
        # TODO: builtins are only subsequent tokens on lines
        #       and 'keywords' only happen at the beginning except
        #       for :au ones
        for index, token, value in \
            RegexLexer.get_tokens_unprocessed(self, text):
            if token is Name.Other:
                if self.is_in(value, self._cmd):
                    yield index, Keyword, value
                elif self.is_in(value, self._opt) or \
                     self.is_in(value, self._aut):
                    yield index, Name.Builtin, value
                else:
                    yield index, Text, value
            else:
                yield index, token, value
class GettextLexer(RegexLexer):
    """
    Lexer for Gettext catalog files.
    *New in Pygments 0.9.*
    """
    name = 'Gettext Catalog'
    aliases = ['pot', 'po']
    filenames = ['*.pot', '*.po']
    mimetypes = ['application/x-gettext', 'text/x-gettext', 'text/gettext']
    tokens = {
        'root': [
            # '#,' flag comments and '#:' reference comments get their
            # own token types; all other '#' lines are plain comments
            (r'^#,\s.*?$', Keyword.Type),
            (r'^#:\s.*?$', Keyword.Declaration),
            #(r'^#$', Comment),
            (r'^(#|#\.\s|#\|\s|#~\s|#\s).*$', Comment.Single),
            # header entries like '"Content-Type: ..."'
            (r'^(")([\w-]*:)(.*")$',
             bygroups(String, Name.Property, String)),
            (r'^".*"$', String),
            (r'^(msgid|msgid_plural|msgstr)(\s+)(".*")$',
             bygroups(Name.Variable, Text, String)),
            (r'^(msgstr\[)(\d)(\])(\s+)(".*")$',
             bygroups(Name.Variable, Number.Integer, Name.Variable, Text, String)),
        ]
    }
class SquidConfLexer(RegexLexer):
    """
    Lexer for `squid <http://www.squid-cache.org/>`_ configuration files.
    *New in Pygments 0.9.*
    """
    name = 'SquidConf'
    aliases = ['squidconf', 'squid.conf', 'squid']
    filenames = ['squid.conf']
    mimetypes = ['text/x-squidconf']
    flags = re.IGNORECASE
    # squid.conf directive names.
    # NOTE(review): "cahce_replacement_policy" looks like a typo for
    # "cache_replacement_policy" -- fixing it would change which words are
    # highlighted, so it is left as-is here; confirm against squid docs.
    keywords = [ "acl", "always_direct", "announce_host",
                 "announce_period", "announce_port", "announce_to",
                 "anonymize_headers", "append_domain", "as_whois_server",
                 "auth_param_basic", "authenticate_children",
                 "authenticate_program", "authenticate_ttl", "broken_posts",
                 "buffered_logs", "cache_access_log", "cache_announce",
                 "cache_dir", "cache_dns_program", "cache_effective_group",
                 "cache_effective_user", "cache_host", "cache_host_acl",
                 "cache_host_domain", "cache_log", "cache_mem",
                 "cache_mem_high", "cache_mem_low", "cache_mgr",
                 "cachemgr_passwd", "cache_peer", "cache_peer_access",
                 "cahce_replacement_policy", "cache_stoplist",
                 "cache_stoplist_pattern", "cache_store_log", "cache_swap",
                 "cache_swap_high", "cache_swap_log", "cache_swap_low",
                 "client_db", "client_lifetime", "client_netmask",
                 "connect_timeout", "coredump_dir", "dead_peer_timeout",
                 "debug_options", "delay_access", "delay_class",
                 "delay_initial_bucket_level", "delay_parameters",
                 "delay_pools", "deny_info", "dns_children", "dns_defnames",
                 "dns_nameservers", "dns_testnames", "emulate_httpd_log",
                 "err_html_text", "fake_user_agent", "firewall_ip",
                 "forwarded_for", "forward_snmpd_port", "fqdncache_size",
                 "ftpget_options", "ftpget_program", "ftp_list_width",
                 "ftp_passive", "ftp_user", "half_closed_clients",
                 "header_access", "header_replace", "hierarchy_stoplist",
                 "high_response_time_warning", "high_page_fault_warning",
                 "htcp_port", "http_access", "http_anonymizer", "httpd_accel",
                 "httpd_accel_host", "httpd_accel_port",
                 "httpd_accel_uses_host_header", "httpd_accel_with_proxy",
                 "http_port", "http_reply_access", "icp_access",
                 "icp_hit_stale", "icp_port", "icp_query_timeout",
                 "ident_lookup", "ident_lookup_access", "ident_timeout",
                 "incoming_http_average", "incoming_icp_average",
                 "inside_firewall", "ipcache_high", "ipcache_low",
                 "ipcache_size", "local_domain", "local_ip", "logfile_rotate",
                 "log_fqdn", "log_icp_queries", "log_mime_hdrs",
                 "maximum_object_size", "maximum_single_addr_tries",
                 "mcast_groups", "mcast_icp_query_timeout", "mcast_miss_addr",
                 "mcast_miss_encode_key", "mcast_miss_port", "memory_pools",
                 "memory_pools_limit", "memory_replacement_policy",
                 "mime_table", "min_http_poll_cnt", "min_icp_poll_cnt",
                 "minimum_direct_hops", "minimum_object_size",
                 "minimum_retry_timeout", "miss_access", "negative_dns_ttl",
                 "negative_ttl", "neighbor_timeout", "neighbor_type_domain",
                 "netdb_high", "netdb_low", "netdb_ping_period",
                 "netdb_ping_rate", "never_direct", "no_cache",
                 "passthrough_proxy", "pconn_timeout", "pid_filename",
                 "pinger_program", "positive_dns_ttl", "prefer_direct",
                 "proxy_auth", "proxy_auth_realm", "query_icmp", "quick_abort",
                 "quick_abort", "quick_abort_max", "quick_abort_min",
                 "quick_abort_pct", "range_offset_limit", "read_timeout",
                 "redirect_children", "redirect_program",
                 "redirect_rewrites_host_header", "reference_age",
                 "reference_age", "refresh_pattern", "reload_into_ims",
                 "request_body_max_size", "request_size", "request_timeout",
                 "shutdown_lifetime", "single_parent_bypass",
                 "siteselect_timeout", "snmp_access", "snmp_incoming_address",
                 "snmp_port", "source_ping", "ssl_proxy",
                 "store_avg_object_size", "store_objects_per_bucket",
                 "strip_query_terms", "swap_level1_dirs", "swap_level2_dirs",
                 "tcp_incoming_address", "tcp_outgoing_address",
                 "tcp_recv_bufsize", "test_reachability", "udp_hit_obj",
                 "udp_hit_obj_size", "udp_incoming_address",
                 "udp_outgoing_address", "unique_hostname", "unlinkd_program",
                 "uri_whitespace", "useragent_log", "visible_hostname",
                 "wais_relay", "wais_relay_host", "wais_relay_port",
                 ]
    # option values that may follow a directive
    opts = [ "proxy-only", "weight", "ttl", "no-query", "default",
             "round-robin", "multicast-responder", "on", "off", "all",
             "deny", "allow", "via", "parent", "no-digest", "heap", "lru",
             "realm", "children", "credentialsttl", "none", "disable",
             "offline_toggle", "diskd", "q1", "q2",
             ]
    # cachemgr action names ('stats/...' and 'log/...' variants below)
    actions = [ "shutdown", "info", "parameter", "server_list",
                "client_list", r'squid\.conf',
                ]
    actions_stats = [ "objects", "vm_objects", "utilization",
                      "ipcache", "fqdncache", "dns", "redirector", "io",
                      "reply_headers", "filedescriptors", "netdb",
                      ]
    actions_log = [ "status", "enable", "disable", "clear"]
    # acl type names
    acls = [ "url_regex", "urlpath_regex", "referer_regex", "port",
             "proto", "req_mime_type", "rep_mime_type", "method",
             "browser", "user", "src", "dst", "time", "dstdomain", "ident",
             "snmp_community",
             ]
    # dotted-quad IPv4 address
    ip_re = r'\b(?:\d{1,3}\.){3}\d{1,3}\b'
    # class-body helper: build a word-boundary alternation regex from a
    # word list (note: the parameter shadows the builtin `list`)
    def makelistre(list):
        return r'\b(?:'+'|'.join(list)+r')\b'
    tokens = {
        'root': [
            (r'\s+', Text),
            (r'#', Comment, 'comment'),
            (makelistre(keywords), Keyword),
            (makelistre(opts), Name.Constant),
            # Actions
            (makelistre(actions), String),
            (r'stats/'+makelistre(actions), String),
            (r'log/'+makelistre(actions)+r'=', String),
            (makelistre(acls), Keyword),
            (ip_re+r'(?:/(?:'+ip_re+r')|\d+)?', Number),
            (r'\b\d+\b', Number),
            (r'\S+', Text),
        ],
        'comment': [
            # '# TAG: ...' lines from the annotated default config
            (r'\s*TAG:.*', String.Escape, '#pop'),
            (r'.*', Comment, '#pop'),
        ],
    }
class DebianControlLexer(RegexLexer):
    """
    Lexer for Debian ``control`` files and ``apt-cache show <pkg>`` outputs.
    *New in Pygments 0.9.*
    """
    name = 'Debian Control file'
    aliases = ['control']
    filenames = ['control']
    tokens = {
        'root': [
            # fields with dedicated sub-states, then well-known numeric
            # fields, then a generic 'Field: value' fallback
            (r'^(Description)', Keyword, 'description'),
            (r'^(Maintainer)(:\s*)', bygroups(Keyword, Text), 'maintainer'),
            (r'^((Build-)?Depends)', Keyword, 'depends'),
            (r'^((?:Python-)?Version)(:\s*)([^\s]+)$',
             bygroups(Keyword, Text, Number)),
            (r'^((?:Installed-)?Size)(:\s*)([^\s]+)$',
             bygroups(Keyword, Text, Number)),
            (r'^(MD5Sum|SHA1|SHA256)(:\s*)([^\s]+)$',
             bygroups(Keyword, Text, Number)),
            (r'^([a-zA-Z\-0-9\.]*?)(:\s*)(.*?)$',
             bygroups(Keyword, Whitespace, String)),
        ],
        'maintainer': [
            (r'<[^>]+>', Generic.Strong),
            (r'<[^>]+>$', Generic.Strong, '#pop'),
            (r',\n?', Text),
            (r'.', Text),
        ],
        'description': [
            (r'(.*)(Homepage)(: )([^\s]+)', bygroups(Text, String, Name, Name.Class)),
            (r':.*\n', Generic.Strong),
            (r' .*\n', Text),
            # empty pattern always matches: leave when the indented
            # description block ends
            ('', Text, '#pop'),
        ],
        'depends': [
            (r':\s*', Text),
            # substitution variables like ${shlibs:Depends}
            (r'(\$)(\{)(\w+\s*:\s*\w+)', bygroups(Operator, Text, Name.Entity)),
            (r'\(', Text, 'depend_vers'),
            (r',', Text),
            (r'\|', Operator),
            (r'[\s]+', Text),
            (r'[}\)]\s*$', Text, '#pop'),
            (r'[}]', Text),
            (r'[^,]$', Name.Function, '#pop'),
            (r'([\+\.a-zA-Z0-9-][\s\n]*)', Name.Function),
        ],
        'depend_vers': [
            # version restrictions like (>= 1.2-3)
            (r'\),', Text, '#pop'),
            (r'\)[^,]', Text, '#pop:2'),
            (r'([><=]+)(\s*)([^\)]+)', bygroups(Operator, Text, Number))
        ]
    }
class YamlLexerContext(LexerContext):
    """Lexer context that carries the YAML lexer's indentation state."""
    def __init__(self, *args, **kwds):
        super(YamlLexerContext, self).__init__(*args, **kwds)
        # No indentation level confirmed yet; the candidate for the next
        # block starts at column zero and no block scalar is active.
        self.indent = -1
        self.next_indent = 0
        self.indent_stack = []
        self.block_scalar_indent = None
class YamlLexer(ExtendedRegexLexer):
    """
    Lexer for `YAML <http://yaml.org/>`_, a human-friendly data serialization
    language.
    *New in Pygments 0.11.*
    """
    name = 'YAML'
    aliases = ['yaml']
    filenames = ['*.yaml', '*.yml']
    mimetypes = ['text/x-yaml']
    # The nested functions below are callback *factories*: they run once
    # at class-creation time and return the callbacks that are placed in
    # the `tokens` table.  Each callback receives the lexer, the regex
    # match, and the YamlLexerContext holding the indentation state.
    def something(token_class):
        """Do not produce empty tokens."""
        def callback(lexer, match, context):
            text = match.group()
            if not text:
                return
            yield match.start(), token_class, text
            context.pos = match.end()
        return callback
    def reset_indent(token_class):
        """Reset the indentation levels."""
        def callback(lexer, match, context):
            text = match.group()
            context.indent_stack = []
            context.indent = -1
            context.next_indent = 0
            context.block_scalar_indent = None
            yield match.start(), token_class, text
            context.pos = match.end()
        return callback
    def save_indent(token_class, start=False):
        """Save a possible indentation level."""
        def callback(lexer, match, context):
            text = match.group()
            extra = ''
            if start:
                context.next_indent = len(text)
                if context.next_indent < context.indent:
                    # dedent: unwind the stack to the enclosing level
                    while context.next_indent < context.indent:
                        context.indent = context.indent_stack.pop()
                    if context.next_indent > context.indent:
                        # dedent landed between saved levels: flag the
                        # excess spaces as an indentation error
                        extra = text[context.indent:]
                        text = text[:context.indent]
            else:
                context.next_indent += len(text)
            if text:
                yield match.start(), token_class, text
            if extra:
                yield match.start()+len(text), token_class.Error, extra
            context.pos = match.end()
        return callback
    def set_indent(token_class, implicit=False):
        """Set the previously saved indentation level."""
        def callback(lexer, match, context):
            text = match.group()
            if context.indent < context.next_indent:
                context.indent_stack.append(context.indent)
                context.indent = context.next_indent
            if not implicit:
                context.next_indent += len(text)
            yield match.start(), token_class, text
            context.pos = match.end()
        return callback
    def set_block_scalar_indent(token_class):
        """Set an explicit indentation level for a block scalar."""
        def callback(lexer, match, context):
            text = match.group()
            context.block_scalar_indent = None
            if not text:
                return
            increment = match.group(1)
            if increment:
                # explicit indentation indicator: digit relative to the
                # current block indentation
                current_indent = max(context.indent, 0)
                increment = int(increment)
                context.block_scalar_indent = current_indent + increment
            if text:
                yield match.start(), token_class, text
            context.pos = match.end()
        return callback
    def parse_block_scalar_empty_line(indent_token_class, content_token_class):
        """Process an empty line in a block scalar."""
        def callback(lexer, match, context):
            text = match.group()
            if (context.block_scalar_indent is None or
                    len(text) <= context.block_scalar_indent):
                if text:
                    yield match.start(), indent_token_class, text
            else:
                # spaces past the scalar indent are scalar content
                indentation = text[:context.block_scalar_indent]
                content = text[context.block_scalar_indent:]
                yield match.start(), indent_token_class, indentation
                yield (match.start()+context.block_scalar_indent,
                       content_token_class, content)
            context.pos = match.end()
        return callback
    def parse_block_scalar_indent(token_class):
        """Process indentation spaces in a block scalar."""
        def callback(lexer, match, context):
            text = match.group()
            if context.block_scalar_indent is None:
                if len(text) <= max(context.indent, 0):
                    # line is not indented past the block: the scalar has
                    # ended, so leave both scalar states
                    context.stack.pop()
                    context.stack.pop()
                    return
                context.block_scalar_indent = len(text)
            else:
                if len(text) < context.block_scalar_indent:
                    context.stack.pop()
                    context.stack.pop()
                    return
            if text:
                yield match.start(), token_class, text
            context.pos = match.end()
        return callback
    def parse_plain_scalar_indent(token_class):
        """Process indentation spaces in a plain scalar."""
        def callback(lexer, match, context):
            text = match.group()
            if len(text) <= context.indent:
                # line no longer indented inside the scalar: leave it
                context.stack.pop()
                context.stack.pop()
                return
            if text:
                yield match.start(), token_class, text
            context.pos = match.end()
        return callback
    tokens = {
        # the root rules
        'root': [
            # ignored whitespaces
            (r'[ ]+(?=#|$)', Text),
            # line breaks
            (r'\n+', Text),
            # a comment
            (r'#[^\n]*', Comment.Single),
            # the '%YAML' directive
            (r'^%YAML(?=[ ]|$)', reset_indent(Name.Tag), 'yaml-directive'),
            # the %TAG directive
            (r'^%TAG(?=[ ]|$)', reset_indent(Name.Tag), 'tag-directive'),
            # document start and document end indicators
            (r'^(?:---|\.\.\.)(?=[ ]|$)', reset_indent(Name.Namespace),
             'block-line'),
            # indentation spaces
            (r'[ ]*(?![ \t\n\r\f\v]|$)', save_indent(Text, start=True),
             ('block-line', 'indentation')),
        ],
        # trailing whitespaces after directives or a block scalar indicator
        'ignored-line': [
            # ignored whitespaces
            (r'[ ]+(?=#|$)', Text),
            # a comment
            (r'#[^\n]*', Comment.Single),
            # line break
            (r'\n', Text, '#pop:2'),
        ],
        # the %YAML directive
        'yaml-directive': [
            # the version number
            (r'([ ]+)([0-9]+\.[0-9]+)',
             bygroups(Text, Number), 'ignored-line'),
        ],
        # the %YAG directive
        'tag-directive': [
            # a tag handle and the corresponding prefix
            (r'([ ]+)(!|![0-9A-Za-z_-]*!)'
             r'([ ]+)(!|!?[0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+)',
             bygroups(Text, Keyword.Type, Text, Keyword.Type),
             'ignored-line'),
        ],
        # block scalar indicators and indentation spaces
        'indentation': [
            # trailing whitespaces are ignored
            (r'[ ]*$', something(Text), '#pop:2'),
            # whitespaces preceeding block collection indicators
            (r'[ ]+(?=[?:-](?:[ ]|$))', save_indent(Text)),
            # block collection indicators
            (r'[?:-](?=[ ]|$)', set_indent(Punctuation.Indicator)),
            # the beginning a block line
            (r'[ ]*', save_indent(Text), '#pop'),
        ],
        # an indented line in the block context
        'block-line': [
            # the line end
            (r'[ ]*(?=#|$)', something(Text), '#pop'),
            # whitespaces separating tokens
            (r'[ ]+', Text),
            # tags, anchors and aliases,
            include('descriptors'),
            # block collections and scalars
            include('block-nodes'),
            # flow collections and quoted scalars
            include('flow-nodes'),
            # a plain scalar
            (r'(?=[^ \t\n\r\f\v?:,\[\]{}#&*!|>\'"%@`-]|[?:-][^ \t\n\r\f\v])',
             something(Name.Variable),
             'plain-scalar-in-block-context'),
        ],
        # tags, anchors, aliases
        'descriptors' : [
            # a full-form tag
            (r'!<[0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+>', Keyword.Type),
            # a tag in the form '!', '!suffix' or '!handle!suffix'
            (r'!(?:[0-9A-Za-z_-]+)?'
             r'(?:![0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+)?', Keyword.Type),
            # an anchor
            (r'&[0-9A-Za-z_-]+', Name.Label),
            # an alias
            (r'\*[0-9A-Za-z_-]+', Name.Variable),
        ],
        # block collections and scalars
        'block-nodes': [
            # implicit key
            (r':(?=[ ]|$)', set_indent(Punctuation.Indicator, implicit=True)),
            # literal and folded scalars
            (r'[|>]', Punctuation.Indicator,
             ('block-scalar-content', 'block-scalar-header')),
        ],
        # flow collections and quoted scalars
        'flow-nodes': [
            # a flow sequence
            (r'\[', Punctuation.Indicator, 'flow-sequence'),
            # a flow mapping
            (r'\{', Punctuation.Indicator, 'flow-mapping'),
            # a single-quoted scalar
            (r'\'', String, 'single-quoted-scalar'),
            # a double-quoted scalar
            (r'\"', String, 'double-quoted-scalar'),
        ],
        # the content of a flow collection
        'flow-collection': [
            # whitespaces
            (r'[ ]+', Text),
            # line breaks
            (r'\n+', Text),
            # a comment
            (r'#[^\n]*', Comment.Single),
            # simple indicators
            (r'[?:,]', Punctuation.Indicator),
            # tags, anchors and aliases
            include('descriptors'),
            # nested collections and quoted scalars
            include('flow-nodes'),
            # a plain scalar
            (r'(?=[^ \t\n\r\f\v?:,\[\]{}#&*!|>\'"%@`])',
             something(Name.Variable),
             'plain-scalar-in-flow-context'),
        ],
        # a flow sequence indicated by '[' and ']'
        'flow-sequence': [
            # include flow collection rules
            include('flow-collection'),
            # the closing indicator
            (r'\]', Punctuation.Indicator, '#pop'),
        ],
        # a flow mapping indicated by '{' and '}'
        'flow-mapping': [
            # include flow collection rules
            include('flow-collection'),
            # the closing indicator
            (r'\}', Punctuation.Indicator, '#pop'),
        ],
        # block scalar lines
        'block-scalar-content': [
            # line break
            (r'\n', Text),
            # empty line
            (r'^[ ]+$',
             parse_block_scalar_empty_line(Text, Name.Constant)),
            # indentation spaces (we may leave the state here)
            (r'^[ ]*', parse_block_scalar_indent(Text)),
            # line content
            (r'[^\n\r\f\v]+', Name.Constant),
        ],
        # the content of a literal or folded scalar
        'block-scalar-header': [
            # indentation indicator followed by chomping flag
            (r'([1-9])?[+-]?(?=[ ]|$)',
             set_block_scalar_indent(Punctuation.Indicator),
             'ignored-line'),
            # chomping flag followed by indentation indicator
            (r'[+-]?([1-9])?(?=[ ]|$)',
             set_block_scalar_indent(Punctuation.Indicator),
             'ignored-line'),
        ],
        # ignored and regular whitespaces in quoted scalars
        'quoted-scalar-whitespaces': [
            # leading and trailing whitespaces are ignored
            (r'^[ ]+|[ ]+$', Text),
            # line breaks are ignored
            (r'\n+', Text),
            # other whitespaces are a part of the value
            (r'[ ]+', Name.Variable),
        ],
        # single-quoted scalars
        'single-quoted-scalar': [
            # include whitespace and line break rules
            include('quoted-scalar-whitespaces'),
            # escaping of the quote character
            (r'\'\'', String.Escape),
            # regular non-whitespace characters
            (r'[^ \t\n\r\f\v\']+', String),
            # the closing quote
            (r'\'', String, '#pop'),
        ],
        # double-quoted scalars
        'double-quoted-scalar': [
            # include whitespace and line break rules
            include('quoted-scalar-whitespaces'),
            # escaping of special characters
            (r'\\[0abt\tn\nvfre "\\N_LP]', String),
            # escape codes
            (r'\\(?:x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4}|U[0-9A-Fa-f]{8})',
             String.Escape),
            # regular non-whitespace characters
            (r'[^ \t\n\r\f\v\"\\]+', String),
            # the closing quote
            (r'"', String, '#pop'),
        ],
        # the beginning of a new line while scanning a plain scalar
        'plain-scalar-in-block-context-new-line': [
            # empty lines
            (r'^[ ]+$', Text),
            # line breaks
            (r'\n+', Text),
            # document start and document end indicators
            (r'^(?=---|\.\.\.)', something(Name.Namespace), '#pop:3'),
            # indentation spaces (we may leave the block line state here)
            (r'^[ ]*', parse_plain_scalar_indent(Text), '#pop'),
        ],
        # a plain scalar in the block context
        'plain-scalar-in-block-context': [
            # the scalar ends with the ':' indicator
            (r'[ ]*(?=:[ ]|:$)', something(Text), '#pop'),
            # the scalar ends with whitespaces followed by a comment
            (r'[ ]+(?=#)', Text, '#pop'),
            # trailing whitespaces are ignored
            (r'[ ]+$', Text),
            # line breaks are ignored
            (r'\n+', Text, 'plain-scalar-in-block-context-new-line'),
            # other whitespaces are a part of the value
            (r'[ ]+', Literal.Scalar.Plain),
            # regular non-whitespace characters
            (r'(?::(?![ \t\n\r\f\v])|[^ \t\n\r\f\v:])+', Literal.Scalar.Plain),
        ],
        # a plain scalar is the flow context
        'plain-scalar-in-flow-context': [
            # the scalar ends with an indicator character
            (r'[ ]*(?=[,:?\[\]{}])', something(Text), '#pop'),
            # the scalar ends with a comment
            (r'[ ]+(?=#)', Text, '#pop'),
            # leading and trailing whitespaces are ignored
            (r'^[ ]+|[ ]+$', Text),
            # line breaks are ignored
            (r'\n+', Text),
            # other whitespaces are a part of the value
            (r'[ ]+', Name.Variable),
            # regular non-whitespace characters
            (r'[^ \t\n\r\f\v,:?\[\]{}]+', Name.Variable),
        ],
    }
    def get_tokens_unprocessed(self, text=None, context=None):
        # Ensure the context is a YamlLexerContext so that the callbacks
        # above can read/write the indentation state.
        if context is None:
            context = YamlLexerContext(text, 0)
        return super(YamlLexer, self).get_tokens_unprocessed(text, context)
class LighttpdConfLexer(RegexLexer):
    """
    Lexer for `Lighttpd <http://lighttpd.net/>`_ configuration files.
    *New in Pygments 0.11.*
    """
    name = 'Lighttpd configuration file'
    aliases = ['lighty', 'lighttpd']
    filenames = []
    mimetypes = ['text/x-lighttpd-conf']
    tokens = {
        # A single flat state: lighttpd configs have no nested constructs
        # that require sub-states.
        'root': [
            (r'#.*\n', Comment.Single),      # line comments
            (r'/\S*', Name),  # pathname
            (r'[a-zA-Z._-]+', Keyword),      # option / module names
            (r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number),  # IPv4 address, optional CIDR suffix
            (r'[0-9]+', Number),             # plain integers
            (r'=>|=~|\+=|==|=|\+', Operator),  # assignment and match operators
            (r'\$[A-Z]+', Name.Builtin),     # builtin variables, e.g. $HTTP, $SERVER
            (r'[(){}\[\],]', Punctuation),
            (r'"([^"\\]*(?:\\.[^"\\]*)*)"', String.Double),  # double-quoted string with escapes
            (r'\s+', Text),
        ],
    }
class NginxConfLexer(RegexLexer):
    """
    Lexer for `Nginx <http://nginx.net/>`_ configuration files.
    *New in Pygments 0.11.*
    """
    name = 'Nginx configuration file'
    aliases = ['nginx']
    filenames = []
    mimetypes = ['text/x-nginx-conf']
    tokens = {
        # 'root' recognises directives; a directive opens 'stmt', which may
        # open a '{'-delimited 'block'. 'base' holds rules shared by all
        # three states.
        'root': [
            (r'(include)(\s+)([^\s;]+)', bygroups(Keyword, Text, Name)),
            (r'[^\s;#]+', Keyword, 'stmt'),  # directive name starts a statement
            include('base'),
        ],
        'block': [
            (r'}', Punctuation, '#pop:2'),   # closes both 'block' and its 'stmt'
            (r'[^\s;#]+', Keyword.Namespace, 'stmt'),
            include('base'),
        ],
        'stmt': [
            (r'{', Punctuation, 'block'),    # directive introduces a block
            (r';', Punctuation, '#pop'),     # directive terminated
            include('base'),
        ],
        'base': [
            (r'#.*\n', Comment.Single),
            (r'on|off', Name.Constant),
            (r'\$[^\s;#()]+', Name.Variable),  # nginx variables
            (r'([a-z0-9.-]+)(:)([0-9]+)',      # host:port pairs
             bygroups(Name, Punctuation, Number.Integer)),
            (r'[a-z-]+/[a-z-+]+', String), # mimetype
            #(r'[a-zA-Z._-]+', Keyword),
            (r'[0-9]+[km]?\b', Number.Integer),  # sizes with optional k/m suffix
            (r'(~)(\s*)([^\s{]+)', bygroups(Punctuation, Text, String.Regex)),
            (r'[:=~]', Punctuation),
            (r'[^\s;#{}$]+', String), # catch all
            (r'/[^\s;#]*', Name), # pathname
            (r'\s+', Text),
        ],
    }
class CMakeLexer(RegexLexer):
    """
    Lexer for `CMake <http://cmake.org/Wiki/CMake>`_ files.
    *New in Pygments 1.2.*
    """
    name = 'CMake'
    aliases = ['cmake']
    filenames = ['*.cmake']
    mimetypes = ['text/x-cmake']
    tokens = {
        'root': [
            # Any identifier followed by '(' is treated as a command
            # invocation. (This generic rule superseded an explicit
            # alternation of all builtin command names that used to live
            # here as commented-out code; the dead listing has been
            # removed.)
            (r'\b([A-Za-z_]+)([ \t]*)(\()', bygroups(Name.Builtin, Text,
                                                     Punctuation), 'args'),
            include('keywords'),
            include('ws')
        ],
        'args': [
            (r'\(', Punctuation, '#push'),   # nested parenthesis groups
            (r'\)', Punctuation, '#pop'),    # end of the argument list
            # ${VAR} dereferences
            (r'(\${)(.+?)(})', bygroups(Operator, Name.Variable, Operator)),
            (r'(?s)".*?"', String.Double),   # quoted argument (may span lines)
            (r'\\\S+', String),              # escaped character
            (r'[^\)$"# \t\n]+', String),     # bare-word argument
            (r'\n', Text), # explicitly legal
            include('keywords'),
            include('ws')
        ],
        # Currently unused; kept so existing references to the state name
        # (if any) keep working.
        'string': [
        ],
        'keywords': [
            (r'\b(WIN32|UNIX|APPLE|CYGWIN|BORLAND|MINGW|MSVC|MSVC_IDE|MSVC60|'
             r'MSVC70|MSVC71|MSVC80|MSVC90)\b', Keyword),
        ],
        'ws': [
            (r'[ \t]+', Text),
            (r'#.+\n', Comment),
        ]
    }
|
|
# Licensed under an MIT open source license - see LICENSE
from __future__ import print_function, absolute_import, division
import numpy as np
from astropy import units as u
from astropy.wcs import WCS
from six import string_types
import statsmodels.api as sm
from warnings import warn
from astropy.utils.console import ProgressBar
from itertools import product
from ..psds import pspec, make_radial_arrays
from ..base_statistic import BaseStatisticMixIn
from ...io import common_types, threed_types, input_data
from ..stats_utils import common_scale, fourier_shift, pixel_shift
from ..fitting_utils import clip_func, residual_bootstrap
from ..elliptical_powerlaw import (fit_elliptical_powerlaw,
inverse_interval_transform,
inverse_interval_transform_stderr)
from ..stats_warnings import TurbuStatMetricWarning
class SCF(BaseStatisticMixIn):
'''
Computes the Spectral Correlation Function of a data cube
(Rosolowsky et al, 1999).
Parameters
----------
cube : %(dtypes)s
Data cube.
header : FITS header, optional
Header for the cube.
size : int, optional
The total size of the lags used in one dimension in pixels. The maximum
lag size will be (size - 1) / 2 in each direction.
roll_lags : `~numpy.ndarray` or `~astropy.units.Quantity`, optional
Pass a custom array of lag values. An odd number of lags, centered at
0, must be given. If no units are given, it is
assumed that the lags are in pixels. The lags should have
symmetric positive and negative values (e.g., [-1, 0, 1]).
distance : `~astropy.units.Quantity`, optional
Physical distance to the region in the data.
Examples
--------
>>> from spectral_cube import SpectralCube
>>> from turbustat.statistics import SCF
>>> cube = SpectralCube.read("Design4.13co.fits") # doctest: +SKIP
>>> scf = SCF(cube) # doctest: +SKIP
>>> scf.run(verbose=True) # doctest: +SKIP
'''
__doc__ %= {"dtypes": " or ".join(common_types + threed_types)}
def __init__(self, cube, header=None, size=11, roll_lags=None,
distance=None):
super(SCF, self).__init__()
# Set data and header
self.input_data_header(cube, header)
if distance is not None:
self.distance = distance
if roll_lags is None:
if size % 2 == 0:
Warning("Size must be odd. Reducing size to next lowest odd"
" number.")
size = size - 1
self.roll_lags = (np.arange(size) - size // 2) * u.pix
else:
if roll_lags.size % 2 == 0:
Warning("Size of roll_lags must be odd. Reducing size to next"
"lowest odd number.")
roll_lags = roll_lags[: -1]
if isinstance(roll_lags, u.Quantity):
pass
elif isinstance(roll_lags, np.ndarray):
roll_lags = roll_lags * u.pix
else:
raise TypeError("roll_lags must be an astropy.units.Quantity"
" array or a numpy.ndarray.")
self.roll_lags = roll_lags
# Make sure that we can convert the lags
self._to_pixel(self.roll_lags)
self.size = self.roll_lags.size
self._scf_surface = None
self._scf_spectrum_stddev = None
self._fit2D_flag = False
    @property
    def roll_lags(self):
        '''
        Pixel values that the cube is rolled by to compute the SCF correlation
        surface.
        '''
        return self._roll_lags
    @roll_lags.setter
    def roll_lags(self, value):
        # Needs to be a quantity with a unit
        if not hasattr(value, "unit"):
            raise ValueError("roll_lags must be an astropy.units.Quantity.")
        # Validate the unit at assignment time so a bad unit fails here
        # rather than in the middle of the surface computation.
        try:
            self._to_pixel(value)
        except u.UnitConversionError:
            raise u.UnitConversionError("Cannot convert given roll lags to "
                                        "pixel units. `roll_lags` must have"
                                        " pixel, angular, or physical (if a"
                                        " distance is given) units.")
        self._roll_lags = value
    @property
    def scf_surface(self):
        '''
        SCF correlation array. ``None`` until `~SCF.compute_surface` is run.
        '''
        return self._scf_surface
    @property
    def scf_spectrum(self):
        '''
        Azimuthally averaged 1D SCF spectrum
        '''
        return self._scf_spectrum
    @property
    def scf_spectrum_stddev(self):
        '''
        Standard deviation of the `~SCF.scf_spectrum`
        '''
        return self._scf_spectrum_stddev
    @property
    def lags(self):
        '''
        Values of the lags, in pixels, to compute SCF at
        '''
        return self._lags
def compute_surface(self, boundary='continuous', show_progress=True):
'''
Computes the SCF up to the given lag value. This is an
expensive operation and could take a long time to calculate.
Parameters
----------
boundary : {"continuous", "cut"}
Treat the boundary as continuous (wrap-around) or cut values
beyond the edge (i.e., for most observational data).
show_progress : bool, optional
Show a progress bar when computing the surface. =
'''
if boundary not in ["continuous", "cut"]:
raise ValueError("boundary must be 'continuous' or 'cut'.")
self._scf_surface = np.zeros((self.size, self.size))
# Convert the lags into pixel units.
pix_lags = self._to_pixel(self.roll_lags).value
dx = pix_lags.copy()
dy = pix_lags.copy()
if show_progress:
bar = ProgressBar(len(dx) * len(dy))
for n, (x_shift, y_shift) in enumerate(product(dx, dy)):
i, j = np.unravel_index(n, (len(dx), len(dy)))
if x_shift == 0 and y_shift == 0:
self._scf_surface[j, i] = 1.
if x_shift == 0:
tmp = self.data
else:
if float(x_shift).is_integer():
shift_func = pixel_shift
else:
shift_func = fourier_shift
tmp = shift_func(self.data, x_shift, axis=1)
if y_shift != 0:
if float(y_shift).is_integer():
shift_func = pixel_shift
else:
shift_func = fourier_shift
tmp = shift_func(tmp, y_shift, axis=2)
if boundary is "cut":
# Always round up to the nearest integer.
x_shift = np.ceil(x_shift).astype(int)
y_shift = np.ceil(y_shift).astype(int)
if x_shift < 0:
x_slice_data = slice(None, tmp.shape[1] + x_shift)
x_slice_tmp = slice(-x_shift, None)
else:
x_slice_data = slice(x_shift, None)
x_slice_tmp = slice(None, tmp.shape[1] - x_shift)
if y_shift < 0:
y_slice_data = slice(None, tmp.shape[2] + y_shift)
y_slice_tmp = slice(-y_shift, None)
else:
y_slice_data = slice(y_shift, None)
y_slice_tmp = slice(None, tmp.shape[2] - y_shift)
data_slice = (slice(None), x_slice_data, y_slice_data)
tmp_slice = (slice(None), x_slice_tmp, y_slice_tmp)
elif boundary is "continuous":
data_slice = (slice(None),) * 3
tmp_slice = (slice(None),) * 3
values = \
np.nansum(((self.data[data_slice] - tmp[tmp_slice]) ** 2),
axis=0) / \
(np.nansum(self.data[data_slice] ** 2, axis=0) +
np.nansum(tmp[tmp_slice] ** 2, axis=0))
scf_value = 1. - \
np.sqrt(np.nansum(values) / np.sum(np.isfinite(values)))
if scf_value > 1:
raise ValueError("Cannot have a correlation above 1. Check "
"your input data. Contact the TurbuStat "
"authors if the problem persists.")
self._scf_surface[j, i] = scf_value
if show_progress:
bar.update(n + 1)
    def compute_spectrum(self, **kwargs):
        '''
        Compute the 1D spectrum as a function of lag. Can optionally
        use log-spaced bins. kwargs are passed into the pspec function,
        which provides many options. The default settings are applicable in
        nearly all use cases.
        Parameters
        ----------
        kwargs : passed to `turbustat.statistics.psds.pspec`
        '''
        # If scf_surface hasn't been computed, do it
        if self.scf_surface is None:
            self.compute_surface()
        # Log-spaced radial bins are explicitly disabled for the SCF; strip
        # the option out before forwarding kwargs to pspec.
        if kwargs.get("logspacing"):
            warn("Disabled log-spaced bins. This does not work well for the"
                 " SCF.", TurbuStatMetricWarning)
            kwargs.pop('logspacing')
        # When an azimuthal constraint (theta_0) is requested, pspec returns
        # an extra azimuthal mask in its output tuple.
        if kwargs.get("theta_0"):
            azim_constraint_flag = True
        else:
            azim_constraint_flag = False
        out = pspec(self.scf_surface, return_stddev=True,
                    logspacing=False, return_freqs=False, **kwargs)
        self._azim_constraint_flag = azim_constraint_flag
        if azim_constraint_flag:
            self._lags, self._scf_spectrum, self._scf_spectrum_stddev, \
                self._azim_mask = out
        else:
            self._lags, self._scf_spectrum, self._scf_spectrum_stddev = out
        # Rescale the bin centres by the lag spacing; assumes the roll lags
        # are evenly spaced.
        roll_lag_diff = np.abs(self.roll_lags[1] - self.roll_lags[0])
        self._lags = self._lags * roll_lag_diff
def fit_plaw(self, xlow=None, xhigh=None, verbose=False, bootstrap=False,
**bootstrap_kwargs):
'''
Fit a power-law to the SCF spectrum.
Parameters
----------
xlow : `~astropy.units.Quantity`, optional
Lower lag value limit to consider in the fit.
xhigh : `~astropy.units.Quantity`, optional
Upper lag value limit to consider in the fit.
verbose : bool, optional
Show fit summary when enabled.
'''
pix_lags = self._to_pixel(self.lags)
x = np.log10(pix_lags.value)
y = np.log10(self.scf_spectrum)
if xlow is not None:
if not isinstance(xlow, u.Quantity):
raise TypeError("xlow must be an astropy.units.Quantity.")
# Convert xlow into the same units as the lags
xlow = self._to_pixel(xlow)
self._xlow = xlow
lower_limit = x >= np.log10(xlow.value)
else:
lower_limit = \
np.ones_like(self.scf_spectrum, dtype=bool)
self._xlow = np.abs(self.lags).min()
if xhigh is not None:
if not isinstance(xhigh, u.Quantity):
raise TypeError("xlow must be an astropy.units.Quantity.")
# Convert xhigh into the same units as the lags
xhigh = self._to_pixel(xhigh)
self._xhigh = xhigh
upper_limit = x <= np.log10(xhigh.value)
else:
upper_limit = \
np.ones_like(self.scf_spectrum, dtype=bool)
self._xhigh = np.abs(self.lags).max()
within_limits = np.logical_and(lower_limit, upper_limit)
if not within_limits.any():
raise ValueError("Limits have removed all lag values. Make xlow"
" and xhigh less restrictive.")
y = y[within_limits]
x = x[within_limits]
x = sm.add_constant(x)
# If the std were computed, use them as weights
# Converting to the log stds doesn't matter since the weights
# remain proportional to 1/sigma^2, and an overal normalization is
# applied in the fitting routine.
weights = self.scf_spectrum_stddev[within_limits] ** -2
model = sm.WLS(y, x, missing='drop', weights=weights)
self.fit = model.fit(cov_type='HC3')
self._slope = self.fit.params[1]
if bootstrap:
stderrs = residual_bootstrap(self.fit,
**bootstrap_kwargs)
self._slope_err = stderrs[1]
else:
self._slope_err = self.fit.bse[1]
self._bootstrap_flag = bootstrap
if verbose:
print(self.fit.summary())
if self._bootstrap_flag:
print("Bootstrapping used to find stderrs! "
"Errors may not equal those shown above.")
    @property
    def slope(self):
        '''
        SCF spectrum slope from the 1D power-law fit.
        '''
        return self._slope
    @property
    def slope_err(self):
        '''
        1-sigma error on the SCF spectrum slope
        '''
        return self._slope_err
    @property
    def xlow(self):
        '''
        Lower limit for lags to consider in fits.
        '''
        return self._xlow
    @property
    def xhigh(self):
        '''
        Upper limit for lags to consider in fits.
        '''
        return self._xhigh
def fitted_model(self, xvals):
'''
Computes the modelled power-law using the given x values.
Parameters
----------
xvals : `~astropy.Quantity`
Values of lags to compute the model at.
Returns
-------
model_values : `~numpy.ndarray`
Values of the model at the given values. Equivalent to log values
of the SCF spectrum.
'''
if not isinstance(xvals, u.Quantity):
raise TypeError("xvals must be an astropy.units.Quantity.")
# Convert into the lag units used for the fit
xvals = self._to_pixel(xvals).value
model_values = \
self.fit.params[0] + self.fit.params[1] * np.log10(xvals)
return 10**model_values
    def fit_2Dplaw(self, fit_method='LevMarq', p0=(), xlow=None,
                   xhigh=None, bootstrap=True, niters=100, use_azimmask=False):
        '''
        Model the 2D power-spectrum surface with an elliptical power-law model.
        Parameters
        ----------
        fit_method : str, optional
            The algorithm fitting to use. Only 'LevMarq' is currently
            available.
        p0 : tuple, optional
            Initial parameters for fitting. If no values are given, the initial
            parameters start from the 1D fit parameters.
        xlow : `~astropy.units.Quantity`, optional
            Lower lag value limit to consider in the fit.
        xhigh : `~astropy.units.Quantity`, optional
            Upper lag value limit to consider in the fit.
        bootstrap : bool, optional
            Bootstrap using the model residuals to estimate the parameter
            standard errors. This tends to give more realistic intervals than
            the covariance matrix.
        niters : int, optional
            Number of bootstrap iterations.
        use_azimmask : bool, optional
            Use the azimuthal mask defined for the 1D spectrum, when azimuthal
            limit have been given.
        '''
        # Adjust the distance based on the separation of the lags
        pix_lag_diff = np.diff(self._to_pixel(self.lags))[0].value
        if xlow is not None:
            # Convert xlow into the same units as the lags
            xlow = self._to_pixel(xlow)
            self._xlow = xlow
        else:
            self._xlow = np.abs(self.lags).min()
        if xhigh is not None:
            # Convert xhigh into the same units as the lags
            xhigh = self._to_pixel(xhigh)
            self._xhigh = xhigh
        else:
            self._xhigh = np.abs(self.lags).max()
        xlow_pix = self._to_pixel(self.xlow).value
        xhigh_pix = self._to_pixel(self.xhigh).value
        yy, xx = make_radial_arrays(self.scf_surface.shape)
        # Needed to make sure the definition of theta is consistent with
        # azimuthal masking and the elliptical p-law
        yy = yy[::-1]
        xx = xx[::-1]
        dists = np.sqrt(yy**2 + xx**2) * pix_lag_diff
        # Keep only lags between xlow and xhigh.
        mask = clip_func(dists, xlow_pix, xhigh_pix)
        if hasattr(self, "_azim_mask") and use_azimmask:
            mask = np.logical_and(mask, self._azim_mask)
        if not mask.any():
            raise ValueError("Limits have removed all lag values. Make xlow"
                             " and xhigh less restrictive.")
        if len(p0) == 0:
            # NOTE(review): ``slope`` is a property, so hasattr is False only
            # when the getter raises (i.e. fit_plaw has not been run yet).
            if hasattr(self, 'slope'):
                slope_guess = self.slope
                amp_guess = self.fit.params[0]
            else:
                # Let's guess it's going to be ~ -0.2
                slope_guess = -0.2
                amp_guess = 1.0
            # Use an initial guess pi / 2 for theta
            theta = np.pi / 2.
            # For ellip = 0.5
            ellip_conv = 0
            p0 = (amp_guess, ellip_conv, theta, slope_guess)
        params, stderrs, fit_2Dmodel, fitter = \
            fit_elliptical_powerlaw(np.log10(self.scf_surface[mask]),
                                    xx[mask],
                                    yy[mask], p0,
                                    fit_method=fit_method,
                                    bootstrap=bootstrap,
                                    niters=niters)
        self.fit2D = fit_2Dmodel
        self._fitter = fitter
        self._slope2D = params[3]
        self._slope2D_err = stderrs[3]
        # Restrict the position angle to [0, pi).
        self._theta2D = params[2] % np.pi
        self._theta2D_err = stderrs[2]
        # Apply transforms to convert back to the [0, 1) ellipticity range
        self._ellip2D = inverse_interval_transform(params[1], 0, 1)
        self._ellip2D_err = \
            inverse_interval_transform_stderr(stderrs[1], params[1], 0, 1)
        self._fit2D_flag = True
    @property
    def slope2D(self):
        '''
        Fitted slope of the 2D power-law.
        '''
        return self._slope2D
    @property
    def slope2D_err(self):
        '''
        Slope standard error of the 2D power-law.
        '''
        return self._slope2D_err
    @property
    def theta2D(self):
        '''
        Fitted position angle of the 2D power-law, restricted to [0, pi).
        '''
        return self._theta2D
    @property
    def theta2D_err(self):
        '''
        Position angle standard error of the 2D power-law.
        '''
        return self._theta2D_err
    @property
    def ellip2D(self):
        '''
        Fitted ellipticity of the 2D power-law, in the [0, 1) range.
        '''
        return self._ellip2D
    @property
    def ellip2D_err(self):
        '''
        Ellipticity standard error of the 2D power-law.
        '''
        return self._ellip2D_err
    def plot_fit(self, save_name=None, show_radial=True,
                 show_residual=True,
                 show_surface=True, contour_color='k',
                 cmap='viridis', data_color='r', fit_color='k',
                 xunit=u.pix):
        '''
        Plot the SCF surface, radial profiles, and associated fits.
        Parameters
        ----------
        save_name : str, optional
            Save name for the figure. Enables saving the plot.
        show_radial : bool, optional
            Show the azimuthally-averaged 1D SCF spectrum and fit.
        show_surface : bool, optional
            Show the SCF surface and (if performed) fit.
        show_residual : bool, optional
            Plot the residuals for the 1D SCF fit.
        contour_color : {str, RGB tuple}, optional
            Color of the 2D fit contours.
        cmap : {str, matplotlib color map}, optional
            Colormap to use in the plots. Default is viridis.
        data_color : {str, RGB tuple}, optional
            Color of the azimuthally-averaged data.
        fit_color : {str, RGB tuple}, optional
            Color of the 1D fit.
        xunit : `~astropy.units.Unit`, optional
            Choose the angular unit to convert to when ang_units is enabled.
        '''
        import matplotlib.pyplot as plt
        from mpl_toolkits.axes_grid1 import make_axes_locatable
        # Reuse axes from the current figure when they already exist (e.g.,
        # when called repeatedly in an interactive session); otherwise build
        # a fresh layout below.
        fig = plt.gcf()
        axes = plt.gcf().get_axes()
        if len(axes) == 3:
            ax, ax2, ax_r = axes
        elif len(axes) == 2:
            if show_surface and not show_residual:
                ax, ax2 = axes
            else:
                ax2, ax_r = axes
        elif len(axes) == 1:
            if show_radial:
                ax = axes[0]
            else:
                ax2 = axes[0]
        else:
            # No existing axes: lay out the requested panels on a 4x4 grid.
            if show_surface:
                if show_radial:
                    ax = plt.subplot2grid((4, 4), (0, 2), colspan=2, rowspan=4)
                    if show_residual:
                        ax2 = plt.subplot2grid((4, 4), (0, 0), colspan=2,
                                               rowspan=3)
                        ax_r = plt.subplot2grid((4, 4), (3, 0), colspan=2,
                                                rowspan=1, sharex=ax2)
                    else:
                        ax2 = plt.subplot2grid((4, 4), (0, 0), colspan=2,
                                               rowspan=4)
                else:
                    ax = plt.subplot2grid((4, 4), (0, 0), colspan=4, rowspan=4)
            else:
                if show_residual:
                    ax2 = plt.subplot2grid((4, 4), (0, 0), colspan=4,
                                           rowspan=3)
                    ax_r = plt.subplot2grid((4, 4), (3, 0), colspan=4,
                                            rowspan=1, sharex=ax2)
                else:
                    ax2 = plt.subplot2grid((4, 4), (0, 0), colspan=4,
                                           rowspan=4)
        if show_surface:
            im1 = ax.imshow(self.scf_surface, origin="lower",
                            interpolation="nearest",
                            cmap=cmap)
            divider = make_axes_locatable(ax)
            cax = divider.append_axes("right", "5%", pad="3%")
            cb = plt.colorbar(im1, cax=cax)
            cb.set_label("SCF Value")
            # Outline the lag range used in the 2D fit.
            yy, xx = make_radial_arrays(self.scf_surface.shape)
            pix_lag_diff = np.diff(self._to_pixel(self.lags))[0].value
            dists = np.sqrt(yy**2 + xx**2) * pix_lag_diff
            xlow_pix = self._to_pixel(self.xlow).value
            xhigh_pix = self._to_pixel(self.xhigh).value
            mask = clip_func(dists, xlow_pix, xhigh_pix)
            if not mask.all():
                ax.contour(mask, colors='b', linestyles='-.', levels=[0.5])
            if self._fit2D_flag:
                # Contours of the fitted elliptical power-law model.
                ax.contour(self.fit2D(xx, yy)[::-1], colors=contour_color,
                           linestyles='-')
            if self._azim_constraint_flag:
                if not np.all(self._azim_mask):
                    ax.contour(self._azim_mask, colors='b', linestyles='-.',
                               levels=[0.5])
                else:
                    warn("Azimuthal mask includes all data. No contours will "
                         "be drawn.")
        if show_radial:
            pix_lags = self._to_pixel(self.lags)
            lags = self._spatial_unit_conversion(pix_lags, xunit).value
            ax2.errorbar(lags, self.scf_spectrum,
                         yerr=self.scf_spectrum_stddev,
                         fmt='o', color=data_color,
                         markersize=5)
            ax2.set_xscale("log")  # , nonposy='clip')
            ax2.set_yscale("log")  # , nonposy='clip')
            ax2.set_xlim(lags.min() * 0.75, lags.max() * 1.25)
            ax2.set_ylim(np.nanmin(self.scf_spectrum) * 0.75,
                         np.nanmax(self.scf_spectrum) * 1.25)
            # Overlay the fit. Use points 5% lower than the min and max.
            xvals = np.linspace(lags.min() * 0.95,
                                lags.max() * 1.05, 50) * xunit
            ax2.loglog(xvals, self.fitted_model(xvals), '--', linewidth=2,
                       label='Fit', color=fit_color)
            # Show the fit limits
            xlow = self._spatial_unit_conversion(self._xlow, xunit).value
            xhigh = self._spatial_unit_conversion(self._xhigh, xunit).value
            ax2.axvline(xlow, color='b', alpha=0.5, linestyle='-.')
            ax2.axvline(xhigh, color='b', alpha=0.5, linestyle='-.')
            ax2.legend()
            ax2.set_ylabel("SCF Value")
            if show_residual:
                # Residuals against the model evaluated at the pixel lags.
                resids = self.scf_spectrum - self.fitted_model(pix_lags)
                ax_r.errorbar(lags, resids,
                              yerr=self.scf_spectrum_stddev,
                              fmt='o', color=data_color,
                              markersize=5)
                ax_r.axvline(xlow, color='b', alpha=0.5, linestyle='-.')
                ax_r.axvline(xhigh, color='b', alpha=0.5, linestyle='-.')
                ax_r.axhline(0., color=fit_color, linestyle='--')
                ax_r.set_ylabel("Residuals")
                ax_r.set_xlabel("Lag ({})".format(xunit))
                # ax2.get_xaxis().set_ticks([])
            else:
                ax2.set_xlabel("Lag ({})".format(xunit))
        plt.tight_layout()
        fig.subplots_adjust(hspace=0.1)
        if save_name is not None:
            plt.savefig(save_name)
            plt.close()
        else:
            plt.show()
def run(self, boundary='continuous',
show_progress=True, xlow=None, xhigh=None,
fit_kwargs={}, fit_2D=True,
fit_2D_kwargs={}, radialavg_kwargs={},
verbose=False, xunit=u.pix, save_name=None):
'''
Computes all SCF outputs.
Parameters
----------
boundary : {"continuous", "cut"}
Treat the boundary as continuous (wrap-around) or cut values
beyond the edge (i.e., for most observational data).
show_progress : bool, optional
Show a progress bar during the creation of the covariance matrix.
xlow : `~astropy.Quantity`, optional
See `~SCF.fit_plaw`.
xhigh : `~astropy.Quantity`, optional
See `~SCF.fit_plaw`.
fit_kwargs : dict, optional
Keyword arguments for `SCF.fit_plaw`. Use the
`xlow` and `xhigh` keywords to provide fit limits.
fit_2D : bool, optional
Fit an elliptical power-law model to the 2D spectrum.
fit_2D_kwargs : dict, optional
Keyword arguments for `SCF.fit_2Dplaw`. Use the
`xlow` and `xhigh` keywords to provide fit limits.
radialavg_kwargs : dict, optional
Passed to `~SCF.compute_spectrum`.
verbose : bool, optional
Enables plotting.
xunit : `~astropy.units.Unit`, optional
Choose the angular unit to convert to when ang_units is enabled.
save_name : str, optional
Save the figure when a file name is given.
'''
self.compute_surface(boundary=boundary, show_progress=show_progress)
self.compute_spectrum(**radialavg_kwargs)
self.fit_plaw(verbose=verbose, xlow=xlow, xhigh=xhigh, **fit_kwargs)
if fit_2D:
self.fit_2Dplaw(xlow=xlow, xhigh=xhigh,
**fit_2D_kwargs)
if verbose:
self.plot_fit(save_name=save_name, xunit=xunit)
return self
class SCF_Distance(object):
'''
Calculates the distance between two data cubes based on their SCF surfaces.
The distance is the L2 norm between the surfaces. We weight the surface by
1/r^2 where r is the distance from the centre.
.. note:: When a pre-computed `~SCF` class is given for `cube1` or `cube2`,
it needs to have the same set of lags between the cubes, defined
by as the angular scales based on the FITS header. If the lags
are not equivalent, the SCF will be re-computed with new lags.
Parameters
----------
cube1 : %(dtypes)s or `~SCF`
Data cube. Or a `~SCF` class can be passed which may be pre-computed.
cube2 : %(dtypes)s or `~SCF`
Data cube. Or a `~SCF` class can be passed which may be pre-computed.
size : int, optional
Maximum size roll, in pixels, over which SCF will be calculated. If
the angular scale is different between the data cubes, the lags are
scaled to have the same angular scales.
boundary : {"continuous", "cut"}
Treat the boundary as continuous (wrap-around) or cut values
beyond the edge (i.e., for most observational data). A two element
list can also be passed for treating the boundaries differently
between the given cubes.
'''
__doc__ %= {"dtypes": " or ".join(common_types + threed_types)}
def __init__(self, cube1, cube2, size=11, boundary='continuous',
show_progress=True):
if isinstance(cube1, SCF):
self.scf1 = cube1
_has_data1 = False
else:
dataset1 = input_data(cube1, no_header=False)
_has_data1 = True
if isinstance(cube2, SCF):
self.scf2 = cube2
_has_data2 = False
else:
dataset2 = input_data(cube2, no_header=False)
_has_data2 = True
# Create a default set of lags, in pixels
if size % 2 == 0:
Warning("Size must be odd. Reducing size to next lowest odd"
" number.")
size = size - 1
self.size = size
roll_lags = (np.arange(size) - size // 2) * u.pix
# Now adjust the lags such they have a common scaling when the datasets
# are not on a common grid.
wcs1 = WCS(dataset1[1]) if _has_data1 else self.scf1._wcs
wcs2 = WCS(dataset2[1]) if _has_data2 else self.scf2._wcs
scale = common_scale(wcs1, wcs2)
if scale == 1.0:
if not _has_data1:
roll_lags1 = self.scf1._to_pixel(self.scf1.roll_lags)
else:
roll_lags1 = roll_lags
if not _has_data1:
roll_lags2 = self.scf2._to_pixel(self.scf2.roll_lags)
else:
roll_lags2 = roll_lags
if not (roll_lags1 == roll_lags2).all():
raise ValueError("The roll lags must match when using pre-computed SCFs.")
elif scale > 1.0:
roll_lags1 = scale * roll_lags
roll_lags2 = roll_lags
else:
roll_lags1 = roll_lags
roll_lags2 = roll_lags / float(scale)
if not isinstance(boundary, string_types):
if len(boundary) != 2:
raise ValueError("If boundary is not a string, it must be a "
"list or array of 2 string elements.")
else:
boundary = [boundary, boundary]
# if fiducial_model is not None:
# self.scf1 = fiducial_model
if _has_data1:
self.scf1 = SCF(cube1, roll_lags=roll_lags1)
needs_run1 = True
else:
needs_run1 = False
if roll_lags1.size != self.scf1.roll_lags.size:
lag_check = True
else:
lag_check = (roll_lags1 == self.scf1._to_pixel(self.scf1.roll_lags)).all()
if not lag_check:
warn("SCF given as cube1 needs to be recomputed as the lags"
" must match the common set of lags between the two data"
" sets. Recomputing SCF.")
needs_run1 = True
self.scf1.roll_lags = roll_lags1
compute_check = hasattr(self.scf1, "_scf_spectrum")
if not compute_check:
warn("SCF given as cube1 does not have an SCF"
" spectrum computed. Recomputing SCF.")
needs_run1 = True
if needs_run1:
self.scf1.compute_surface(boundary=boundary[0],
show_progress=show_progress)
# This is for the plot, not the distance, so stick with default
# params
self.scf1.compute_spectrum()
if _has_data2:
self.scf2 = SCF(cube2, roll_lags=roll_lags2)
needs_run2 = True
else:
needs_run2 = False
if roll_lags2.size != self.scf2.roll_lags.size:
lag_check = True
else:
lag_check = (roll_lags2 == self.scf2._to_pixel(self.scf2.roll_lags)).all()
if not lag_check:
warn("SCF given as cube2 needs to be recomputed as the lags"
" must match the common set of lags between the two data"
" sets. Recomputing SCF.")
needs_run2 = True
self.scf2.roll_lags = roll_lags2
compute_check = hasattr(self.scf2, "_scf_spectrum")
if not compute_check:
warn("SCF given as cube2 does not have an SCF"
" spectrum computed. Recomputing SCF.")
needs_run2 = True
if needs_run2:
self.scf2.compute_surface(boundary=boundary[1],
show_progress=show_progress)
# This is for the plot, not the distance, so stick with default
# params
self.scf2.compute_spectrum()
if not needs_run1 and not needs_run2:
self.size = self.scf1.size
def distance_metric(self, weighted=True, verbose=False,
plot_kwargs1={'color': 'b', 'marker': 'D',
'label': '1'},
plot_kwargs2={'color': 'g', 'marker': 'o',
'label': '2'},
xunit=u.pix, save_name=None):
'''
Compute the distance between the surfaces.
Parameters
----------
weighted : bool, optional
Sets whether to apply the 1/r weighting to the distance.
verbose : bool, optional
Enables plotting.
plot_kwargs1 : dict, optional
Pass kwargs to `~matplotlib.pyplot.plot` for
`cube1`.
plot_kwargs2 : dict, optional
Pass kwargs to `~matplotlib.pyplot.plot` for
`cube2`.
xunit : `~astropy.units.Unit`, optional
Unit of the x-axis in the plot in pixel, angular, or
physical units.
save_name : str,optional
Save the figure when a file name is given.
'''
# Since the angular scales are matched, we can assume that they will
# have the same weights. So just use the shape of the lags to create
# the weight surface.
dx = np.arange(self.size) - self.size // 2
dy = np.arange(self.size) - self.size // 2
a, b = np.meshgrid(dx, dy)
if weighted:
dist_weight = 1 / np.sqrt(a ** 2 + b ** 2)
# Centre pixel set to 1
dist_weight[np.where((a == 0) & (b == 0))] = 1.
else:
dist_weight = np.ones((self.size, self.size))
difference = (self.scf1.scf_surface - self.scf2.scf_surface) ** 2. * \
dist_weight
self.distance = np.sqrt(np.sum(difference) / np.sum(dist_weight))
if verbose:
import matplotlib.pyplot as plt
defaults1 = {'color': 'b', 'marker': 'D', 'label': '1'}
defaults2 = {'color': 'g', 'marker': 'o', 'label': '2'}
for key in defaults1:
if key not in plot_kwargs1:
plot_kwargs1[key] = defaults1[key]
for key in defaults2:
if key not in plot_kwargs2:
plot_kwargs2[key] = defaults2[key]
fig = plt.figure()
ax0 = fig.add_subplot(2, 2, 1)
ax1 = fig.add_subplot(2, 2, 2, sharex=ax0, sharey=ax0)
ax2 = fig.add_subplot(2, 2, 3, sharex=ax0, sharey=ax0)
ax3 = fig.add_subplot(2, 2, 4)
vmin = min(self.scf1.scf_surface.min(),
self.scf2.scf_surface.min())
im0 = ax0.imshow(self.scf1.scf_surface, origin="lower",
interpolation="nearest", vmin=vmin)
ax0.set_title(plot_kwargs1['label'])
fig.colorbar(im0, ax=ax0)
im1 = ax1.imshow(self.scf2.scf_surface, origin="lower",
interpolation="nearest", vmin=vmin)
ax1.set_title(plot_kwargs2['label'])
fig.colorbar(im1, ax=ax1)
im2 = ax2.imshow(difference, origin="lower",
interpolation="nearest")
ax2.set_title("")
fig.colorbar(im2, ax=ax2)
pix_lags1 = self.scf1._to_pixel(self.scf1.lags)
lags1 = self.scf1._spatial_unit_conversion(pix_lags1, xunit).value
pix_lags2 = self.scf2._to_pixel(self.scf2.lags)
lags2 = self.scf2._spatial_unit_conversion(pix_lags2, xunit).value
ax3.errorbar(lags1, self.scf1.scf_spectrum,
yerr=self.scf1.scf_spectrum_stddev,
fmt=plot_kwargs1['marker'],
color=plot_kwargs1['color'],
markersize=5,
label=plot_kwargs1['label'])
ax3.errorbar(lags2, self.scf2.scf_spectrum,
yerr=self.scf2.scf_spectrum_stddev,
fmt=plot_kwargs2['marker'],
color=plot_kwargs2['color'],
markersize=5,
label=plot_kwargs2['label'])
ax3.set_xscale("log")
ax3.set_yscale("log")
ax3.set_xlim(min(lags1.min(), lags2.min()) * 0.75,
max(lags1.max(), lags2.max()) * 1.25)
ax3.set_ylim(min(self.scf1.scf_spectrum.min(),
self.scf2.scf_spectrum.min()) * 0.75,
max(self.scf1.scf_spectrum.max(),
self.scf2.scf_spectrum.max()) * 1.25)
ax3.grid(True)
ax3.set_xlabel("Lags ({})".format(xunit))
plt.tight_layout()
if save_name is not None:
plt.savefig(save_name)
plt.close()
else:
plt.show()
return self
|
|
#!/usr/bin/env python
"""API handlers for dealing with flows."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import collections
import itertools
import re
from future.builtins import str
from future.utils import iteritems
from future.utils import itervalues
from typing import Iterable
from grr_response_core import config
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import registry
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import client_stats as rdf_client_stats
from grr_response_core.lib.rdfvalues import flows as rdf_flows
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_core.lib.rdfvalues import structs as rdf_structs
from grr_response_core.lib.util import compatibility
from grr_response_proto.api import flow_pb2
from grr_response_server import access_control
from grr_response_server import data_store
from grr_response_server import data_store_utils
from grr_response_server import flow
from grr_response_server import flow_base
from grr_response_server import instant_output_plugin
from grr_response_server import notification
from grr_response_server import output_plugin
from grr_response_server.databases import db
from grr_response_server.gui import api_call_handler_base
from grr_response_server.gui import api_call_handler_utils
from grr_response_server.gui import archive_generator
from grr_response_server.gui.api_plugins import client
from grr_response_server.gui.api_plugins import output_plugin as api_output_plugin
from grr_response_server.rdfvalues import flow_objects as rdf_flow_objects
from grr_response_server.rdfvalues import flow_runner as rdf_flow_runner
from grr_response_server.rdfvalues import objects as rdf_objects
from grr_response_server.rdfvalues import output_plugin as rdf_output_plugin
class FlowNotFoundError(api_call_handler_base.ResourceNotFoundError):
  """Raised when a flow is not found."""
class OutputPluginNotFoundError(api_call_handler_base.ResourceNotFoundError):
  """Raised when an output plugin is not found."""
class ApiFlowId(rdfvalue.RDFString):
  """Class encapsulating flows ids.

  A flow id is a "/"-separated sequence of components, each of which must be
  a valid session id component.
  """

  def __init__(self, initializer=None):
    super(ApiFlowId, self).__init__(initializer=initializer)
    # TODO(user): move this to a separate validation method when
    # common RDFValues validation approach is implemented.
    if self._value:
      components = self.Split()
      for component in components:
        try:
          rdfvalue.SessionID.ValidateID(component)
        except ValueError as e:
          raise ValueError("Invalid flow id: %s (%s)" % (self._value, e))

  def Split(self):
    """Splits the flow id into its "/"-separated components.

    Returns:
      A list of flow id components.

    Raises:
      ValueError: If the flow id is empty.
    """
    if not self._value:
      # Bug fix: the message previously said "client id", but this class
      # wraps a flow id.
      raise ValueError("Can't call Split() on an empty flow id.")
    return self._value.split("/")
class ApiFlowDescriptor(rdf_structs.RDFProtoStruct):
  """Descriptor containing information about a flow class."""
  protobuf = flow_pb2.ApiFlowDescriptor

  def GetDefaultArgsClass(self):
    """Returns the RDFValue class of this flow's args, or None if unknown."""
    return rdfvalue.RDFValue.classes.get(self.args_type)

  def _GetArgsDescription(self, args_type):
    """Get a simplified description of the args_type for a flow."""
    args = {}
    if args_type:
      for type_descriptor in args_type.type_infos:
        # Hidden fields are internal and not shown to API consumers.
        if not type_descriptor.hidden:
          args[type_descriptor.name] = {
              "description": type_descriptor.description,
              "default": type_descriptor.default,
              "type": "",
          }
          if type_descriptor.type:
            args[type_descriptor.name]["type"] = type_descriptor.type.__name__
    return args

  def _GetCallingPrototypeAsString(self, flow_cls):
    """Get a description of the calling prototype for this flow class."""
    output = []
    output.append("flow.StartFlow(client_id=client_id, ")
    output.append("flow_cls=%s.%s, " %
                  (flow_cls.__module__.split(".")[-1], flow_cls.__name__))
    prototypes = []
    if flow_cls.args_type:
      for type_descriptor in flow_cls.args_type.type_infos:
        if not type_descriptor.hidden:
          prototypes.append("%s=%s" %
                            (type_descriptor.name, type_descriptor.name))
    output.append(", ".join(prototypes))
    output.append(")")
    return "".join(output)

  def _GetFlowArgsHelpAsString(self, flow_cls):
    """Get a string description of the calling prototype for this flow."""
    output = [
        " Call Spec:",
        " %s" % self._GetCallingPrototypeAsString(flow_cls), ""
    ]
    arg_list = sorted(
        iteritems(self._GetArgsDescription(flow_cls.args_type)),
        key=lambda x: x[0])
    if not arg_list:
      output.append(" Args: None")
    else:
      output.append(" Args:")
      for arg, val in arg_list:
        output.append(" %s" % arg)
        output.append(" description: %s" % val["description"])
        output.append(" type: %s" % val["type"])
        output.append(" default: %s" % val["default"])
        output.append("")
    return "\n".join(output)

  def _GetFlowDocumentation(self, flow_cls):
    """Returns the flow docstring followed by the generated call spec."""
    # Bug fix: classes always have a __doc__ attribute (None when
    # undocumented), so the previous getattr(..., "") default never applied
    # and undocumented flows rendered the literal string "None".
    return "%s\n\n%s" % (flow_cls.__doc__ or "",
                         self._GetFlowArgsHelpAsString(flow_cls))

  def InitFromFlowClass(self, flow_cls, token=None):
    """Initializes this descriptor from a flow class.

    Args:
      flow_cls: The flow class to describe.
      token: ACL token; required to compute per-user default args.

    Returns:
      Self, for chaining.

    Raises:
      ValueError: If token is None.
    """
    if not token:
      raise ValueError("token can't be None")
    self.name = flow_cls.__name__
    self.friendly_name = flow_cls.friendly_name
    self.category = flow_cls.category.strip("/")
    self.doc = self._GetFlowDocumentation(flow_cls)
    self.args_type = flow_cls.args_type.__name__
    self.default_args = flow_cls.GetDefaultArgs(username=token.username)
    self.behaviours = sorted(flow_cls.behaviours)
    return self
class ApiFlowReference(rdf_structs.RDFProtoStruct):
  """API reference to a flow: a (client id, flow id) pair."""
  protobuf = flow_pb2.ApiFlowReference
  rdf_deps = [
      client.ApiClientId,
      ApiFlowId,
  ]
  def FromFlowReference(self, reference):
    """Initializes this object from an rdf_objects.FlowReference."""
    self.flow_id = reference.flow_id
    self.client_id = reference.client_id
    return self
class ApiFlow(rdf_structs.RDFProtoStruct):
  """ApiFlow is used when rendering responses.

  ApiFlow is meant to be more lightweight than automatically generated AFF4
  representation. It's also meant to contain only the information needed by
  the UI and to not expose implementation details.
  """
  protobuf = flow_pb2.ApiFlow
  rdf_deps = [
      api_call_handler_utils.ApiDataObject,
      client.ApiClientId,
      "ApiFlow",  # TODO(user): recursive dependency.
      ApiFlowId,
      ApiFlowReference,
      rdf_flow_runner.FlowContext,
      rdf_flow_runner.FlowRunnerArgs,
      rdfvalue.RDFDatetime,
      rdfvalue.SessionID,
  ]

  def GetArgsClass(self):
    """Returns the args class for this flow, or None if it can't be found."""
    flow_name = self.name
    if not flow_name:
      flow_name = self.runner_args.flow_name
    if flow_name:
      flow_cls = registry.FlowRegistry.FlowClassByName(flow_name)
      # The required protobuf for this class is in args_type.
      return flow_cls.args_type

  def InitFromFlowObject(self,
                         flow_obj,
                         with_args=True,
                         with_state_and_context=False):
    """Initializes this object from an rdf_flow_objects.Flow.

    Args:
      flow_obj: Flow object read from the relational DB.
      with_args: If True, also copy the flow's args.
      with_state_and_context: If True, also fill in context and state data.

    Returns:
      Self, for chaining. Any error during conversion is captured in
      self.internal_error instead of being raised.
    """
    try:
      self.flow_id = flow_obj.flow_id
      self.client_id = flow_obj.client_id
      # TODO(amoser): Get rid of all urns.
      self.urn = flow_obj.long_flow_id
      self.name = flow_obj.flow_class_name
      self.started_at = flow_obj.create_time
      self.last_active_at = flow_obj.last_update_time
      self.creator = flow_obj.creator
      if flow_obj.client_crash_info:
        self.state = "CLIENT_CRASHED"
      elif flow_obj.pending_termination:
        self.state = "ERROR"
        self.status = ("Pending termination: %s" %
                       flow_obj.pending_termination.reason)
      else:
        # Maps rdf_flow_objects.Flow.FlowState values to legacy API states.
        context_state_map = {1: "RUNNING", 2: "TERMINATED", 3: "ERROR"}
        self.state = context_state_map[int(flow_obj.flow_state)]
      if with_state_and_context:
        outstanding_requests = (
            flow_obj.next_outbound_id - flow_obj.next_request_to_process)
        self.context = rdf_flow_runner.FlowContext(
            # TODO(amoser): No need to set this in all cases once the legacy API
            # is removed.
            client_resources=rdf_client_stats.ClientResources(
                cpu_usage=rdf_client_stats.CpuSeconds()),
            create_time=flow_obj.create_time,
            creator=flow_obj.creator,
            current_state=flow_obj.current_state,
            next_outbound_id=flow_obj.next_outbound_id,
            outstanding_requests=outstanding_requests,
            state=self.state,
            # TODO(amoser): Get rid of all urns.
            session_id=flow_obj.long_flow_id,
        )
        if flow_obj.output_plugins_states:
          self.context.output_plugins_states = flow_obj.output_plugins_states
        if flow_obj.network_bytes_sent:
          self.context.network_bytes_sent = flow_obj.network_bytes_sent
          self.context.client_resources.network_bytes_sent = (
              flow_obj.network_bytes_sent)
        if flow_obj.cpu_time_used:
          self.context.client_resources.cpu_time_used = flow_obj.cpu_time_used
        if flow_obj.error_message:
          self.context.status = flow_obj.error_message
        if flow_obj.backtrace:
          self.context.backtrace = flow_obj.backtrace
      if with_args:
        try:
          self.args = flow_obj.args
        except ValueError:
          # If args class name has changed, ValueError will be raised. Handling
          # this gracefully - we should still try to display some useful info
          # about the flow.
          pass
      self.runner_args = rdf_flow_runner.FlowRunnerArgs(
          client_id=flow_obj.client_id,
          flow_name=flow_obj.flow_class_name,
          notify_to_user=flow_base.FlowBase(flow_obj).ShouldSendNotifications())
      if flow_obj.output_plugins:
        self.runner_args.output_plugins = flow_obj.output_plugins
      if flow_obj.HasField("cpu_limit"):
        self.runner_args.cpu_limit = flow_obj.cpu_limit
      if flow_obj.HasField("network_bytes_limit"):
        # Bug fix: this previously assigned the network bytes limit into
        # runner_args.cpu_limit, clobbering the CPU limit and never
        # populating the network bytes limit.
        self.runner_args.network_bytes_limit = flow_obj.network_bytes_limit
      if flow_obj.original_flow.flow_id:
        self.original_flow = ApiFlowReference().FromFlowReference(
            flow_obj.original_flow)
      if with_state_and_context and flow_obj.persistent_data.ToDict():
        self.state_data = (
            api_call_handler_utils.ApiDataObject().InitFromDataObject(
                flow_obj.persistent_data))
    except Exception as e:  # pylint: disable=broad-except
      self.internal_error = "Error while opening flow: %s" % str(e)
    return self
class ApiFlowRequest(rdf_structs.RDFProtoStruct):
  """API representation of a single flow request and its responses."""
  protobuf = flow_pb2.ApiFlowRequest
  rdf_deps = [
      rdf_flows.GrrMessage,
      rdf_flow_runner.RequestState,
  ]
class ApiFlowResult(rdf_structs.RDFProtoStruct):
  """API representation of a single flow result (payload + timestamp)."""
  protobuf = flow_pb2.ApiFlowResult
  rdf_deps = [
      rdfvalue.RDFDatetime,
  ]
  def GetPayloadClass(self):
    """Returns the registered RDFValue class named by payload_type."""
    return rdfvalue.RDFValue.classes[self.payload_type]
  def InitFromFlowResult(self, result):
    """Initializes this object from an rdf_flow_objects.FlowResult."""
    p = result.payload
    self.payload_type = compatibility.GetName(p.__class__)
    self.payload = p
    self.timestamp = result.timestamp
    return self
class ApiFlowLog(rdf_structs.RDFProtoStruct):
  """API representation of a single flow log entry."""
  protobuf = flow_pb2.ApiFlowLog
  rdf_deps = [ApiFlowId, rdfvalue.RDFDatetime]
  def InitFromFlowLogEntry(self, fl, flow_id):
    """Initializes this object from a FlowLogEntry and its flow id."""
    self.log_message = fl.message
    self.flow_id = flow_id
    self.timestamp = fl.timestamp
    return self
class ApiGetFlowArgs(rdf_structs.RDFProtoStruct):
  """Arguments for ApiGetFlowHandler."""
  protobuf = flow_pb2.ApiGetFlowArgs
  rdf_deps = [
      client.ApiClientId,
      ApiFlowId,
  ]
class ApiGetFlowHandler(api_call_handler_base.ApiCallHandler):
  """Renders given flow.
  Only top-level flows can be targeted.
  """
  args_type = ApiGetFlowArgs
  result_type = ApiFlow

  def Handle(self, args, token=None):
    """Reads the flow from the relational DB and converts it to an ApiFlow."""
    client_id = str(args.client_id)
    flow_id = str(args.flow_id)
    flow_obj = data_store.REL_DB.ReadFlowObject(client_id, flow_id)
    return ApiFlow().InitFromFlowObject(flow_obj, with_state_and_context=True)
class ApiListFlowRequestsArgs(rdf_structs.RDFProtoStruct):
  """Arguments for ApiListFlowRequestsHandler."""
  protobuf = flow_pb2.ApiListFlowRequestsArgs
  rdf_deps = [
      client.ApiClientId,
      ApiFlowId,
  ]
class ApiListFlowRequestsResult(rdf_structs.RDFProtoStruct):
  """Result of ApiListFlowRequestsHandler."""
  protobuf = flow_pb2.ApiListFlowRequestsResult
  rdf_deps = [
      ApiFlowRequest,
  ]
class ApiListFlowRequestsHandler(api_call_handler_base.ApiCallHandler):
  """Renders list of requests of a given flow."""
  args_type = ApiListFlowRequestsArgs
  result_type = ApiListFlowRequestsResult

  def Handle(self, args, token=None):
    """Builds a paginated list of requests (with payload-less responses)."""
    client_id = args.client_id.ToString()
    flow_id = str(args.flow_id)
    all_pairs = data_store.REL_DB.ReadAllFlowRequestsAndResponses(
        client_id, flow_id)
    # When no count is given, the page extends to the end of the sequence.
    stop = (args.offset + args.count) if args.count else None
    page = itertools.islice(all_pairs, args.offset, stop)
    result = ApiListFlowRequestsResult()
    for request, response_dict in page:
      state = rdf_flow_runner.RequestState(
          client_id=client_id,
          id=request.request_id,
          next_state=request.next_state,
          session_id="{}/flows/{}".format(client_id, str(request.flow_id)))
      api_request = ApiFlowRequest(
          request_id=str(request.request_id), request_state=state)
      if response_dict:
        ordered_keys = sorted(response_dict)
        messages = [
            response_dict[key].AsLegacyGrrMessage() for key in ordered_keys
        ]
        # Payloads are stripped; callers only need response metadata here.
        for message in messages:
          message.ClearPayload()
        api_request.responses = messages
      result.items.append(api_request)
    return result
class ApiListFlowResultsArgs(rdf_structs.RDFProtoStruct):
  """Arguments for ApiListFlowResultsHandler."""
  protobuf = flow_pb2.ApiListFlowResultsArgs
  rdf_deps = [
      client.ApiClientId,
      ApiFlowId,
  ]
class ApiListFlowResultsResult(rdf_structs.RDFProtoStruct):
  """Result of ApiListFlowResultsHandler."""
  protobuf = flow_pb2.ApiListFlowResultsResult
  rdf_deps = [
      ApiFlowResult,
  ]
class ApiListFlowResultsHandler(api_call_handler_base.ApiCallHandler):
  """Renders results of a given flow."""
  args_type = ApiListFlowResultsArgs
  result_type = ApiListFlowResultsResult

  def Handle(self, args, token=None):
    """Reads one page of flow results plus the overall result count."""
    client_id = str(args.client_id)
    flow_id = str(args.flow_id)
    substring = args.filter or None
    page_size = args.count or db.MAX_COUNT
    raw_results = data_store.REL_DB.ReadFlowResults(
        client_id, flow_id, args.offset, page_size, with_substring=substring)
    total_count = data_store.REL_DB.CountFlowResults(client_id, flow_id)
    items = [ApiFlowResult().InitFromFlowResult(r) for r in raw_results]
    return ApiListFlowResultsResult(items=items, total_count=total_count)
class ApiListFlowLogsArgs(rdf_structs.RDFProtoStruct):
  """Arguments for ApiListFlowLogsHandler."""
  protobuf = flow_pb2.ApiListFlowLogsArgs
  rdf_deps = [
      client.ApiClientId,
      ApiFlowId,
  ]
class ApiListFlowLogsResult(rdf_structs.RDFProtoStruct):
  """Result of ApiListFlowLogsHandler."""
  protobuf = flow_pb2.ApiListFlowLogsResult
  rdf_deps = [ApiFlowLog]
class ApiListFlowLogsHandler(api_call_handler_base.ApiCallHandler):
  """Returns a list of logs for the current client and flow."""
  args_type = ApiListFlowLogsArgs
  result_type = ApiListFlowLogsResult

  def Handle(self, args, token=None):
    """Reads one page of flow log entries plus the overall log count."""
    client_id = str(args.client_id)
    flow_id = str(args.flow_id)
    page_size = args.count or db.MAX_COUNT
    entries = data_store.REL_DB.ReadFlowLogEntries(
        client_id, flow_id, args.offset, page_size, args.filter)
    total_count = data_store.REL_DB.CountFlowLogEntries(client_id, flow_id)
    items = [
        ApiFlowLog().InitFromFlowLogEntry(entry, flow_id) for entry in entries
    ]
    return ApiListFlowLogsResult(items=items, total_count=total_count)
class ApiGetFlowResultsExportCommandArgs(rdf_structs.RDFProtoStruct):
  """Arguments for ApiGetFlowResultsExportCommandHandler."""
  protobuf = flow_pb2.ApiGetFlowResultsExportCommandArgs
  rdf_deps = [
      client.ApiClientId,
      ApiFlowId,
  ]
class ApiGetFlowResultsExportCommandResult(rdf_structs.RDFProtoStruct):
  """Result of ApiGetFlowResultsExportCommandHandler."""
  protobuf = flow_pb2.ApiGetFlowResultsExportCommandResult
class ApiGetFlowResultsExportCommandHandler(
    api_call_handler_base.ApiCallHandler):
  """Renders GRR export tool command line that exports flow results."""
  args_type = ApiGetFlowResultsExportCommandArgs
  result_type = ApiGetFlowResultsExportCommandResult

  def Handle(self, args, token=None):
    """Builds the shell command line that downloads this flow's archive."""
    # Sanitize client/flow ids so the output filename is shell-safe.
    safe_name = re.sub("[^0-9a-zA-Z]+", "_",
                       "%s_%s" % (args.client_id, args.flow_id))
    code_to_execute = ("""grrapi.Client("%s").Flow("%s").GetFilesArchive()."""
                       """WriteToFile("./flow_results_%s.zip")""") % (
                           args.client_id, args.flow_id, safe_name)
    command_parts = [
        config.CONFIG["AdminUI.export_command"],
        "--exec_code",
        utils.ShellQuote(code_to_execute),
    ]
    return ApiGetFlowResultsExportCommandResult(
        command=" ".join(command_parts))
class ApiGetFlowFilesArchiveArgs(rdf_structs.RDFProtoStruct):
  """Arguments for ApiGetFlowFilesArchiveHandler."""
  protobuf = flow_pb2.ApiGetFlowFilesArchiveArgs
  rdf_deps = [
      client.ApiClientId,
      ApiFlowId,
  ]
class ApiGetFlowFilesArchiveHandler(api_call_handler_base.ApiCallHandler):
  """Generates archive with all files referenced in flow's results."""
  args_type = ApiGetFlowFilesArchiveArgs

  def __init__(self, path_globs_blacklist=None, path_globs_whitelist=None):
    """Constructor.

    Args:
      path_globs_blacklist: List of paths.GlobExpression values. Blacklist will
        be applied before the whitelist.
      path_globs_whitelist: List of paths.GlobExpression values. Whitelist will
        be applied after the blacklist.

    Raises:
      ValueError: If path_globs_blacklist/whitelist is passed, but
        the other blacklist/whitelist argument is not.

    Note that path_globs_blacklist/whitelist arguments can only be passed
    together. The algorithm of applying the lists is the following:
    1. If the lists are not set, include the file into the archive. Otherwise:
    2. If the file matches the blacklist, skip the file. Otherwise:
    3. If the file does not match the whitelist, skip the file.
    """
    # Bug fix: this previously called
    # super(api_call_handler_base.ApiCallHandler, self).__init__(), which
    # resolves the MRO *after* ApiCallHandler and therefore skips
    # ApiCallHandler.__init__ entirely.
    super(ApiGetFlowFilesArchiveHandler, self).__init__()
    if len([
        x for x in (path_globs_blacklist, path_globs_whitelist) if x is None
    ]) == 1:
      raise ValueError("path_globs_blacklist/path_globs_whitelist have to "
                       "set/unset together.")
    self.path_globs_blacklist = path_globs_blacklist
    self.path_globs_whitelist = path_globs_whitelist

  def _WrapContentGenerator(self, generator, flow_results, args, token=None):
    """Yields archive chunks and notifies the user about success/failure."""
    flow_ref = rdf_objects.FlowReference(
        client_id=args.client_id, flow_id=args.flow_id)
    object_reference = rdf_objects.ObjectReference(
        reference_type=rdf_objects.ObjectReference.Type.FLOW, flow=flow_ref)
    try:
      for item in generator.Generate(flow_results):
        yield item
      notification.Notify(
          token.username,
          rdf_objects.UserNotification.Type.TYPE_FILE_ARCHIVE_GENERATED,
          "Downloaded archive of flow %s from client %s (archived %d "
          "out of %d items, archive size is %d)" %
          (args.flow_id, args.client_id, len(generator.archived_files),
           generator.total_files, generator.output_size), object_reference)
    except Exception as e:
      # Notify about the failure, then re-raise so the stream is aborted.
      notification.Notify(
          token.username,
          rdf_objects.UserNotification.Type.TYPE_FILE_ARCHIVE_GENERATION_FAILED,
          "Archive generation failed for flow %s on client %s: %s" %
          (args.flow_id, args.client_id, e), object_reference)
      raise

  def _BuildPredicate(self, client_id, token=None):
    """Returns a path predicate from the black/whitelists, or None if unset."""
    if self.path_globs_whitelist is None:
      return None
    kb = data_store_utils.GetClientKnowledgeBase(client_id)
    blacklist_regexes = []
    for expression in self.path_globs_blacklist:
      for pattern in expression.Interpolate(knowledge_base=kb):
        blacklist_regexes.append(rdf_paths.GlobExpression(pattern).AsRegEx())
    whitelist_regexes = []
    for expression in self.path_globs_whitelist:
      for pattern in expression.Interpolate(knowledge_base=kb):
        whitelist_regexes.append(rdf_paths.GlobExpression(pattern).AsRegEx())

    def Predicate(client_path):
      # Enforce leading / since Regexes require it.
      path = "/" + client_path.Path().lstrip("/")
      return (not any(r.Match(path) for r in blacklist_regexes) and
              any(r.Match(path) for r in whitelist_regexes))

    return Predicate

  def _GetFlow(self, args, token=None):
    """Reads the flow and its result payloads from the relational DB."""
    client_id = str(args.client_id)
    flow_id = str(args.flow_id)
    flow_obj = data_store.REL_DB.ReadFlowObject(client_id, flow_id)
    flow_api_object = ApiFlow().InitFromFlowObject(flow_obj)
    flow_results = data_store.REL_DB.ReadFlowResults(client_id, flow_id, 0,
                                                     db.MAX_COUNT)
    flow_results = [r.payload for r in flow_results]
    return flow_api_object, flow_results

  def Handle(self, args, token=None):
    """Streams a ZIP or TAR.GZ archive of the flow's downloaded files.

    Raises:
      ValueError: If the requested archive format is unknown.
    """
    flow_api_object, flow_results = self._GetFlow(args, token)
    description = ("Files downloaded by flow %s (%s) that ran on client %s by "
                   "user %s on %s" %
                   (flow_api_object.name, args.flow_id, args.client_id,
                    flow_api_object.creator, flow_api_object.started_at))
    target_file_prefix = "%s_flow_%s_%s" % (
        args.client_id, flow_api_object.name, str(
            flow_api_object.flow_id).replace(":", "_"))
    if args.archive_format == args.ArchiveFormat.ZIP:
      archive_format = archive_generator.CollectionArchiveGenerator.ZIP
      file_extension = ".zip"
    elif args.archive_format == args.ArchiveFormat.TAR_GZ:
      archive_format = archive_generator.CollectionArchiveGenerator.TAR_GZ
      file_extension = ".tar.gz"
    else:
      raise ValueError("Unknown archive format: %s" % args.archive_format)
    generator = archive_generator.CollectionArchiveGenerator(
        prefix=target_file_prefix,
        description=description,
        archive_format=archive_format,
        predicate=self._BuildPredicate(str(args.client_id), token=token),
        client_id=args.client_id.ToString())
    content_generator = self._WrapContentGenerator(
        generator, flow_results, args, token=token)
    return api_call_handler_base.ApiBinaryStream(
        target_file_prefix + file_extension,
        content_generator=content_generator)
class ApiListFlowOutputPluginsArgs(rdf_structs.RDFProtoStruct):
  """Arguments for ApiListFlowOutputPluginsHandler."""
  protobuf = flow_pb2.ApiListFlowOutputPluginsArgs
  rdf_deps = [
      client.ApiClientId,
      ApiFlowId,
  ]
class ApiListFlowOutputPluginsResult(rdf_structs.RDFProtoStruct):
  """Result of ApiListFlowOutputPluginsHandler."""
  protobuf = flow_pb2.ApiListFlowOutputPluginsResult
  rdf_deps = [
      api_output_plugin.ApiOutputPlugin,
  ]
class ApiListFlowOutputPluginsHandler(api_call_handler_base.ApiCallHandler):
  """Renders output plugins descriptors and states for a given flow."""
  args_type = ApiListFlowOutputPluginsArgs
  result_type = ApiListFlowOutputPluginsResult
  def Handle(self, args, token=None):
    """Lists the flow's output plugins with sanitized per-plugin state."""
    flow_obj = data_store.REL_DB.ReadFlowObject(
        str(args.client_id), str(args.flow_id))
    output_plugins_states = flow_obj.output_plugins_states
    # Counts plugins per name so ids can be built as "<name>_<index>".
    type_indices = {}
    result = []
    for output_plugin_state in output_plugins_states:
      plugin_state = output_plugin_state.plugin_state.Copy()
      # Internal fields that should not be exposed through the API.
      if "source_urn" in plugin_state:
        del plugin_state["source_urn"]
      if "token" in plugin_state:
        del plugin_state["token"]
      plugin_descriptor = output_plugin_state.plugin_descriptor
      type_index = type_indices.setdefault(plugin_descriptor.plugin_name, 0)
      type_indices[plugin_descriptor.plugin_name] += 1
      # Output plugins states are stored differently for hunts and for flows:
      # as a dictionary for hunts and as a simple list for flows.
      #
      # TODO(user): store output plugins states in the same way for flows
      # and hunts. Until this is done, we can emulate the same interface in
      # the HTTP API.
      api_plugin = api_output_plugin.ApiOutputPlugin(
          id=plugin_descriptor.plugin_name + "_%d" % type_index,
          plugin_descriptor=plugin_descriptor,
          state=plugin_state)
      result.append(api_plugin)
    return ApiListFlowOutputPluginsResult(items=result)
def GetOutputPluginIndex(plugin_descriptors, plugin_id):
  """Gets an output plugin index for a plugin with a given id.

  Historically output plugins descriptors were stored in dicts-like
  structures with unique identifiers as keys. In REL_DB-based implementation,
  however, both plugin descriptors and their states are stored in flat
  lists (see Flow definition in flows.proto).

  The ids were formed as "<plugin name>_<plugin index>" where plugin index
  was incremented for every plugin with a same name. For example, if we had
  EmailOutputPlugin and 2 BigQueryOutputPlugins, their ids would be:
  EmailOutputPlugin_0, BigQueryOutputPlugin_0, BigQueryOutputPlugin_1.

  To preserve backwards API compatibility, we emulate the old behavior by
  identifying plugins with same plugin ids as before.

  Args:
    plugin_descriptors: An iterable of OutputPluginDescriptor objects.
    plugin_id: Plugin id to search for.

  Returns:
    An index of a plugin in plugin_descriptors iterable corresponding to a
    given plugin_id.

  Raises:
    OutputPluginNotFoundError: if no plugin corresponding to a given plugin_id
      was found.
  """
  # Tracks how many descriptors of each name have been seen so far, which is
  # exactly the per-name index used to build legacy "<name>_<n>" ids.
  seen_counts = {}
  for index, descriptor in enumerate(plugin_descriptors):
    name = descriptor.plugin_name
    occurrence = seen_counts.get(name, 0)
    seen_counts[name] = occurrence + 1
    if "%s_%d" % (name, occurrence) == plugin_id:
      return index
  raise OutputPluginNotFoundError("Can't find output plugin %s" % plugin_id)
class ApiListFlowOutputPluginLogsHandlerBase(
    api_call_handler_base.ApiCallHandler):
  """Base class used to define log and error messages handlers."""
  __abstract = True  # pylint: disable=g-bad-name
  # Subclasses set this to the FlowOutputPluginLogEntry type they serve.
  log_entry_type = None

  def Handle(self, args, token=None):
    """Reads a page of output plugin log entries of the configured type."""
    client_id = str(args.client_id)
    flow_id = str(args.flow_id)
    flow_obj = data_store.REL_DB.ReadFlowObject(client_id, flow_id)
    plugin_index = GetOutputPluginIndex(flow_obj.output_plugins,
                                        args.plugin_id)
    output_plugin_id = "%d" % plugin_index
    entry_type = self.__class__.log_entry_type
    entries = data_store.REL_DB.ReadFlowOutputPluginLogEntries(
        client_id,
        flow_id,
        output_plugin_id,
        args.offset,
        args.count or db.MAX_COUNT,
        with_type=entry_type)
    total_count = data_store.REL_DB.CountFlowOutputPluginLogEntries(
        client_id, flow_id, output_plugin_id, with_type=entry_type)
    items = [
        entry.ToOutputPluginBatchProcessingStatus() for entry in entries
    ]
    return self.result_type(total_count=total_count, items=items)
class ApiListFlowOutputPluginLogsArgs(rdf_structs.RDFProtoStruct):
  """Arguments for ApiListFlowOutputPluginLogsHandler."""
  protobuf = flow_pb2.ApiListFlowOutputPluginLogsArgs
  rdf_deps = [
      client.ApiClientId,
      ApiFlowId,
  ]
class ApiListFlowOutputPluginLogsResult(rdf_structs.RDFProtoStruct):
  """Result of ApiListFlowOutputPluginLogsHandler."""
  protobuf = flow_pb2.ApiListFlowOutputPluginLogsResult
  rdf_deps = [
      output_plugin.OutputPluginBatchProcessingStatus,
  ]
class ApiListFlowOutputPluginLogsHandler(ApiListFlowOutputPluginLogsHandlerBase
                                        ):
  """Renders flow's output plugin's logs."""
  # Selects LOG-type entries in the shared base-class Handle().
  log_entry_type = rdf_flow_objects.FlowOutputPluginLogEntry.LogEntryType.LOG
  args_type = ApiListFlowOutputPluginLogsArgs
  result_type = ApiListFlowOutputPluginLogsResult
class ApiListFlowOutputPluginErrorsArgs(rdf_structs.RDFProtoStruct):
  """Arguments for ApiListFlowOutputPluginErrorsHandler."""
  protobuf = flow_pb2.ApiListFlowOutputPluginErrorsArgs
  rdf_deps = [
      client.ApiClientId,
      ApiFlowId,
  ]
class ApiListFlowOutputPluginErrorsResult(rdf_structs.RDFProtoStruct):
  """Result of ApiListFlowOutputPluginErrorsHandler."""
  protobuf = flow_pb2.ApiListFlowOutputPluginErrorsResult
  rdf_deps = [
      output_plugin.OutputPluginBatchProcessingStatus,
  ]
class ApiListFlowOutputPluginErrorsHandler(
    ApiListFlowOutputPluginLogsHandlerBase):
  """Renders flow's output plugin's errors."""
  # Selects ERROR-type entries in the shared base-class Handle().
  log_entry_type = rdf_flow_objects.FlowOutputPluginLogEntry.LogEntryType.ERROR
  args_type = ApiListFlowOutputPluginErrorsArgs
  result_type = ApiListFlowOutputPluginErrorsResult
class ApiListFlowsArgs(rdf_structs.RDFProtoStruct):
  """Arguments for ApiListFlowsHandler."""
  protobuf = flow_pb2.ApiListFlowsArgs
  rdf_deps = [
      client.ApiClientId,
  ]
class ApiListFlowsResult(rdf_structs.RDFProtoStruct):
  """Result of ApiListFlowsHandler."""
  protobuf = flow_pb2.ApiListFlowsResult
  rdf_deps = [
      ApiFlow,
  ]
class ApiListFlowsHandler(api_call_handler_base.ApiCallHandler):
  """Lists flows launched on a given client."""
  args_type = ApiListFlowsArgs
  result_type = ApiListFlowsResult

  def Handle(self, args, token=None):
    """Lists top-level flows with their children nested underneath."""
    all_flows = data_store.REL_DB.ReadAllFlowObjects(
        client_id=str(args.client_id))
    flows_by_id = {}
    for rdf_flow in all_flows:
      api_flow = ApiFlow().InitFromFlowObject(rdf_flow, with_args=False)
      # TODO(user): this is done for backwards API compatibility.
      # Remove when AFF4 is gone.
      api_flow.nested_flows = []
      flows_by_id[rdf_flow.flow_id] = api_flow
    # Attach children to their parents and remember which flows are children
    # so they can be excluded from the top-level listing.
    child_flow_ids = set()
    for rdf_flow in all_flows:
      parent_id = rdf_flow.parent_flow_id
      if not parent_id:
        continue
      if parent_id in flows_by_id:
        parent_flow = flows_by_id[parent_id]
        parent_flow.nested_flows.Append(flows_by_id[rdf_flow.flow_id])
        child_flow_ids.add(rdf_flow.flow_id)
    top_level = [
        f for f in itervalues(flows_by_id) if f.flow_id not in child_flow_ids
    ]
    # Newest flows first, then apply offset/count pagination.
    top_level.sort(key=lambda f: f.started_at, reverse=True)
    top_level = top_level[args.offset:]
    if args.count:
      top_level = top_level[:args.count]
    return ApiListFlowsResult(items=top_level)
class ApiCreateFlowArgs(rdf_structs.RDFProtoStruct):
  """Arguments for ApiCreateFlowHandler."""
  protobuf = flow_pb2.ApiCreateFlowArgs
  rdf_deps = [
      client.ApiClientId,
      ApiFlow,
      ApiFlowReference,
  ]
class ApiCreateFlowHandler(api_call_handler_base.ApiCallHandler):
  """Starts a flow on a given client with given parameters."""
  args_type = ApiCreateFlowArgs
  result_type = ApiFlow
  def Handle(self, args, token=None):
    """Starts the requested flow and returns its API representation.

    Raises:
      ValueError: If no client id is given.
      RuntimeError: If no flow name can be determined from the request.
    """
    if not args.client_id:
      raise ValueError("client_id must be provided")
    runner_args = args.flow.runner_args
    # The flow name may be given either on the flow itself or in runner args.
    flow_name = args.flow.name
    if not flow_name:
      flow_name = runner_args.flow_name
    if not flow_name:
      raise RuntimeError("Flow name is not specified.")
    # Clear all fields marked with HIDDEN, except for output_plugins - they are
    # marked HIDDEN, because we have a separate UI for them, not because they
    # shouldn't be shown to the user at all.
    #
    # TODO(user): Refactor the code to remove the HIDDEN label from
    # FlowRunnerArgs.output_plugins.
    runner_args.ClearFieldsWithLabel(
        rdf_structs.SemanticDescriptor.Labels.HIDDEN,
        exceptions="output_plugins")
    if args.original_flow:
      runner_args.original_flow = rdf_objects.FlowReference(
          flow_id=str(args.original_flow.flow_id),
          client_id=str(args.original_flow.client_id))
    flow_cls = registry.FlowRegistry.FlowClassByName(flow_name)
    # Resource limits are only forwarded when explicitly set.
    cpu_limit = None
    if runner_args.HasField("cpu_limit"):
      cpu_limit = runner_args.cpu_limit
    network_bytes_limit = None
    if runner_args.HasField("network_bytes_limit"):
      network_bytes_limit = runner_args.network_bytes_limit
    flow_id = flow.StartFlow(
        client_id=str(args.client_id),
        cpu_limit=cpu_limit,
        creator=token.username,
        flow_args=args.flow.args,
        flow_cls=flow_cls,
        network_bytes_limit=network_bytes_limit,
        original_flow=runner_args.original_flow,
        output_plugins=runner_args.output_plugins,
        parent_flow_obj=None,
    )
    # Re-read the freshly written flow to return its canonical stored state.
    flow_obj = data_store.REL_DB.ReadFlowObject(str(args.client_id), flow_id)
    res = ApiFlow().InitFromFlowObject(flow_obj)
    res.context = None
    return res
class ApiCancelFlowArgs(rdf_structs.RDFProtoStruct):
  """Arguments for ApiCancelFlowHandler."""
  protobuf = flow_pb2.ApiCancelFlowArgs
  rdf_deps = [
      client.ApiClientId,
      ApiFlowId,
  ]
class ApiCancelFlowHandler(api_call_handler_base.ApiCallHandler):
  """Cancels given flow on a given client."""
  args_type = ApiCancelFlowArgs

  def Handle(self, args, token=None):
    """Terminates the flow, recording that it was cancelled from the GUI."""
    client_id = str(args.client_id)
    flow_id = str(args.flow_id)
    flow_base.TerminateFlow(client_id, flow_id, reason="Cancelled in GUI")
class ApiListFlowDescriptorsResult(rdf_structs.RDFProtoStruct):
  """Result of ApiListFlowDescriptorsHandler."""
  protobuf = flow_pb2.ApiListFlowDescriptorsResult
  rdf_deps = [
      ApiFlowDescriptor,
  ]
class ApiListFlowDescriptorsHandler(api_call_handler_base.ApiCallHandler):
  """Renders all available flows descriptors."""
  result_type = ApiListFlowDescriptorsResult

  def __init__(self, access_check_fn=None):
    """Optionally takes a callable(username, flow_name) enforcing ACLs."""
    super(ApiListFlowDescriptorsHandler, self).__init__()
    self.access_check_fn = access_check_fn

  def Handle(self, args, token=None):
    """Renders list of descriptors for all the flows."""
    descriptors = []
    for name, cls in sorted(iteritems(registry.FlowRegistry.FLOW_REGISTRY)):
      # Flows without a category do not show up in the GUI.
      if not getattr(cls, "category", None):
        continue
      # Only show flows that the user is allowed to start.
      if self.access_check_fn:
        try:
          self.access_check_fn(token.username, name)
        except access_control.UnauthorizedAccess:
          continue
      descriptors.append(ApiFlowDescriptor().InitFromFlowClass(
          cls, token=token))
    return ApiListFlowDescriptorsResult(items=descriptors)
class ApiGetExportedFlowResultsArgs(rdf_structs.RDFProtoStruct):
  """Arguments for ApiGetExportedFlowResultsHandler."""
  protobuf = flow_pb2.ApiGetExportedFlowResultsArgs
  rdf_deps = [
      client.ApiClientId,
      ApiFlowId,
  ]
class ApiGetExportedFlowResultsHandler(api_call_handler_base.ApiCallHandler):
  """Exports results of a given flow with an instant output plugin."""
  args_type = ApiGetExportedFlowResultsArgs
  # Number of results fetched per DB read while streaming the export.
  _RESULTS_PAGE_SIZE = 1000
  def Handle(self, args, token=None):
    """Streams the flow's results through the named instant output plugin."""
    iop_cls = instant_output_plugin.InstantOutputPlugin
    plugin_cls = iop_cls.GetPluginClassByPluginName(args.plugin_name)
    # TODO(user): Instant output plugins shouldn't depend on tokens
    # and URNs.
    flow_urn = rdfvalue.RDFURN("{}/flows/{}".format(args.client_id,
                                                    args.flow_id))
    plugin = plugin_cls(source_urn=flow_urn, token=token)
    client_id = str(args.client_id)
    flow_id = str(args.flow_id)
    types = data_store.REL_DB.CountFlowResultsByType(client_id, flow_id)
    def FetchFn(type_name):
      """Fetches all flow results of a given type."""
      offset = 0
      while True:
        results = data_store.REL_DB.ReadFlowResults(
            client_id,
            flow_id,
            offset=offset,
            count=self._RESULTS_PAGE_SIZE,
            with_type=type_name)
        # An empty page means all results of this type have been read.
        if not results:
          break
        for r in results:
          msg = r.AsLegacyGrrMessage()
          msg.source = client_id
          yield msg
        offset += self._RESULTS_PAGE_SIZE
    content_generator = instant_output_plugin.ApplyPluginToTypedCollection(
        plugin, types, FetchFn)
    return api_call_handler_base.ApiBinaryStream(
        plugin.output_file_name, content_generator=content_generator)
|
|
import re
import os
import shutil
from gppylib.db import dbconn
from test.behave_utils.utils import check_schema_exists, check_table_exists, drop_table_if_exists
from gppylib.operations.backup_utils import get_lines_from_file
from behave import given, when, then
CREATE_MULTI_PARTITION_TABLE_SQL = """
CREATE TABLE %s.%s (trans_id int, date date, amount decimal(9,2), region text)
WITH (appendonly=true, orientation=column)
DISTRIBUTED BY (trans_id)
PARTITION BY RANGE (date)
SUBPARTITION BY LIST (region)
SUBPARTITION TEMPLATE
( SUBPARTITION usa VALUES ('usa'),
SUBPARTITION asia VALUES ('asia'),
SUBPARTITION europe VALUES ('europe'),
DEFAULT SUBPARTITION other_regions)
(START (date '2011-01-01') INCLUSIVE
END (date '2012-01-01') EXCLUSIVE
EVERY (INTERVAL '5 month'),
DEFAULT PARTITION outlying_dates)
"""
CREATE_PARTITION_TABLE_SQL = """
CREATE TABLE %s.%s (id int, date date) WITH (appendonly=true, orientation=column)
DISTRIBUTED BY (id)
PARTITION BY RANGE (date)
( START (date '2008-01-01') INCLUSIVE
END (date '2008-01-04') EXCLUSIVE
EVERY (INTERVAL '1 day'),
DEFAULT PARTITION default_dates);
"""
@given('there is a regular "{storage_type}" table "{tablename}" with column name list "{col_name_list}" and column type list "{col_type_list}" in schema "{schemaname}"')
def impl(context, storage_type, tablename, col_name_list, col_type_list, schemaname):
    """Creates a table with the given columns and verifies it exists."""
    # The existence check needs the schema name without surrounding quotes;
    # the quoted form is still used when building the fully qualified name.
    if '"' in schemaname:
        unquoted_schema = schemaname[1:-1]
    else:
        unquoted_schema = schemaname
    if not check_schema_exists(context, unquoted_schema, context.dbname):
        raise Exception("Schema %s does not exist in database %s" % (unquoted_schema, context.dbname))
    fq_table = '.'.join([schemaname, tablename])
    drop_table_if_exists(context, fq_table, context.dbname)
    create_table_with_column_list(context.conn, storage_type, schemaname, tablename, col_name_list, col_type_list)
    check_table_exists(context, context.dbname, fq_table, table_type=storage_type)
@given('there is a hard coded ao partition table "{tablename}" with 4 child partitions in schema "{schemaname}"')
def impl(context, tablename, schemaname):
    """Create the canned single-level ao partition table in the schema."""
    if not check_schema_exists(context, schemaname, context.dbname):
        raise Exception("Schema %s does not exist in database %s" % (schemaname, context.dbname))
    qualified = '.'.join([schemaname, tablename])
    drop_table_if_exists(context, qualified, context.dbname)
    dbconn.execSQL(context.conn, CREATE_PARTITION_TABLE_SQL % (schemaname, tablename))
    context.conn.commit()
    check_table_exists(context, context.dbname, qualified, table_type='ao')
@given('there is a hard coded multi-level ao partition table "{tablename}" with 4 mid-level and 16 leaf-level partitions in schema "{schemaname}"')
def impl(context, tablename, schemaname):
    """Create the canned two-level ao partition table in the schema."""
    if not check_schema_exists(context, schemaname, context.dbname):
        raise Exception("Schema %s does not exist in database %s" % (schemaname, context.dbname))
    qualified = '.'.join([schemaname, tablename])
    drop_table_if_exists(context, qualified, context.dbname)
    dbconn.execSQL(context.conn, CREATE_MULTI_PARTITION_TABLE_SQL % (schemaname, tablename))
    context.conn.commit()
    check_table_exists(context, context.dbname, qualified, table_type='ao')
@given('no state files exist for database "{dbname}"')
def impl(context, dbname):
    """Blow away the whole analyze state directory tree for the database."""
    target = get_analyze_dir(dbname)
    if os.path.exists(target):
        shutil.rmtree(target)
@then('"{number}" analyze directories exist for database "{dbname}"')
def impl(context, number, dbname):
dirs_found = get_list_of_analyze_dirs(dbname)
if str(number) != str(len(dirs_found)):
raise Exception("number of directories expected, %s, didn't match number found: %s" % (
str(number), str(len(dirs_found))))
@given('a view "{view_name}" exists on table "{table_name}" in schema "{schema_name}"')
def impl(context, view_name, table_name, schema_name):
create_view_on_table(context.conn, schema_name, table_name, view_name)
@given('"{qualified_table}" appears in the latest state files')
@then('"{qualified_table}" should appear in the latest state files')
def impl(context, qualified_table):
found, filename = table_found_in_state_file(context.dbname, qualified_table)
if not found:
if filename == '':
assert False, "no state files found for database %s" % context.dbname
else:
assert False, "table %s not found in state file %s" % (qualified_table, os.path.basename(filename))
@given('"{expected_result}" should appear in the latest ao_state file in database "{dbname}"')
@then('"{expected_result}" should appear in the latest ao_state file in database "{dbname}"')
def impl(context, expected_result, dbname):
latest_file = get_latest_aostate_file(dbname)
with open(latest_file, 'r') as f:
for line in f:
if expected_result in line:
return True
raise Exception("couldn't find %s in %s" % (expected_result, latest_file))
@given('columns "{col_name_list}" of table "{qualified_table}" appear in the latest column state file')
@then('columns "{col_name_list}" of table "{qualified_table}" should appear in the latest column state file')
def impl(context, col_name_list, qualified_table):
found, column, filename = column_found_in_state_file(context.dbname, qualified_table, col_name_list)
if not found:
if filename == '':
assert False, "no column state file found for database %s" % context.dbname
else:
assert False, "column(s) %s of table %s not found in state file %s" % (
column, qualified_table, os.path.basename(filename))
@given('column "{col_name}" of table "{qualified_table}" does not appear in the latest column state file')
@then('column "{col_name}" of table "{qualified_table}" should not appear in the latest column state file')
def impl(context, col_name, qualified_table):
found, column, filename = column_found_in_state_file(context.dbname, qualified_table, col_name)
if found:
if filename == '':
assert False, "no column state file found for database %s" % context.dbname
else:
assert False, "unexpected column %s of table %s found in state file %s" % (
column, qualified_table, os.path.basename(filename))
@given('"{qualified_table}" appears in the latest report file')
@then('"{qualified_table}" should appear in the latest report file')
def impl(context, qualified_table):
found, filename = table_found_in_report_file(context.dbname, qualified_table)
if not found:
assert False, "table %s not found in report file %s" % (qualified_table, os.path.basename(filename))
@then('output should contain either "{output1}" or "{output2}"')
def impl(context, output1, output2):
    """Assert stdout matches at least one of the two regex patterns."""
    stdout = context.stdout_message
    patterns = [re.compile(p) for p in (output1, output2)]
    if not any(p.search(stdout) for p in patterns):
        raise Exception("Expected stdout string '%s' or '%s', but found:\n'%s'" % (output1, output2, stdout))
@then('output should not contain "{output1}"')
def impl(context, output1):
    """Assert stdout does NOT match the regex pattern."""
    stdout = context.stdout_message
    if re.compile(output1).search(stdout):
        raise Exception("Unexpected stdout string '%s', found:\n'%s'" % (output1, stdout))
@then('output should contain both "{output1}" and "{output2}"')
def impl(context, output1, output2):
    """Assert stdout matches both regex patterns."""
    stdout = context.stdout_message
    patterns = [re.compile(p) for p in (output1, output2)]
    if not all(p.search(stdout) for p in patterns):
        raise Exception("Expected stdout string '%s' and '%s', but found:\n'%s'" % (output1, output2, stdout))
@given('table "{qualified_table}" does not appear in the latest state files')
def impl(context, qualified_table):
found, filename = table_found_in_state_file(context.dbname, qualified_table)
if found:
delete_table_from_state_files(context.dbname, qualified_table)
@given('some data is inserted into table "{tablename}" in schema "{schemaname}" with column type list "{column_type_list}"')
@when('some data is inserted into table "{tablename}" in schema "{schemaname}" with column type list "{column_type_list}"')
def impl(context, tablename, schemaname, column_type_list):
    # Inserts 100 rows of random values cast to the declared column types.
    insert_data_into_table(context.conn, schemaname, tablename, column_type_list)
@given('some ddl is performed on table "{tablename}" in schema "{schemaname}"')
def impl(context, tablename, schemaname):
    # DDL-touches the table (add then drop a throwaway column).
    perform_ddl_on_table(context.conn, schemaname, tablename)
@given('the user starts a transaction and runs "{query}" on "{dbname}"')
@when('the user starts a transaction and runs "{query}" on "{dbname}"')
def impl(context, query, dbname):
    # Lazily open a connection that survives across steps, then run the
    # query inside an explicit BEGIN so later steps can commit/rollback.
    if 'long_lived_conn' not in context:
        create_long_lived_conn(context, dbname)
    dbconn.execSQL(context.long_lived_conn, 'BEGIN; %s' % query)
@given('the user commits transaction')
@when('the user commits transaction')
def impl(context):
    # Ends the transaction opened by the 'starts a transaction' step.
    dbconn.execSQL(context.long_lived_conn, 'END;')
@given('the user rollsback the transaction')
@when('the user rollsback the transaction')
def impl(context):
    # Rolls back the transaction opened by the 'starts a transaction' step.
    dbconn.execSQL(context.long_lived_conn, 'ROLLBACK;')
@then('the latest state file should have a mod count of {mod_count} for table "{table}" in "{schema}" schema for database "{dbname}"')
def impl(context, mod_count, table, schema, dbname):
    """Compare the expected mod count (string) against the ao state file."""
    recorded = get_mod_count_in_state_file(dbname, schema, table)
    if recorded != mod_count:
        raise Exception(
            "mod_count %s does not match mod_count %s in state file for %s.%s" %
            (mod_count, recorded, schema, table))
def get_mod_count_in_state_file(dbname, schema, table):
    """Return the mod count recorded for schema.table in the newest ao state
    file, or -1 when no line mentions the table.

    The count comes back as a string (third comma-separated field of the
    first matching line); matching is by substring of 'schema,table'.
    """
    # renamed from 'file', which shadowed the builtin
    state_file = get_latest_aostate_file(dbname)
    comma_name = ','.join([schema, table])
    for line in get_lines_from_file(state_file):
        if comma_name in line:
            return line.split(',')[2]
    return -1
def create_long_lived_conn(context, dbname):
    # Opens a connection stored on the behave context so that multiple
    # steps can share one session (e.g. an open transaction).
    context.long_lived_conn = dbconn.connect(dbconn.DbURL(dbname=dbname))
def table_found_in_state_file(dbname, qualified_table):
    """Check that 'schema.table' is mentioned in every latest state file.

    Returns (found, state_file): True with the last file scanned when every
    state file has a line containing 'schema,table'; otherwise False with the
    first file that lacks it ('' when there are no state files at all).

    NOTE: matching is by substring, so a name that is a prefix of another
    (s,t vs s,t2) can false-positive — kept for compatibility with the
    other state-file helpers in this module.
    """
    comma_name = ','.join(qualified_table.split('.'))
    files = get_latest_analyze_state_files(dbname)
    if len(files) == 0:
        return False, ""
    state_file = ""
    for state_file in files:
        # any() stops at the first matching line; the old loop kept scanning
        # the whole file after setting found=True ('continue' vs 'break')
        if not any(comma_name in line for line in get_lines_from_file(state_file)):
            return False, state_file
    return True, state_file
def table_found_in_report_file(dbname, qualified_table):
    """Return (found, report_file) for an exact-line match of the table name."""
    report_file = get_latest_analyze_report_file(dbname)
    for row in get_lines_from_file(report_file):
        if row == qualified_table:
            return True, report_file
    return False, report_file
def column_found_in_state_file(dbname, qualified_table, col_name_list):
    """Look up the given columns of schema.table in the latest column state file.

    Returns (found, column, state_file):
      - (True, "", state_file) when the first line matching the table lists
        every requested column;
      - (False, column, state_file) naming the first missing column;
      - (False, col_name_list, state_file) when no line matches the table;
      - (False, "", "") when there are no state files at all.
    """
    comma_name = ','.join(qualified_table.split('.'))
    files = get_latest_analyze_state_files(dbname)
    if len(files) == 0:
        return False, "", ""
    for state_file in files:
        if "col_state_file" not in state_file:
            continue  # only the column state file is relevant here
        for line in get_lines_from_file(state_file):
            if comma_name in line:
                # columns are the comma-separated fields after the first two
                # (presumably schema and table) — verify against the writer
                for column in col_name_list.split(','):
                    if column not in line.split(',')[2:]:
                        return False, column, state_file
                return True, "", state_file
        # table never matched in the column state file
        return False, col_name_list, state_file
def delete_table_from_state_files(dbname, qualified_table):
    """Rewrite each latest state file with every line mentioning
    'schema,table' removed (substring match, mirroring
    table_found_in_state_file).
    """
    comma_name = ','.join(qualified_table.split('.'))
    files = get_latest_analyze_state_files(dbname)
    for filename in files:
        lines = get_lines_from_file(filename)
        # 'with' guarantees the handle is closed even if a write fails
        # (the old explicit close() leaked the handle on exception)
        with open(filename, "w") as out:
            for line in lines:
                if comma_name not in line:
                    out.write(line)
def get_list_of_analyze_dirs(dbname):
    """Return analyze subdirectories (joined paths), newest name first."""
    analyze_dir = get_analyze_dir(dbname)
    if not os.path.exists(analyze_dir):
        return []
    ordered_list = [os.path.join(analyze_dir, entry)
                    for entry in sorted(os.listdir(analyze_dir), reverse=True)]
    # list comprehension instead of filter(): on Python 3 filter() returns a
    # lazy iterator and callers take len() of this result
    return [path for path in ordered_list if os.path.isdir(path)]
def get_latest_analyze_dir(dbname):
    """Return the newest analyze directory for dbname, or [] when none exist
    (callers test the result with 'if not ...')."""
    folders = get_list_of_analyze_dirs(dbname)
    if len(folders) == 0:
        return []
    # entries are already joined with the analyze dir; the old extra
    # os.path.join(analyze_dir, folders[0]) only worked because join drops
    # the left part for an absolute right-hand side, and would produce a
    # doubled path if MASTER_DATA_DIRECTORY were relative
    return folders[0]
def get_analyze_dir(dbname):
    """Return <MASTER_DATA_DIRECTORY>/db_analyze/<dbname>.

    Raises a clear Exception when MASTER_DATA_DIRECTORY is unset, instead of
    letting os.path.join fail on None with an opaque TypeError.
    """
    master_data_dir = os.environ.get('MASTER_DATA_DIRECTORY')
    if not master_data_dir:
        raise Exception("MASTER_DATA_DIRECTORY environment variable is not set")
    analyze_dir = os.path.join(master_data_dir, 'db_analyze', dbname)
    return analyze_dir
def get_latest_aostate_file(dbname):
    """Return the path of the ao_state file among the latest state files,
    or None when there is none."""
    matches = [p for p in get_latest_analyze_state_files(dbname) if 'ao_state' in p]
    return matches[0] if matches else None
def get_latest_analyze_state_files(dbname):
    """
    Return the latest state files (absolute paths).

    The newest analyze directory must hold exactly four files; any file
    whose name contains 'report' is excluded from the result.
    """
    state_dir = get_latest_analyze_dir(dbname)
    if not state_dir:
        return []
    entries = os.listdir(state_dir)
    if len(entries) != 4:
        raise Exception("Missing or unexpected state files in folder %s" % state_dir)
    return [os.path.join(state_dir, name) for name in entries if 'report' not in name]
def get_latest_analyze_report_file(dbname):
    """
    Return the latest report file (absolute path).
    """
    report_dir = get_latest_analyze_dir(dbname)
    if not report_dir:
        return []
    for name in os.listdir(report_dir):
        if 'report' in name:
            return os.path.join(report_dir, name)
    raise Exception("Missing report file in folder %s" % report_dir)
def create_table_with_column_list(conn, storage_type, schemaname, tablename, col_name_list, col_type_list):
    """Create a heap/ao/co table from comma-separated column names and types."""
    col_names = col_name_list.strip().split(',')
    col_types = col_type_list.strip().split(',')
    col_list = ' (' + ','.join('%s %s' % pair for pair in zip(col_names, col_types)) + ') '
    storage_options = {
        'heap': '',
        'ao': " with (appendonly=true) ",
        'co': " with (appendonly=true, orientation=column) ",
    }
    if storage_type.lower() not in storage_options:
        raise Exception("Invalid storage type")
    storage_str = storage_options[storage_type.lower()]
    # NOTE: identifiers are interpolated directly — test-only helper, inputs
    # come from the feature files, not from untrusted users
    query = 'CREATE TABLE %s.%s %s %s DISTRIBUTED RANDOMLY' % (schemaname, tablename, col_list, storage_str)
    dbconn.execSQL(conn, query)
    conn.commit()
def insert_data_into_table(conn, schemaname, tablename, col_type_list):
    """Insert 100 rows of random values cast to the given column types."""
    casts = ','.join("(random()*i)::%s" % col_type for col_type in col_type_list.strip().split(','))
    query = "INSERT INTO " + schemaname + '.' + tablename + " SELECT " + casts + " FROM generate_series(1,100) i"
    dbconn.execSQL(conn, query)
    conn.commit()
def perform_ddl_on_table(conn, schemaname, tablename):
    """DDL-touch the table: add then drop a throwaway column."""
    target = schemaname + '.' + tablename
    dbconn.execSQL(conn, "ALTER TABLE " + target + " ADD COLUMN tempcol int default 0")
    dbconn.execSQL(conn, "ALTER TABLE " + target + " DROP COLUMN tempcol")
    conn.commit()
def create_view_on_table(conn, schemaname, tablename, viewname):
    """Create or replace a view selecting every row of the table."""
    query = ("CREATE OR REPLACE VIEW " + schemaname + "." + viewname +
             " AS SELECT * FROM " + schemaname + "." + tablename)
    dbconn.execSQL(conn, query)
    conn.commit()
|
|
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Wav2Vec2 checkpoint."""
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
Speech2Text2Config,
Speech2Text2ForCausalLM,
Speech2Text2Tokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
Wav2Vec2Config,
Wav2Vec2FeatureExtractor,
Wav2Vec2Model,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# fairseq parameter-name fragment -> HF Wav2Vec2 parameter path.
# A '*' stands for the encoder layer index, substituted at load time.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
# NOTE(review): not referenced in this file — presumably shared with sibling
# wav2vec2 conversion scripts for weights that sit at the top level of the
# HF model; verify before removing.
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Assign `value` into the HF module attribute addressed by dotted `key`.

    `weight_type` picks which parameter of the resolved module receives the
    data ('weight', 'weight_g', 'weight_v' or 'bias'); None writes to the
    resolved object itself.  Shapes are checked before assignment.
    """
    target = hf_pointer
    for part in key.split("."):
        target = getattr(target, part)
    if weight_type is not None:
        expected = getattr(target, weight_type).shape
    else:
        expected = target.shape
    assert (
        expected == value.shape
    ), f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {expected}, but should be {value.shape} for {full_name}"
    if weight_type in ("weight", "weight_g", "weight_v", "bias"):
        getattr(target, weight_type).data = value
    else:
        target.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    """Copy fairseq wav2vec2 encoder weights into the HF Wav2Vec2Model.

    Conv feature-extractor tensors are dispatched to load_conv_layer();
    everything else is matched against MAPPING.  Returns the fairseq 'proj'
    module (encoder->decoder projection) when present, else None.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            # keep the whole projection module; the caller copies its
            # weight/bias into enc_to_dec_proj
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # second clause matches mapping keys whose 'w2v_model.'
                # prefix was stripped from the checkpoint name
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # the layer index sits just before the matched fragment
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                # NOTE(review): every MAPPING entry is scanned even after a
                # match ('continue' is a no-op here, not a 'break')
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq conv feature-extractor tensor into the HF model.

    `full_name` looks like '...conv_layers.<layer_id>.<type_id>.<param>':
    type_id 0 is the convolution itself, type_id 2 a group/layer norm.
    Anything else is recorded in `unused_weights`.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert (
                value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape
            ), f"{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert (
                value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape
            ), f"{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            # fixed: the failure message used feature_extractor[layer_id],
            # which raised TypeError instead of showing the intended message
            assert (
                value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape
            ), f"{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            # fixed: same feature_extractor[layer_id] bug as above
            assert (
                value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape
            ), f"{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    """Build a bias-free Linear that shares weights with an embedding.

    The layer maps hidden states (emb_size in) to vocabulary logits
    (vocab_size out); its weight tensor is the embedding matrix itself.
    """
    vocab_size, emb_size = emb.weight.shape
    # in_features/out_features were swapped before; forward() still worked
    # because the weight was overwritten (nn.Linear weight is (out, in)),
    # but the layer's metadata was wrong
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    """Build a token->id vocabulary from a fairseq dict file.

    The file holds one '<token> <count>' entry per line; ids 0-3 are
    reserved for the special tokens and real tokens start at 4 in file
    order.
    """
    with open(dict_path, "r", encoding="utf-8") as f:
        tokens = [line.split(" ")[0] for line in f.readlines()]
    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }
    for token_id, token in enumerate(tokens, start=4):
        vocab_dict[token] = token_id
    return vocab_dict
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """
    Copy/paste/tweak model's weights to transformers design.

    Loads a fairseq wav2vec2 + speech-to-text checkpoint, copies the encoder
    and decoder weights into a HF SpeechEncoderDecoderModel, then writes the
    vocab, tokenizer, feature extractor, config and model to
    pytorch_dump_folder_path.
    """
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )
    # fairseq expects the dict file to live in the checkpoint's data dir
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)
    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")
    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False
    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)
    vocab_dict = create_vocab_dict(dict_path)
    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)
    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    # propagate the tokenizer's special-token ids into the combined config
    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"
    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=10224, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
args = parser.parse_args()
convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
|
|
#
# Khan Academy Exercises for Course Builder
#
# Copyright (C) 2013 Pavel Simakov (pavel@vokamis.com)
# https://github.com/psimakov/khan-exercises-coursebuilder
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# USA
#
"""Khan Academy exercises for Course Builder.
This extension lets you host Khan Academy exercises in your Course Builder
course. These exercises make learning complex subjects fun and they are free to
everyone! However, the framework is very complex; it's difficult to embed its
exercises into another web site. We made framework changes, which make it
easier to embed the exercises and collect the exercise results right in your
Course Builder course.
Here is how to install, activate and use this module:
- download Course Builder (1.4.1)
- download this package
- copy all files in this package into /modules/khanex/... folder of your
Course Builder application folder
- edit main.app of your application
- add new import where all other modules are imported:
import modules.khanex.khanex
- enable the module, where all other modules are enabled:
modules.khanex.khanex.register_module().enable()
- restart your local development server or re-deploy your application
- edit a lesson using visual editor; you should be able to add a new
component type "Khan Academy Exercise"
- the component editor should show a list of all exercises available in a
dropdown list; there should be over 400 exercises listed here
- pick one exercise, save the component configuration
- add empty exercise definition var activity = []; uncheck 'Activity Listed'
- save the lesson
- enable gcb_can_persist_activity_events and gcb_can_persist_page_events
- preview the lesson
- click "Check Answer" and see how data is recorded in the datastore
EventEntity table with a namespace appropriate for your course
- this is it!
This work is based on my other project, which brings Khan Academy exercises to
WordPress. You can learn more about it here:
http://www.softwaresecretweapons.com/jspwiki/khan-exercises
Here are the things I found difficult to do while completing this integration:
- if a unit is marked completed in a progress tracking system, and author
adds new unit - the progress indicator is now wrong and must be recomputed
to account for a new added unit
- why don't we show circles next to the lessons of the left nav. of the unit?
- how does a tag/module know what unit/lesson is being shown now? we need to
pass some kind of course_context into tag/module... how much of view model
do we expose to the tag/module? can a tag instance introspect what is
above and below it in the rendering context?
- we do show "Loading..." indicator while exercise JavaScript loads up,
but it is dismissed too early now
- tag.render() method must give access to the handler, ideally before the
rendering phase begins
- our model for tracking progress assumes lesson is an atomic thing; so
if one has 3 exercises on one lesson page, only one marker is used and
it is not possible to track progress of individual exercises; ideally any
container should automatically support progress tracking for its children
We need to improve these over time.
Good luck!
"""
__author__ = 'Pavel Simakov (pavel@vokamis.com)'
import cgi
import os
import urllib2
import urlparse
from xml.etree import cElementTree
import zipfile
from common import schema_fields
from common import tags
from controllers import sites
from controllers import utils
from models.config import ConfigProperty
from models.counters import PerfCounter
from models import custom_modules
from models import models
from models import transforms
# Counts every 'Check Answer' submission recorded across all exercises.
ATTEMPT_COUNT = PerfCounter(
    'gcb-khanex-attempt-count',
    'A number of attempts made by all users on all exercises.')
# Admin-editable list restricting which exercises students may see;
# an empty value means every exercise is available (see _allowed()).
WHITELISTED_EXERCISES = ConfigProperty(
    '_khanex_whitelisted', str, (
        'A white-listed exercises that can be show to students. If this list '
        'is empty, all exercises are available.'),
    default_value='', multiline=True)
# Bundled khan-exercises distribution and the exercises' path inside it.
ZIP_FILE = os.path.join(os.path.dirname(__file__), 'khan-exercises.zip')
EXERCISE_BASE = 'khan-exercises/khan-exercises/exercises/'
# Shell page served for the 'raw' render mode; the exercise framework fills
# in the card contents client-side.  Fixed markup: '<html">' typo and the
# unclosed <head> ('<head>' where '</head>' was intended).
EXERCISE_HTML_PAGE_RAW = (
    """<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
</head>
<body>
<div style="width: 100%; margin: 0px;">
<style>
/* hide optional elements */
div.exercises-header {
display: none;
}
div#extras{
display: none;
}
/* customize color scheme to match */
article.exercises-content {
background-color: transparent;
}
div#workarea {
background-color: white;
padding: 8px;
margin-bottom: 8px;
}
</style>
<header style="display: none;" />
<div id="container" class="single-exercise visited-no-recolor"
style="overflow: hidden;">
<article class="exercises-content clearfix">
<div class="exercises-header"><h2 class="section-headline">
<div class="topic-exercise-badge"> </div>
<span class="practice-exercise-topic-context">Practicing</span>
</h2></div>
<div class="exercises-body">
<div class="exercises-stack"> </div>
<div class="exercises-card current-card">
<div class="current-card-container card-type-problem">
<div class="current-card-container-inner vertical-shadow">
<div class="current-card-contents">
</div>
</div>
<div id="extras" class="single-exercise">
<ul>
<li>
<a id="scratchpad-show" href style>
Show scratchpad</a>
<span id="scratchpad-not-available"
style="display: none;">
Scratchpad not available</span>
</li>
<li class="debug-mode">
<a href="?debug">Debug mode</a>
</li>
<li>
<a id="problem-permalink" href>
Problem permalink</a>
</li>
</ul>
</div>
</div>
</div>
</div>
</article>
</div>
<footer id="footer" class="short" style="display: none;"></footer>
</div>
</body>
</html>""")
def _allowed(name):
    """Checks if an exercise name is whitelisted for use."""
    whitelist = WHITELISTED_EXERCISES.value
    # NOTE: membership is a substring test on the raw config text
    return (not whitelist) or (name in whitelist)
class KhanExerciseTag(tags.BaseTag):
    """Custom tag for embedding Khan Academy Exercises."""
    @classmethod
    def name(cls):
        return 'Khan Academy Exercise'
    @classmethod
    def vendor(cls):
        return 'psimakov'
    def render(self, node, unused_handler):
        """Embed just a <script> tag that will in turn create an <iframe>."""
        exercise_name = node.attrib.get('name')
        caption = exercise_name.replace('_', ' ')
        embed_src = 'khan-exercises/embed.js?static:%s' % exercise_name
        return cElementTree.XML(
            """
<div style='width: 450px;'>
Khan Academy Exercise: %s
<br/>
<script>
// customize the style of the exercise iframe
var ity_ef_style = "width: 750px;";
</script>
<script src="%s" type="text/javascript"></script>
</div>""" % (
                cgi.escape(caption), embed_src))
    def get_schema(self, unused_handler):
        """Make schema with a list of all exercises by inspecting a zip file."""
        bundle = zipfile.ZipFile(ZIP_FILE)
        exercise_list = [
            entry[len(EXERCISE_BASE):] for entry in bundle.namelist()
            if entry.startswith(EXERCISE_BASE) and entry != EXERCISE_BASE]
        items = []
        index = 1
        for url in sorted(exercise_list):
            exercise_name = url.replace('.html', '')
            if _allowed(exercise_name):
                caption = exercise_name.replace('_', ' ')
                items.append((exercise_name, '#%s: %s' % (index, caption)))
                index += 1
        reg = schema_fields.FieldRegistry('Khan Exercises')
        reg.add_property(
            schema_fields.SchemaField(
                'name', 'Exercises', 'select', optional=True,
                select_data=items,
                description=('The relative URL name of the exercise.')))
        return reg
class KhanExerciseRenderer(utils.BaseHandler):
    """A handler that renders Khan Academy Exercise."""
    def _render_indirect(self, slug):
        """Stream exercise HTML out of the bundled zip for a 'static:<name>' slug."""
        parts = slug.split(':')
        if len(parts) != 2:
            raise Exception(
                'Error processing request. Expected \'ity_ef_slug\' in a form '
                'of \'protocol:identifier\'.')
        if 'static' != parts[0]:
            raise Exception('Bad protocol.')
        zip_file = zipfile.ZipFile(ZIP_FILE)
        html_file = zip_file.open(EXERCISE_BASE + parts[1] + '.html')
        self.response.write(html_file.read())
    def _record_student_submission(self, data):
        """Record data in a specific course namespace.

        Returns False when the requester is not an enrolled student; True
        once the submission event is stored and lesson progress updated.
        """
        # get student
        student = self.personalize_page_and_get_enrolled()
        if not student:
            return False
        # record submission
        models.EventEntity.record(
            'module-khanex.exercise-submit', self.get_user(), data)
        # update progress
        unit_id, lesson_id = self._get_unit_lesson_from(data)
        self.get_course().get_progress_tracker().put_activity_accessed(
            student, unit_id, lesson_id)
        return True
    def _get_unit_lesson_from(self, data):
        """Extract unit and lesson id from exercise data submission."""
        # we need to figure out unit and lesson id for the exercise;
        # we currently have no direct way of doing it, so we have to do it
        # indirectly == ugly...; an exercise captures a page URL where it was
        # embedded; we can parse that URL out and find all the interesting
        # parts from the query string
        unit_id = 0
        lesson_id = 0
        json = transforms.loads(data)
        if json:
            location = json.get('location')
            if location:
                location = urllib2.unquote(location)
                params_map = urlparse.parse_qs(location)
                ity_ef_origin = params_map.get('ity_ef_origin')
                if ity_ef_origin:
                    ity_ef_origin = ity_ef_origin[0]
                    origin_path = urlparse.urlparse(ity_ef_origin)
                    if origin_path.query:
                        query = urlparse.parse_qs(origin_path.query)
                        unit_id = self._int_list_to_int(query.get('unit'))
                        lesson_id = self._int_list_to_int(query.get('lesson'))
                        # when we are on the first lesson of a unit, lesson_id
                        # is not present :(; look it up
                        if not lesson_id:
                            lessons = self.get_course().get_lessons(unit_id)
                            if lessons:
                                lesson_id = lessons[0].lesson_id
        return unit_id, lesson_id
    def _int_list_to_int(self, list):
        # Return the first element of 'list' as an int, or 0 when it is
        # empty/None.  (Parameter name shadows the builtin 'list'.)
        if list:
            return int(list[0])
        return 0
    def post(self):
        """Handle POST, i.e. 'Check Answer' button is pressed."""
        data = self.request.get('ity_ef_audit')
        if self._record_student_submission(data):
            ATTEMPT_COUNT.inc()
            self.response.write('{}')  # we must return valid JSON on success
            return
        self.error(404)
    def get(self):
        """Handle GET: either the raw shell page or one exercise by slug."""
        rule = self.request.get('ity_ef_rule')
        slug = self.request.get('ity_ef_slug')
        # render raw
        if rule == 'raw':
            self.response.write(EXERCISE_HTML_PAGE_RAW)
            return
        # render indirect
        if slug and _allowed(slug):
            self._render_indirect(slug)
            return
        self.error(404)
# Module handle created by register_module(); None until registration runs.
custom_module = None


def register_module():
    """Registers this module in the registry."""

    # register custom tag
    tags.Registry.add_tag_binding('khanex', KhanExerciseTag)

    # register handler: one zip-serving route and one render route
    zip_handler = (
        '/khan-exercises', sites.make_zip_handler(ZIP_FILE))
    render_handler = (
        '/khan-exercises/khan-exercises/indirect/', KhanExerciseRenderer)

    # register module; handlers are global (non-course) routes
    global custom_module
    custom_module = custom_modules.Module(
        'Khan Academy Exercise',
        'A set of pages for delivering Khan Academy Exercises via '
        'Course Builder.',
        [], [render_handler, zip_handler])
    return custom_module
|
|
"""Test the Panasonic Viera setup process."""
from homeassistant.components.panasonic_viera.const import (
ATTR_DEVICE_INFO,
ATTR_FRIENDLY_NAME,
ATTR_MANUFACTURER,
ATTR_MODEL_NUMBER,
ATTR_UDN,
CONF_APP_ID,
CONF_ENCRYPTION_KEY,
CONF_ON_ACTION,
DEFAULT_NAME,
DEFAULT_PORT,
DOMAIN,
)
from homeassistant.config_entries import ENTRY_STATE_NOT_LOADED
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_PORT
from homeassistant.setup import async_setup_component
from tests.async_mock import Mock, patch
from tests.common import MockConfigEntry
# Base config entry data shared by every test below.
MOCK_CONFIG_DATA = {
    CONF_HOST: "0.0.0.0",
    CONF_NAME: DEFAULT_NAME,
    CONF_PORT: DEFAULT_PORT,
    CONF_ON_ACTION: None,
}

# Extra credentials merged into the entry data for encrypted-entry tests.
MOCK_ENCRYPTION_DATA = {
    CONF_APP_ID: "mock-app-id",
    CONF_ENCRYPTION_KEY: "mock-encryption-key",
}

# Device info returned by the mocked remote's async_get_device_info().
MOCK_DEVICE_INFO = {
    ATTR_FRIENDLY_NAME: DEFAULT_NAME,
    ATTR_MANUFACTURER: "mock-manufacturer",
    ATTR_MODEL_NUMBER: "mock-model-number",
    ATTR_UDN: "mock-unique-id",
}
def get_mock_remote(device_info=MOCK_DEVICE_INFO):
    """Return a mock remote."""
    remote = Mock()

    # Stub coroutines: creating the control channel is a no-op, and the
    # device-info query simply echoes back the supplied dict (or None).
    async def async_create_remote_control(during_setup=False):
        return

    async def async_get_device_info():
        return device_info

    remote.async_create_remote_control = async_create_remote_control
    remote.async_get_device_info = async_get_device_info
    return remote
async def test_setup_entry_encrypted(hass):
    """Test setup with encrypted config entry."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        unique_id=MOCK_DEVICE_INFO[ATTR_UDN],
        data={**MOCK_CONFIG_DATA, **MOCK_ENCRYPTION_DATA, **MOCK_DEVICE_INFO},
    )
    entry.add_to_hass(hass)

    with patch(
        "homeassistant.components.panasonic_viera.Remote",
        return_value=get_mock_remote(),
    ):
        await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()

    tv_state = hass.states.get("media_player.panasonic_viera_tv")
    assert tv_state
    assert tv_state.name == DEFAULT_NAME
async def test_setup_entry_encrypted_missing_device_info(hass):
    """Test setup with encrypted config entry and missing device info."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        unique_id=MOCK_CONFIG_DATA[CONF_HOST],
        data={**MOCK_CONFIG_DATA, **MOCK_ENCRYPTION_DATA},
    )
    entry.add_to_hass(hass)

    with patch(
        "homeassistant.components.panasonic_viera.Remote",
        return_value=get_mock_remote(),
    ):
        await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()

    # Setup fetched the device info and upgraded the unique id to the UDN.
    assert entry.data[ATTR_DEVICE_INFO] == MOCK_DEVICE_INFO
    assert entry.unique_id == MOCK_DEVICE_INFO[ATTR_UDN]

    tv_state = hass.states.get("media_player.panasonic_viera_tv")
    assert tv_state
    assert tv_state.name == DEFAULT_NAME
async def test_setup_entry_encrypted_missing_device_info_none(hass):
    """Test setup with encrypted config entry and device info set to None."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        unique_id=MOCK_CONFIG_DATA[CONF_HOST],
        data={**MOCK_CONFIG_DATA, **MOCK_ENCRYPTION_DATA},
    )
    entry.add_to_hass(hass)

    with patch(
        "homeassistant.components.panasonic_viera.Remote",
        return_value=get_mock_remote(device_info=None),
    ):
        await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()

    # No device info available: entry keeps the host as its unique id.
    assert entry.data[ATTR_DEVICE_INFO] is None
    assert entry.unique_id == MOCK_CONFIG_DATA[CONF_HOST]

    tv_state = hass.states.get("media_player.panasonic_viera_tv")
    assert tv_state
    assert tv_state.name == DEFAULT_NAME
async def test_setup_entry_unencrypted(hass):
    """Test setup with unencrypted config entry."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        unique_id=MOCK_DEVICE_INFO[ATTR_UDN],
        data={**MOCK_CONFIG_DATA, **MOCK_DEVICE_INFO},
    )
    entry.add_to_hass(hass)

    with patch(
        "homeassistant.components.panasonic_viera.Remote",
        return_value=get_mock_remote(),
    ):
        await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()

    tv_state = hass.states.get("media_player.panasonic_viera_tv")
    assert tv_state
    assert tv_state.name == DEFAULT_NAME
async def test_setup_entry_unencrypted_missing_device_info(hass):
    """Test setup with unencrypted config entry and missing device info."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        unique_id=MOCK_CONFIG_DATA[CONF_HOST],
        data=MOCK_CONFIG_DATA,
    )
    entry.add_to_hass(hass)

    with patch(
        "homeassistant.components.panasonic_viera.Remote",
        return_value=get_mock_remote(),
    ):
        await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()

    # Setup fetched the device info and upgraded the unique id to the UDN.
    assert entry.data[ATTR_DEVICE_INFO] == MOCK_DEVICE_INFO
    assert entry.unique_id == MOCK_DEVICE_INFO[ATTR_UDN]

    tv_state = hass.states.get("media_player.panasonic_viera_tv")
    assert tv_state
    assert tv_state.name == DEFAULT_NAME
async def test_setup_entry_unencrypted_missing_device_info_none(hass):
    """Test setup with unencrypted config entry and device info set to None."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        unique_id=MOCK_CONFIG_DATA[CONF_HOST],
        data=MOCK_CONFIG_DATA,
    )
    entry.add_to_hass(hass)

    with patch(
        "homeassistant.components.panasonic_viera.Remote",
        return_value=get_mock_remote(device_info=None),
    ):
        await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()

    # No device info available: entry keeps the host as its unique id.
    assert entry.data[ATTR_DEVICE_INFO] is None
    assert entry.unique_id == MOCK_CONFIG_DATA[CONF_HOST]

    tv_state = hass.states.get("media_player.panasonic_viera_tv")
    assert tv_state
    assert tv_state.name == DEFAULT_NAME
async def test_setup_config_flow_initiated(hass):
    """Test if config flow is initiated in setup."""
    setup_ok = await async_setup_component(
        hass,
        DOMAIN,
        {DOMAIN: {CONF_HOST: "0.0.0.0"}},
    )
    assert setup_ok is True
    # YAML import should have started exactly one config flow.
    assert len(hass.config_entries.flow.async_progress()) == 1
async def test_setup_unload_entry(hass):
    """Test if config entry is unloaded."""
    entry = MockConfigEntry(
        domain=DOMAIN, unique_id=MOCK_DEVICE_INFO[ATTR_UDN], data=MOCK_CONFIG_DATA
    )
    entry.add_to_hass(hass)

    with patch(
        "homeassistant.components.panasonic_viera.Remote",
        return_value=get_mock_remote(),
    ):
        await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()

    await hass.config_entries.async_unload(entry.entry_id)
    assert entry.state == ENTRY_STATE_NOT_LOADED

    # Unloading removes the media player entity.
    tv_state = hass.states.get("media_player.panasonic_viera_tv")
    assert tv_state is None
|
|
# Copyright 2010-2012 Institut Mines-Telecom
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Jun 21, 2012
@author: Bilel Msekni
@contact: bilel.msekni@telecom-sudparis.eu
@author: Houssem Medhioub
@contact: houssem.medhioub@it-sudparis.eu
@organization: Institut Mines-Telecom - Telecom SudParis
@license: Apache License, Version 2.0
"""
try:
import simplejson as json
except ImportError:
import json
import pyocni.adapters.cnv_toHTTP as extractor
from webob import Response
class To_HTTP_Text_Plain():
    """
    Converts Response data from application/occi+json object to HTTP text/plain descriptions
    """

    def format_to_text_plain_categories(self, var):
        """
        Format JSON categories into HTTP text/plain categories

        Args:
            @param var: JSON categories

        Returns:
            One "Category :..." line per kind/mixin/action entry.
        """
        resp = ""
        # 'in' replaces dict.has_key(), which was removed in Python 3 and is
        # equivalent (and idiomatic) in Python 2 as well.
        if 'kinds' in var:
            items = var['kinds']
            for item in items:
                resp += "Category :" + cnv_JSON_category(item, "kind") + "\n"
        if 'mixins' in var:
            items = var['mixins']
            for item in items:
                resp += "Category :" + cnv_JSON_category(item, "mixin") + "\n"
        if 'actions' in var:
            items = var['actions']
            for item in items:
                resp += "Category :" + cnv_JSON_category(item, "action") + "\n"
        return resp

    def format_to_text_plain_entities(self, var):
        """
        Convert a JSON resource description into a text/plain resource description

        Args:
            @param var: JSON resource description
        """
        response = ""
        if 'resources' in var:
            items = var['resources']
            for item in items:
                cat, link, att = cnv_JSON_Resource(item)
                for c in cat:
                    response += "Category: " + c + "\n"
                for l in link:
                    response += "Link: " + l + "\n"
                for a in att:
                    response += "X-OCCI-Attribute: " + a + "\n"
                # replace the trailing newline with a ',' separator between
                # consecutive resources
                response = response[:-1] + ",\n"
            # drop the separator left after the final resource
            response = response[:-2]
        if 'links' in var:
            items = var['links']
            response += ",\n"
            for item in items:
                cat, link, att = cnv_JSON_Resource(item)
                for c in cat:
                    response += "Category: " + c + "\n"
                for l in link:
                    response += "Link: " + l + "\n"
                for a in att:
                    response += "X-OCCI-Attribute: " + a + "\n"
                response = response[:-1] + ",\n"
            response = response[:-2]
        return response

    def format_to_text_plain_locations(self, var):
        """
        Converts JSON locations into HTTP locations

        Args:
            var: JSON locations
        """
        locs = ""
        for item in var:
            locs += "Location: " + item + "\n"
        return locs

    def format_to_text_plain_x_locations(self, var):
        """
        Converts JSON locations into HTTP locations

        Args:
            var: JSON locations
        """
        locs = ""
        for item in var:
            locs += "X-OCCI-Location: " + item + "\n"
        return locs
class To_HTTP_Text_OCCI():
    """
    Converts Response data from application/occi+json object to HTTP text/occi descriptions
    """

    def format_to_text_occi_categories(self, var):
        """
        Format JSON categories into HTTP text/plain categories

        Args:
            @param var: JSON categories

        Returns:
            A webob headers collection with one 'Category' header per entry.
        """
        resp = Response()
        resp.headers.clear()
        value = ""
        # 'in' replaces dict.has_key(), which was removed in Python 3 and is
        # equivalent (and idiomatic) in Python 2 as well.
        if 'kinds' in var:
            items = var['kinds']
            for item in items:
                value = cnv_JSON_category(item, "kind") + ",\n"
                resp.headers.add('Category', value[:-2])
        if 'mixins' in var:
            items = var['mixins']
            for item in items:
                value = cnv_JSON_category(item, "mixin") + ",\n"
                resp.headers.add('Category', value[:-2])
        if 'actions' in var:
            items = var['actions']
            for item in items:
                value = cnv_JSON_category(item, "action") + ",\n"
                resp.headers.add('Category', value[:-2])
        return resp.headers

    def format_to_text_occi_entities(self, var):
        """
        Convert a JSON resource description into a text/occi resource description

        Args:
            @param var: JSON resource description
        """
        response = Response()
        response.headers.clear()
        if 'resources' in var:
            items = var['resources']
            for item in items:
                cat, link, att = cnv_JSON_Resource(item)
                for c in cat:
                    response.headers.add("Category", c)
                for l in link:
                    response.headers.add("Link", l)
                for a in att:
                    response.headers.add("X-OCCI-Attribute", a)
        if 'links' in var:
            items = var['links']
            for item in items:
                cat, link, att = cnv_JSON_Resource(item)
                for c in cat:
                    response.headers.add("Category", c)
                for l in link:
                    response.headers.add("Link", l)
                for a in att:
                    response.headers.add("X-OCCI-Attribute", a)
        return response.headers

    def format_to_text_occi_locations(self, var):
        """
        Converts JSON locations into HTTP locations

        Args:
            var: JSON locations
        """
        locs = ""
        resp = Response()
        resp.headers.clear()
        for item in var:
            locs += item + ","
        # single comma-joined 'Location' header; [:-1] drops the last comma
        resp.headers.add("Location", locs[:-1])
        return resp.headers

    def format_to_text_x_occi_locations(self, var):
        """
        Converts JSON locations into HTTP locations

        Args:
            var: JSON locations
        """
        locs = ""
        resp = Response()
        resp.headers.clear()
        for item in var:
            locs += item + ","
        resp.headers.add("X-OCCI-Location", locs[:-1])
        return resp.headers
class To_HTTP_Text_URI_List():
    """
    Converts Response data from application/occi+json object to HTTP text/uri descriptions
    """

    def __init__(self):
        pass

    def check_for_uri_locations(self, var):
        """
        Checks for the existence of path URIs in a JSON location object

        Args:
            @param var: JSON location object
        """
        # one URI per line; empty input yields an empty string
        body = "".join(entry + "\n" for entry in var)
        return body, True
def cnv_JSON_category(category, type):
    """
    Converts a json category into a HTTP category

    Args:
        @param category: JSON category
        @param type: Category type = (kind || mixin || action)
    """
    # mandatory pieces, in rendering order
    segments = [extractor.extract_term_from_category(category)]
    segments.append("scheme=\"" + extractor.extract_scheme_from_category(category) + "\"")
    segments.append("class=\"" + type + "\"")

    # optional pieces are appended only when present, preserving order
    optional = (
        ('title', extractor.extract_title_from_category(category)),
        ('rel', extractor.extract_related_from_category(category)),
        ('attributes', extractor.extract_attributes_from_category(category)),
        ('actions', extractor.extract_actions_from_category(category)),
        ('location', extractor.extract_location_from_category(category)),
    )
    for keyword, value in optional:
        if value is not None:
            segments.append(keyword + "=\"" + value + "\"")

    # every segment is ';'-terminated, including the last one
    return ";".join(segments) + ";"
def cnv_JSON_Resource(json_object):
    """
    Converts a JSON Resource into a HTTP Resource

    Returns a (categories, links, attributes) triple of lists.
    """
    # categories: the kind first, then any mixins
    categories = [extractor.extract_kind_from_entity(json_object)]
    mixins = extractor.extract_mixin_from_entity(json_object)
    if mixins is not None:
        categories.extend(mixins)

    # attributes: may be absent, in which case an empty list is returned
    attributes = extractor.extract_attributes_from_entity(json_object)
    if attributes is None:
        attributes = list()

    # links: internal links followed by actions
    links = list()
    for extracted in (extractor.extract_internal_link_from_entity(json_object),
                      extractor.extract_actions_from_entity(json_object)):
        if extracted is not None:
            links.extend(extracted)

    return categories, links, attributes
|
|
"""Test match.py."""
import textwrap
import unittest
from pytype import convert_structural
from pytype.pytd import pytd
from pytype.pytd.parse import builtins
from pytype.pytd.parse import parser
from pytype.tests import test_inference
import unittest
class MatchTest(unittest.TestCase):
    """Structural matching tests.

    Each test feeds pytd source containing `~unknownN` placeholder classes to
    the solver and asserts which builtin/declared types each placeholder can
    structurally match.
    """

    @classmethod
    def setUpClass(cls):
        # Parsed builtins are shared by all tests; loading them once is faster.
        cls.builtins_pytd = builtins.GetBuiltinsPyTD()

    def parse(self, src):
        """Parse dedented pytd source text into an AST."""
        return parser.parse_string(textwrap.dedent(src))

    def parse_and_solve(self, src):
        """Parse pytd source and solve its `~unknown` classes against builtins."""
        types, _ = convert_structural.solve(self.parse(src),
                                            builtins_pytd=self.builtins_pytd)
        return types

    def test_simple(self):
        mapping = self.parse_and_solve("""
            class `~unknown2`:
              pass
            class `~unknown1`:
              def __add__(self, other: `~unknown2`) -> int
        """)
        self.assertItemsEqual(["int", "bool"], mapping["~unknown1"])
        self.assertItemsEqual(["int", "bool"], mapping["~unknown2"])

    def test_float_and_bytearray(self):
        mapping = self.parse_and_solve("""
            class `~unknown1`:
              def __add__(self, y: int) -> float
              def __add__(self, y: float) -> float
            class `~unknown2`:
              def __add__(self, y: str) -> bytearray
              def __add__(self, y: bytearray) -> bytearray
        """)
        self.assertItemsEqual(["float"], mapping["~unknown1"])
        self.assertItemsEqual(["bytearray"], mapping["~unknown2"])

    def test_float_and_bytearray2(self):
        mapping = self.parse_and_solve("""
            class `~unknown1`:
              def __add__(self, y: int or float) -> float
            class `~unknown2`:
              def __add__(self, y: bytearray) -> bytearray
        """)
        self.assertItemsEqual(["float"], mapping["~unknown1"])
        self.assertItemsEqual(["bytearray"], mapping["~unknown2"])

    def test_append(self):
        mapping = self.parse_and_solve("""
            class `~unknown1`:
              def append(self, v: int) -> NoneType
        """)
        self.assertItemsEqual(["list", "bytearray"], mapping["~unknown1"])

    def test_single_list(self):
        # Differs from test_append in that append(float) doesn't match bytearray
        mapping = self.parse_and_solve("""
            class `~unknown1`:
              def append(self, v: float) -> NoneType
        """)
        convert_structural.log_info_mapping(mapping)
        self.assertItemsEqual(["list"], mapping["~unknown1"])
        self.assertItemsEqual(["float"], mapping["~unknown1.list.T"])

    def test_list(self):
        mapping = self.parse_and_solve("""
            class `~unknown2`:
              def append(self, v: `~unknown1`) -> NoneType
              def __getitem__(self, i: ?) -> `~unknown1`
            class `~unknown1`:
              def __add__(self: float, y: int) -> float
              def __add__(self: float, y: float) -> float
        """)
        convert_structural.log_info_mapping(mapping)
        self.assertItemsEqual(["float"], mapping["~unknown1"])
        self.assertItemsEqual(["list"], mapping["~unknown2"])
        self.assertItemsEqual(["float"], mapping["~unknown2.list.T"])

    def test_float_list(self):
        mapping = self.parse_and_solve("""
            class `~unknown1`:
              def append(self, v: ?) -> NoneType
              def __getitem__(self, i: int) -> float
        """)
        convert_structural.log_info_mapping(mapping)
        self.assertItemsEqual(["list"], mapping["~unknown1"])
        self.assertItemsEqual(["float"], mapping["~unknown1.list.T"])

    def test_two_lists(self):
        mapping = self.parse_and_solve("""
            class `~unknown1`:
              def append(self: list, v: NoneType) -> NoneType
            class `~unknown2`:
              def remove(self: list, v: float) -> NoneType
        """)
        self.assertItemsEqual(["list"], mapping["~unknown1"])
        self.assertItemsEqual(["list"], mapping["~unknown2"])
        self.assertItemsEqual(["NoneType"], mapping["~unknown1.list.T"])
        self.assertItemsEqual(["float"], mapping["~unknown2.list.T"])

    def test_float(self):
        mapping = self.parse_and_solve("""
            class `~unknown1`:
              def __add__(self, v: int) -> float
        """)
        self.assertItemsEqual(["float"], mapping["~unknown1"])

    def test_or(self):
        mapping = self.parse_and_solve("""
            class `~unknown1`:
              def join(self, iterable: unicode) -> str or unicode
              def join(self, iterable: iterator) -> str or unicode
        """)
        self.assertItemsEqual(["str"], mapping["~unknown1"])

    def test_multiple(self):
        mapping = self.parse_and_solve("""
            class `~unknown1`:
              def __add__(self, y: int) -> float
              def __add__(self, y: float) -> float
            class `~unknown2`:
              def __add__(self, y: str) -> bytearray
              def __add__(self, y: bytearray) -> bytearray
            class `~unknown3`:
              def join(self, iterable) -> str
              def join(self, iterable: unicode) -> str or unicode
              def join(self, iterable: iterator) -> str or unicode
            class `~unknown4`:
              def append(self, v: NoneType) -> NoneType
        """)
        self.assertItemsEqual(["float"], mapping["~unknown1"])
        self.assertItemsEqual(["bytearray"], mapping["~unknown2"])
        self.assertItemsEqual(["str"], mapping["~unknown3"])
        self.assertItemsEqual(["list"], mapping["~unknown4"])
        self.assertItemsEqual(["NoneType"], mapping["~unknown4.list.T"])

    def test_union(self):
        mapping = self.parse_and_solve("""
            class `~unknown1`:
              def __add__(self, x:int or float) -> float
            class `~unknown2`:
              def __add__(self, x:bytearray) -> bytearray
        """)
        self.assertItemsEqual(["float"], mapping["~unknown1"])
        self.assertItemsEqual(["bytearray"], mapping["~unknown2"])

    def test_containers(self):
        mapping = self.parse_and_solve("""
            class `~unknown1`:
              def foo(self, x: list<bool>) -> int
            class A(object):
              def foo(self, x: list<int>) -> int
        """)
        self.assertItemsEqual(["A"], mapping["~unknown1"])

    def test_type_parameters(self):
        mapping = self.parse_and_solve("""
            class A<T>:
              def foo(self) -> ?
              def bar(self, x: T) -> ?
            class `~unknown1`:
              def foo(self) -> ?
              def bar(self, x: int) -> ?
        """)
        self.assertItemsEqual(["A"], mapping["~unknown1"])
        self.assertItemsEqual(["int"], mapping["~unknown1.A.T"])

    def test_generic_against_generic(self):
        mapping = self.parse_and_solve("""
            class A(nothing):
              def f(self, x: list<int>) -> ?
              def g(self, x: list<float>) -> ?
            class B(nothing):
              def f(self, x: set<int>) -> ?
              def g(self, x: list<int>) -> ?
            class `~unknown1`:
              def f(self, x: list<int>) -> ?
            class `~unknown2`:
              def g(self, x: list<int>) -> ?
        """)
        self.assertItemsEqual(["A"], mapping["~unknown1"])
        self.assertItemsEqual(["B"], mapping["~unknown2"])

    def test_unknown_against_generic(self):
        mapping = self.parse_and_solve("""
            def f(A: `~unknown0`) -> list<`~unknown8`>
            class `~unknown0`(nothing):
              def values(self) -> `~unknown2`
            class `~unknown2`(nothing):
              def __iter__(self) -> `~unknown4`
            class `~unknown4`(nothing):
              def next(self) -> `~unknown6`
            class `~unknown6`(nothing):
              def __sub__(self, _1: float) -> `~unknown8`
            class `~unknown8`(nothing):
              pass
        """)
        self.assertItemsEqual(["dict"], mapping["~unknown0"])
        self.assertContainsSubset(["complex", "float"], mapping["~unknown0.dict.V"])
        self.assertItemsEqual(["list"], mapping["~unknown2"])
        self.assertItemsEqual(["listiterator"], mapping["~unknown4"])
        self.assertContainsSubset(["complex", "float"], mapping["~unknown6"])
        self.assertContainsSubset(["complex", "float"], mapping["~unknown8"])

    def test_subclass_of_elements(self):
        mapping = self.parse_and_solve("""
            class A(nothing):
              def f(self, x: list<int>) -> list<int>
            class `~unknown1`:
              def f(self, x: list<bool>) -> ?
            class `~unknown2`:
              def f(self, x: ?) -> list<object>
            class `~unknown3`:
              def f(self, x: list<object>) -> ?
            class `~unknown4`:
              def f(self, x: ?) -> list<bool>
        """)
        self.assertItemsEqual(["A"], mapping["~unknown1"])
        self.assertItemsEqual([], mapping["~unknown2"])
        self.assertItemsEqual([], mapping["~unknown3"])
        self.assertItemsEqual(["A"], mapping["~unknown4"])

    def test_subclass(self):
        mapping = self.parse_and_solve("""
            class A(nothing):
              pass
            class B(A):
              pass
            class AA:
              def foo(self, x: A) -> A
            class AB:
              def foo(self, x: A) -> B
            class BA:
              def foo(self, x: B) -> A
            class BB:
              def foo(self, x: B) -> B
            class `~unknown1`:
              def foo(self, x: A) -> A
            class `~unknown2`:
              def foo(self, x: A) -> B
            class `~unknown3`:
              def foo(self, x: B) -> A
            class `~unknown4`:
              def foo(self, x: B) -> B
        """)
        self.assertItemsEqual(["AA"], mapping["~unknown1"])
        self.assertItemsEqual(["AA", "AB"], mapping["~unknown2"])
        self.assertItemsEqual(["AA", "BA"], mapping["~unknown3"])
        self.assertItemsEqual(["AA", "AB", "BA", "BB"], mapping["~unknown4"])

    def test_odd_superclass(self):
        # Exotic base-class forms should still allow structural matching.
        mapping = self.parse_and_solve("""
            class A(nothing, nothing):
              def foobar(self) -> ?
            class B(?):
              def foobar(self) -> ?
            class C(A or B):
              def foobar(self) -> ?
            class D(list<int>):
              def foobar(self) -> ?
            class E<T>(T):
              def foobar(self) -> ?
            class `~unknown1`:
              def foobar(self) -> ?
        """)
        self.assertContainsSubset(["A", "B", "C", "D", "E"], mapping["~unknown1"])

    @unittest.skip("not implemented")
    def test_unknown_superclass(self):
        # E.g. "class A(x): def foobar(self): pass" with (unknown) x = type(3)
        mapping = self.parse_and_solve("""
            class `~unknown1`:
              def __add__(self, _1: int) -> int
            class A(`~unknown1`):
              def foobar(self) -> NoneType
            class `~unknown2`:
              def __add__(self, _1: int) -> int
              def foobar(self) -> NoneType
        """)
        self.assertItemsEqual(["int", "bool"], mapping["~unknown1"])
        self.assertItemsEqual(["A"], mapping["~unknown2"])

    def test_nothing(self):
        mapping = self.parse_and_solve("""
            class A(nothing):
              def f(self, x:nothing) -> nothing
            class B(nothing):
              def f(self, x:int) -> nothing
            class C(nothing):
              def f(self, x:nothing) -> int
            class D(nothing):
              def f(self, x:int) -> int
            class `~unknown1`:
              def f(self, x:nothing) -> nothing
            class `~unknown2`:
              def f(self, x:int) -> nothing
            class `~unknown3`:
              def f(self, x:nothing) -> int
            class `~unknown4`:
              def f(self, x:int) -> int
        """)
        self.assertItemsEqual(["A"], mapping["~unknown1"])
        self.assertItemsEqual(["B"], mapping["~unknown2"])
        self.assertItemsEqual(["C"], mapping["~unknown3"])
        self.assertItemsEqual(["D"], mapping["~unknown4"])

    def test_unknown(self):
        mapping = self.parse_and_solve("""
            class A(?):
              def f(self, x:?) -> ?
            class B(?):
              def f(self, x:int) -> ?
            class C(?):
              def f(self, x:?) -> int
            class D(?):
              def f(self, x:int) -> int
            class `~unknown1`:
              def f(self, x:?) -> ?
              def f(self, x:int) -> ?
              def f(self, x:?) -> int
              def f(self, x:int) -> int
        """)
        convert_structural.log_info_mapping(mapping)
        self.assertItemsEqual(["A", "B", "C", "D"], mapping["~unknown1"])

    def test_union_left_right(self):
        mapping = self.parse_and_solve("""
            class A:
              def f(self, x:int) -> int
            class B:
              def f(self, x:int) -> int or float
            class C:
              def f(self, x:int or float) -> int
            class D:
              def f(self, x:int or float) -> int or float
            class `~unknown1`:
              def f(self, x:int) -> int
            class `~unknown2`:
              def f(self, x:int or float) -> int
            class `~unknown3`:
              def f(self, x:int) -> int or float
        """)
        self.assertItemsEqual(["A", "B", "C", "D"], mapping["~unknown1"])
        self.assertItemsEqual(["C", "D"], mapping["~unknown2"])
        self.assertItemsEqual(["B", "D"], mapping["~unknown3"])

    def test_different_lengths(self):
        mapping = self.parse_and_solve("""
            class A:
              def f(self) -> ?
            class B:
              def f(self, x) -> ?
            class C:
              def f(self, x, y) -> ?
            class `~unknown1`:
              def f(self) -> ?
            class `~unknown2`:
              def f(self, x) -> ?
            class `~unknown3`:
              def f(self, x, y) -> ?
        """)
        self.assertItemsEqual(["A"], mapping["~unknown1"])
        self.assertItemsEqual(["B"], mapping["~unknown2"])
        self.assertItemsEqual(["C"], mapping["~unknown3"])

    def test_filter(self):
        mapping = self.parse_and_solve("""
            class A:
              def f(self, x: int or bytearray) -> ?
            class `~unknown1`:
              def f(self, _1: `~unknown2`) -> ?
            class `~unknown2`:
              def capitalize(self) -> ?
        """)
        self.assertItemsEqual(["A"], mapping["~unknown1"])
        self.assertItemsEqual(["bytearray"], mapping["~unknown2"])

    def test_partial(self):
        # `~bool` is a partial class: it constrains the unknowns through the
        # signatures it declares for the builtin bool.
        mapping = self.parse_and_solve("""
            class `~unknown1`:
              pass
            class `~bool`:
              def __and__(self, _1: `~unknown1`) -> bool
              def __and__(self, _1: `~unknown2`) -> bool
            class `~unknown2`:
              pass
        """)
        self.assertItemsEqual(["bool", "int"], mapping["~unknown1"])
        self.assertItemsEqual(["bool", "int"], mapping["~unknown2"])

    def test_optional_parameters(self):
        # NOTE(review): ~unknown4 is defined but never asserted below.
        mapping = self.parse_and_solve("""
            class A:
              def f(self, ...) -> ?
            class B:
              def f(self, x, ...) -> ?
            class C:
              def f(self, x, y, ...) -> ?
            class `~unknown1`:
              def f(self) -> ?
            class `~unknown2`:
              def f(self, x) -> ?
            class `~unknown3`:
              def f(self, x, y) -> ?
            class `~unknown4`:
              def f(self, x, y, z) -> ?
        """)
        self.assertItemsEqual(["A"], mapping["~unknown1"])
        self.assertItemsEqual(["A", "B"], mapping["~unknown2"])
        self.assertItemsEqual(["A", "B", "C"], mapping["~unknown3"])

    def test_listiterator(self):
        # Only checks that solving succeeds; no mapping assertions.
        self.parse_and_solve("""
            class `~unknown1`:
              pass
            class `~listiterator`:
              def next(self) -> `~unknown1`
              def next(self) -> tuple<nothing>
        """)

    def test_enumerate(self):
        # Only checks that solving succeeds; no mapping assertions.
        self.parse_and_solve("""
            class `~unknown1`:
              pass
            class `~enumerate`:
              def __init__(self, iterable: list<`~unknown1`>) -> NoneType
              def next(self) -> tuple<?>
        """)

    def test_call_builtin(self):
        mapping = self.parse_and_solve("""
            class `~unknown1`:
              pass
            class `~unknown2`:
              pass
            def `~round`(number: `~unknown1`) -> `~unknown2`
        """)
        self.assertIn("float", mapping["~unknown1"])
        self.assertNotIn("str", mapping["~unknown1"])

    def test_fibonacci(self):
        mapping = self.parse_and_solve("""
            def fib(n: `~unknown4`) -> int or `~unknown12`
            def fib(n: `~unknown8` or int) -> int
            def foo(x: `~unknown1`) -> `~unknown3` or int
            class `~int`:  # TODO(kramm): Make pytype add the ~
              def __rmul__(self, y: `~unknown4`) -> int
              def __eq__(self, y: int) -> `~unknown10` or bool
              def __radd__(self, y: `~unknown1`) -> int
              def __rsub__(self, y: `~unknown4`) -> int
            class `~unknown1`(nothing):
              def __add__(self, _1: int) -> `~unknown3`
            class `~unknown3`(nothing):
              pass
            class `~unknown4`(nothing):
              def __eq__(self, _1: int) -> `~unknown6`
              def __sub__(self, _1: int) -> `~unknown8`
              def __mul__(self, _1: int) -> `~unknown12`
            class `~unknown6`(nothing):
              pass
            class `~unknown8`(nothing):
              def __eq__(self, _1: int) -> `~unknown10`
            class `~unknown10`(nothing):
              pass
            class `~unknown12`(nothing):
              pass
        """)
        self.assertItemsEqual(["int"], mapping["~unknown4"])

    def test_add(self):
        mapping = self.parse_and_solve("""
            def f(self, x: `~unknown4`) -> `~unknown6`
            class `~unknown4`(nothing):
              def __add__(self, _1: int) -> `~unknown6`
            class `~unknown6`(nothing):
              pass
        """)
        # TODO(pludemann): remove "bool" from list when we do the
        # more strict definition of return (that is, not allowing
        # "bool" just because it's a subclass of "int" in __builtin__.pytd
        numbers = ["int", "complex", "float", "long", "bool"]
        self.assertItemsEqual(numbers, mapping["~unknown4"])
        self.assertItemsEqual(numbers, mapping["~unknown6"])

    def test_subclasses(self):
        mapping = self.parse_and_solve("""
            class Foo:
              def foo(self) -> Bar1
            class Bar1:
              def bar(self) -> complex
            class Bar2(Bar1):
              def bar(self) -> float
            class `~unknown1`:
              def foo(self) -> `~unknown2`
            class `~unknown2`:
              def bar(self) -> `~unknown3`
            class `~unknown3`:
              pass
        """)
        self.assertItemsEqual(["complex", "float"], mapping["~unknown3"])

    def test_match_builtin_function(self):
        mapping = self.parse_and_solve("""
            def baz(int) -> float
            def baz(complex) -> complex
            def `~baz`(`~unknown3`) -> `~unknown4`
            class `~unknown3`:
              pass
            class `~unknown4`:
              pass
        """)
        self.assertItemsEqual(["complex", "float"], mapping["~unknown4"])

    def test_subclasses2(self):
        mapping = self.parse_and_solve("""
            class Foo:
              def foo(self) -> Bar1
            class Bar1:
              def bar(self) -> Bar1
            class Bar2(Bar1):
              def bar(self) -> Bar2
            def baz(Bar1) -> complex
            def baz(Bar2) -> float
            def `~baz`(`~unknown3`) -> `~unknown4`
            class `~unknown1`:
              def foo(self) -> `~unknown2`
            class `~unknown2`:
              def bar(self) -> `~unknown3`
            class `~unknown3`:
              pass
            class `~unknown4`:
              pass
        """)
        self.assertItemsEqual(["complex", "float"], mapping["~unknown4"])

    def test_convert(self):
        sourcecode = textwrap.dedent("""
            class A:
              def foo(self, x: `~unknown1`) -> ?
              def foobaz(self, x: int) -> int
            class `~unknown1`:
              def foobaz(self, x: int) -> int
        """)
        expected = textwrap.dedent("""
            class A:
              def foo(self, x: A) -> ?
              def foobaz(self, x: int) -> int
        """).lstrip()
        ast = parser.parse_string(sourcecode)
        ast = convert_structural.convert_pytd(ast, self.builtins_pytd)
        self.assertMultiLineEqual(pytd.Print(ast), expected)

    def test_convert_with_type_params(self):
        sourcecode = textwrap.dedent("""
            class A:
              def foo(self, x: `~unknown1`) -> bool
            class `~unknown1`(nothing):
              def __setitem__(self, _1: str, _2: `~unknown2`) -> ?
              def update(self, _1: NoneType or dict<nothing, nothing>) -> ?
            class `~unknown2`(nothing):
              def append(self, v:NoneType) -> NoneType
        """)
        expected = textwrap.dedent("""
            class A:
              def foo(self, x: dict<str, list<?>>) -> bool
        """).lstrip()
        ast = parser.parse_string(sourcecode)
        ast = convert_structural.convert_pytd(ast, self.builtins_pytd)
        self.assertMultiLineEqual(pytd.Print(ast), expected)

    def test_isinstance(self):
        sourcecode = textwrap.dedent("""
            x: `~unknown1`
            def `~isinstance`(object: int, class_or_type_or_tuple: tuple<nothing>) -> `~unknown1`
            class `~unknown1`:
              pass
        """)
        expected = textwrap.dedent("""
            x: bool
        """).strip()
        ast = parser.parse_string(sourcecode)
        ast = convert_structural.convert_pytd(ast, self.builtins_pytd)
        self.assertMultiLineEqual(pytd.Print(ast), expected)
# Delegate to the shared test_inference entry point so these tests can be
# run standalone as a script.
if __name__ == "__main__":
  test_inference.main()
|
|
try:
import aiohttp.web
except ImportError:
print("The dashboard requires aiohttp to run.")
import sys
sys.exit(1)
import argparse
import copy
import datetime
import json
import logging
import os
import re
import threading
import time
import traceback
import yaml
import uuid
from base64 import b64decode
from collections import defaultdict
from operator import itemgetter
from typing import Dict
import grpc
from google.protobuf.json_format import MessageToDict
import ray
from ray.core.generated import node_manager_pb2
from ray.core.generated import node_manager_pb2_grpc
from ray.core.generated import reporter_pb2
from ray.core.generated import reporter_pb2_grpc
from ray.core.generated import core_worker_pb2
from ray.core.generated import core_worker_pb2_grpc
import ray.ray_constants as ray_constants
# Logger for this module. It should be configured at the entry point
# into the program using Ray. Ray provides a default configuration at
# entry/init points.
logger = logging.getLogger(__name__)
def to_unix_time(dt):
    """Convert a naive UTC datetime to seconds since the Unix epoch."""
    epoch = datetime.datetime(1970, 1, 1)
    return (dt - epoch).total_seconds()
def round_resource_value(quantity):
    """Render a float quantity: exact integers stay ints, else 2 decimals."""
    return int(quantity) if quantity.is_integer() else round(quantity, 2)
def format_resource(resource_name, quantity):
    """Format a resource amount for display.

    Memory-type resources arrive as a count of 50 MiB chunks and are
    expressed in GiB; everything else is shown as a plain number.
    """
    if resource_name in ("object_store_memory", "memory"):
        # quantity is a number of 50 MiB chunks; convert to GiB.
        gib = quantity * (50 * 1024 * 1024) / (1024 * 1024 * 1024)
        return "{} GiB".format(round_resource_value(gib))
    return "{}".format(round_resource_value(quantity))
def format_reply_id(reply):
    """Recursively rewrite, in place, every base64 ``*Id`` value in *reply*
    into its hex representation. Non-dict/list values are left untouched."""
    if isinstance(reply, list):
        for element in reply:
            format_reply_id(element)
    elif isinstance(reply, dict):
        for key, value in reply.items():
            if isinstance(value, (dict, list)):
                format_reply_id(value)
            elif key.endswith("Id"):
                reply[key] = ray.utils.binary_to_hex(b64decode(value))
def measures_to_dict(measures):
    """Flatten a list of view measures into {last-tag: value}.

    Keeps the final comma-separated tag as the key; entries with neither
    an intValue nor a doubleValue are dropped.
    """
    result = {}
    for entry in measures:
        key = entry["tags"].split(",")[-1]
        if "intValue" in entry:
            result[key] = entry["intValue"]
        elif "doubleValue" in entry:
            result[key] = entry["doubleValue"]
    return result
def b64_decode(reply):
    """Decode a base64 payload and return it as a UTF-8 string."""
    raw = b64decode(reply)
    return raw.decode("utf-8")
class Dashboard(object):
    """A dashboard process for monitoring Ray nodes.
    This dashboard is made up of a REST API which collates data published by
    Reporter processes on nodes into a json structure, and a webserver
    which polls said API for display purposes.
    Attributes:
        redis_client: A client used to communicate with the Redis server.
    """
    def __init__(self,
                 host,
                 port,
                 redis_address,
                 temp_dir,
                 redis_password=None):
        """Initialize the dashboard object.

        Args:
            host: Interface the HTTP server binds to.
            port: Port the HTTP server listens on.
            redis_address: Address of the cluster's Redis server.
            temp_dir: Ray temp dir; the dashboard URL is written there.
            redis_password: Optional Redis password.
        """
        self.host = host
        self.port = port
        self.redis_client = ray.services.create_redis_client(
            redis_address, password=redis_password)
        self.temp_dir = temp_dir
        # Background threads that aggregate node- and raylet-level stats.
        self.node_stats = NodeStats(redis_address, redis_password)
        self.raylet_stats = RayletStats(redis_address, redis_password)
        # Setting the environment variable RAY_DASHBOARD_DEV=1 disables some
        # security checks in the dashboard server to ease development while
        # using the React dev server. Specifically, when this option is set, we
        # allow cross-origin requests to be made.
        self.is_dev = os.environ.get("RAY_DASHBOARD_DEV") == "1"
        self.app = aiohttp.web.Application()
        self.setup_routes()
    def setup_routes(self):
        """Define all aiohttp handlers (as closures over self) and register
        the static and API routes on self.app."""
        def forbidden() -> aiohttp.web.Response:
            return aiohttp.web.Response(status=403, text="403 Forbidden")
        def get_forbidden(_) -> aiohttp.web.Response:
            return forbidden()
        async def get_index(req) -> aiohttp.web.Response:
            return aiohttp.web.FileResponse(
                os.path.join(
                    os.path.dirname(os.path.abspath(__file__)),
                    "client/build/index.html"))
        async def get_favicon(req) -> aiohttp.web.Response:
            return aiohttp.web.FileResponse(
                os.path.join(
                    os.path.dirname(os.path.abspath(__file__)),
                    "client/build/favicon.ico"))
        # Common envelope for all JSON API replies: {result, timestamp, error}.
        async def json_response(result=None, error=None,
                                ts=None) -> aiohttp.web.Response:
            if ts is None:
                ts = datetime.datetime.utcnow()
            headers = None
            if self.is_dev:
                # Dev mode: allow the React dev server's cross-origin calls.
                headers = {"Access-Control-Allow-Origin": "*"}
            return aiohttp.web.json_response(
                {
                    "result": result,
                    "timestamp": to_unix_time(ts),
                    "error": error,
                },
                headers=headers)
        async def ray_config(_) -> aiohttp.web.Response:
            # Best-effort read of the autoscaler bootstrap config; any failure
            # (missing file, bad YAML) is reported as "No config".
            try:
                config_path = os.path.expanduser("~/ray_bootstrap_config.yaml")
                with open(config_path) as f:
                    cfg = yaml.safe_load(f)
            except Exception:
                return await json_response(error="No config")
            D = {
                "min_workers": cfg["min_workers"],
                "max_workers": cfg["max_workers"],
                "initial_workers": cfg["initial_workers"],
                "autoscaling_mode": cfg["autoscaling_mode"],
                "idle_timeout_minutes": cfg["idle_timeout_minutes"],
            }
            try:
                D["head_type"] = cfg["head_node"]["InstanceType"]
            except KeyError:
                D["head_type"] = "unknown"
            try:
                D["worker_type"] = cfg["worker_nodes"]["InstanceType"]
            except KeyError:
                D["worker_type"] = "unknown"
            return await json_response(result=D)
        async def node_info(req) -> aiohttp.web.Response:
            now = datetime.datetime.utcnow()
            D = self.node_stats.get_node_stats()
            return await json_response(result=D, ts=now)
        async def raylet_info(req) -> aiohttp.web.Response:
            # Combine per-raylet stats with the actor tree and render the
            # human-readable "extraInfo" string for each node.
            D = self.raylet_stats.get_raylet_stats()
            workers_info_by_node = {
                data["nodeId"]: data.get("workersStats")
                for data in D.values()
            }
            infeasible_tasks = sum(
                (data.get("infeasibleTasks", []) for data in D.values()), [])
            actor_tree = self.node_stats.get_actor_tree(
                workers_info_by_node, infeasible_tasks)
            for address, data in D.items():
                # process view data
                measures_dicts = {}
                for view_data in data["viewData"]:
                    view_name = view_data["viewName"]
                    if view_name in ("local_available_resource",
                                     "local_total_resource",
                                     "object_manager_stats"):
                        measures_dicts[view_name] = measures_to_dict(
                            view_data["measures"])
                # process resources info
                extra_info_strings = []
                prefix = "ResourceName:"
                for resource_name, total_resource in measures_dicts[
                        "local_total_resource"].items():
                    available_resource = measures_dicts[
                        "local_available_resource"].get(resource_name, .0)
                    resource_name = resource_name[len(prefix):]
                    extra_info_strings.append("{}: {} / {}".format(
                        resource_name,
                        format_resource(resource_name,
                                        total_resource - available_resource),
                        format_resource(resource_name, total_resource)))
                data["extraInfo"] = ", ".join(extra_info_strings) + "\n"
                if os.environ.get("RAY_DASHBOARD_DEBUG"):
                    # process object store info
                    extra_info_strings = []
                    prefix = "ValueType:"
                    for stats_name in [
                            "used_object_store_memory", "num_local_objects"
                    ]:
                        stats_value = measures_dicts[
                            "object_manager_stats"].get(
                                prefix + stats_name, .0)
                        extra_info_strings.append("{}: {}".format(
                            stats_name, stats_value))
                    data["extraInfo"] += ", ".join(extra_info_strings)
                    # process actor info
                    actor_tree_str = json.dumps(
                        actor_tree, indent=2, sort_keys=True)
                    lines = actor_tree_str.split("\n")
                    max_line_length = max(map(len, lines))
                    to_print = []
                    for line in lines:
                        # Pad every line to the same width for display.
                        to_print.append(line +
                                        (max_line_length - len(line)) * " ")
                    data["extraInfo"] += "\n" + "\n".join(to_print)
            result = {"nodes": D, "actors": actor_tree}
            return await json_response(result=result)
        async def launch_profiling(req) -> aiohttp.web.Response:
            node_id = req.query.get("node_id")
            pid = int(req.query.get("pid"))
            duration = int(req.query.get("duration"))
            profiling_id = self.raylet_stats.launch_profiling(
                node_id=node_id, pid=pid, duration=duration)
            return await json_response(str(profiling_id))
        async def check_profiling_status(req) -> aiohttp.web.Response:
            profiling_id = req.query.get("profiling_id")
            return await json_response(
                self.raylet_stats.check_profiling_status(profiling_id))
        async def get_profiling_info(req) -> aiohttp.web.Response:
            profiling_id = req.query.get("profiling_id")
            return aiohttp.web.json_response(
                self.raylet_stats.get_profiling_info(profiling_id))
        async def kill_actor(req) -> aiohttp.web.Response:
            actor_id = req.query.get("actor_id")
            ip_address = req.query.get("ip_address")
            port = req.query.get("port")
            return await json_response(
                self.raylet_stats.kill_actor(actor_id, ip_address, port))
        async def logs(req) -> aiohttp.web.Response:
            hostname = req.query.get("hostname")
            pid = req.query.get("pid")
            result = self.node_stats.get_logs(hostname, pid)
            return await json_response(result=result)
        async def errors(req) -> aiohttp.web.Response:
            hostname = req.query.get("hostname")
            pid = req.query.get("pid")
            result = self.node_stats.get_errors(hostname, pid)
            return await json_response(result=result)
        self.app.router.add_get("/", get_index)
        self.app.router.add_get("/favicon.ico", get_favicon)
        build_dir = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), "client/build")
        if not os.path.isdir(build_dir):
            raise ValueError(
                "Dashboard build directory not found at '{}'. If installing "
                "from source, please follow the additional steps required to "
                "build the dashboard: "
                "cd python/ray/dashboard/client && npm ci && npm run build"
                .format(build_dir))
        static_dir = os.path.join(build_dir, "static")
        self.app.router.add_static("/static", static_dir)
        speedscope_dir = os.path.join(build_dir, "speedscope-1.5.3")
        self.app.router.add_static("/speedscope", speedscope_dir)
        self.app.router.add_get("/api/ray_config", ray_config)
        self.app.router.add_get("/api/node_info", node_info)
        self.app.router.add_get("/api/raylet_info", raylet_info)
        self.app.router.add_get("/api/launch_profiling", launch_profiling)
        self.app.router.add_get("/api/check_profiling_status",
                                check_profiling_status)
        self.app.router.add_get("/api/get_profiling_info", get_profiling_info)
        self.app.router.add_get("/api/kill_actor", kill_actor)
        self.app.router.add_get("/api/logs", logs)
        self.app.router.add_get("/api/errors", errors)
        # Everything else is rejected with a 403.
        self.app.router.add_get("/{_}", get_forbidden)
    def log_dashboard_url(self):
        """Write the dashboard URL to the temp dir and log it."""
        url = ray.services.get_webui_url_from_redis(self.redis_client)
        with open(os.path.join(self.temp_dir, "dashboard_url"), "w") as f:
            f.write(url)
        logger.info("Dashboard running on {}".format(url))
    def run(self):
        """Start the stats collector threads and serve HTTP (blocking)."""
        self.log_dashboard_url()
        self.node_stats.start()
        self.raylet_stats.start()
        aiohttp.web.run_app(self.app, host=self.host, port=self.port)
class NodeStats(threading.Thread):
    """Thread that subscribes to Redis pub/sub and aggregates per-node
    stats, worker logs, error messages and actor metadata.

    All mutable state is guarded by ``self._node_stats_lock``: ``run``
    writes under the lock and the ``get_*``/``calculate_*`` accessors are
    expected to read under it.
    """
    def __init__(self, redis_address, redis_password=None):
        # Pattern matching every per-node reporter channel.
        self.redis_key = "{}.*".format(ray.gcs_utils.REPORTER_CHANNEL)
        self.redis_client = ray.services.create_redis_client(
            redis_address, password=redis_password)
        # hostname -> latest stats payload published by that node's reporter.
        self._node_stats = {}
        # (ip, port) of a worker -> (ip, port) of its owner.
        self._addr_to_owner_addr = {}
        # (ip, port) of a worker -> hex actor id hosted at that address.
        self._addr_to_actor_id = {}
        # (ip, port) of a worker -> extra actor metadata (job id, state, ...).
        self._addr_to_extra_info_dict = {}
        self._node_stats_lock = threading.Lock()
        # Template entry used for an actor before any worker stats arrive.
        self._default_info = {
            "actorId": "",
            "children": {},
            "currentTaskFuncDesc": [],
            "ipAddress": "",
            "isDirectCall": False,
            "jobId": "",
            "numExecutedTasks": 0,
            "numLocalObjects": 0,
            "numObjectIdsInScope": 0,
            "port": 0,
            "state": 0,
            "taskQueueLength": 0,
            "usedObjectStoreMemory": 0,
            "usedResources": {},
        }
        # Mapping from IP address to PID to list of log lines
        self._logs = defaultdict(lambda: defaultdict(list))
        # Mapping from IP address to PID to list of error messages
        self._errors = defaultdict(lambda: defaultdict(list))
        ray.state.state._initialize_global_state(
            redis_address=redis_address, redis_password=redis_password)
        super().__init__()
    def calculate_log_counts(self):
        """Return {ip: {pid: number of buffered log lines}}."""
        return {
            ip: {
                pid: len(logs_for_pid)
                for pid, logs_for_pid in logs_for_ip.items()
            }
            for ip, logs_for_ip in self._logs.items()
        }
    def calculate_error_counts(self):
        """Return {ip: {pid: number of buffered error messages}}."""
        return {
            ip: {
                pid: len(errors_for_pid)
                for pid, errors_for_pid in errors_for_ip.items()
            }
            for ip, errors_for_ip in self._errors.items()
        }
    def purge_outdated_stats(self):
        """Drop node entries whose last report is more than 5 seconds old."""
        def current(then, now):
            if (now - then) > 5:
                return False
            return True
        now = to_unix_time(datetime.datetime.utcnow())
        self._node_stats = {
            k: v
            for k, v in self._node_stats.items() if current(v["now"], now)
        }
    def get_node_stats(self) -> Dict:
        """Return fresh node stats plus per-node log/error counts."""
        with self._node_stats_lock:
            self.purge_outdated_stats()
            node_stats = sorted(
                (v for v in self._node_stats.values()),
                key=itemgetter("boot_time"))
            return {
                "clients": node_stats,
                "log_counts": self.calculate_log_counts(),
                "error_counts": self.calculate_error_counts(),
            }
    def get_actor_tree(self, workers_info_by_node, infeasible_tasks) -> Dict:
        """Build the actor ownership tree.

        First builds a flat {actor_id: info} map seeded from actor metadata,
        enriches it with live core-worker stats and infeasible creation
        tasks, then links children to parents and returns the root's
        children.
        """
        now = time.time()
        # construct flattened actor tree
        flattened_tree = {"root": {"children": {}}}
        child_to_parent = {}
        with self._node_stats_lock:
            for addr, actor_id in self._addr_to_actor_id.items():
                flattened_tree[actor_id] = copy.deepcopy(self._default_info)
                flattened_tree[actor_id].update(
                    self._addr_to_extra_info_dict[addr])
                parent_id = self._addr_to_actor_id.get(
                    self._addr_to_owner_addr[addr], "root")
                child_to_parent[actor_id] = parent_id
            for node_id, workers_info in workers_info_by_node.items():
                for worker_info in workers_info:
                    if "coreWorkerStats" in worker_info:
                        core_worker_stats = worker_info["coreWorkerStats"]
                        addr = (core_worker_stats["ipAddress"],
                                str(core_worker_stats["port"]))
                        if addr in self._addr_to_actor_id:
                            actor_info = flattened_tree[self._addr_to_actor_id[
                                addr]]
                            if "currentTaskFuncDesc" in core_worker_stats:
                                core_worker_stats[
                                    "currentTaskFuncDesc"] = list(
                                        map(
                                            b64_decode, core_worker_stats[
                                                "currentTaskFuncDesc"]))
                            format_reply_id(core_worker_stats)
                            actor_info.update(core_worker_stats)
                            # timestamp is in milliseconds; convert to seconds.
                            actor_info["averageTaskExecutionSpeed"] = round(
                                actor_info["numExecutedTasks"] /
                                (now - actor_info["timestamp"] / 1000), 2)
                            actor_info["nodeId"] = node_id
                            actor_info["pid"] = worker_info["pid"]
            for infeasible_task in infeasible_tasks:
                actor_id = ray.utils.binary_to_hex(
                    b64decode(
                        infeasible_task["actorCreationTaskSpec"]["actorId"]))
                caller_addr = (infeasible_task["callerAddress"]["ipAddress"],
                               str(infeasible_task["callerAddress"]["port"]))
                caller_id = self._addr_to_actor_id.get(caller_addr, "root")
                child_to_parent[actor_id] = caller_id
                # state -1 marks an actor whose creation task is infeasible.
                infeasible_task["state"] = -1
                infeasible_task["functionDescriptor"] = list(
                    map(b64_decode, infeasible_task["functionDescriptor"]))
                format_reply_id(infeasible_tasks)
                flattened_tree[actor_id] = infeasible_task
        # construct actor tree
        actor_tree = flattened_tree
        for actor_id, parent_id in child_to_parent.items():
            actor_tree[parent_id]["children"][actor_id] = actor_tree[actor_id]
        return actor_tree["root"]["children"]
    def get_logs(self, hostname, pid):
        """Return buffered log lines for a host, optionally one PID only."""
        # NOTE(review): reads self._node_stats/self._logs without the lock —
        # presumably acceptable for these dict lookups, but confirm.
        ip = self._node_stats.get(hostname, {"ip": None})["ip"]
        logs = self._logs.get(ip, {})
        if pid:
            logs = {pid: logs.get(pid, [])}
        return logs
    def get_errors(self, hostname, pid):
        """Return buffered error messages for a host, optionally one PID."""
        ip = self._node_stats.get(hostname, {"ip": None})["ip"]
        errors = self._errors.get(ip, {})
        if pid:
            errors = {pid: errors.get(pid, [])}
        return errors
    def run(self):
        """Subscribe to the reporter, log, error and actor channels and
        consume messages forever; per-message failures are logged and
        skipped."""
        p = self.redis_client.pubsub(ignore_subscribe_messages=True)
        p.psubscribe(self.redis_key)
        logger.info("NodeStats: subscribed to {}".format(self.redis_key))
        log_channel = ray.gcs_utils.LOG_FILE_CHANNEL
        p.subscribe(log_channel)
        logger.info("NodeStats: subscribed to {}".format(log_channel))
        error_channel = ray.gcs_utils.TablePubsub.Value("ERROR_INFO_PUBSUB")
        p.subscribe(error_channel)
        logger.info("NodeStats: subscribed to {}".format(error_channel))
        actor_channel = ray.gcs_utils.TablePubsub.Value("ACTOR_PUBSUB")
        p.subscribe(actor_channel)
        logger.info("NodeStats: subscribed to {}".format(actor_channel))
        # Seed actor metadata from the current actor table before listening,
        # so actors created before this thread started are still shown.
        current_actor_table = ray.actors()
        with self._node_stats_lock:
            for actor_data in current_actor_table.values():
                addr = (actor_data["Address"]["IPAddress"],
                        str(actor_data["Address"]["Port"]))
                owner_addr = (actor_data["OwnerAddress"]["IPAddress"],
                              str(actor_data["OwnerAddress"]["Port"]))
                self._addr_to_owner_addr[addr] = owner_addr
                self._addr_to_actor_id[addr] = actor_data["ActorID"]
                self._addr_to_extra_info_dict[addr] = {
                    "jobId": actor_data["JobID"],
                    "state": actor_data["State"],
                    "isDirectCall": actor_data["IsDirectCall"],
                    "timestamp": actor_data["Timestamp"]
                }
        for x in p.listen():
            try:
                with self._node_stats_lock:
                    channel = ray.utils.decode(x["channel"])
                    data = x["data"]
                    if channel == log_channel:
                        data = json.loads(ray.utils.decode(data))
                        ip = data["ip"]
                        pid = str(data["pid"])
                        self._logs[ip][pid].extend(data["lines"])
                    elif channel == str(error_channel):
                        gcs_entry = ray.gcs_utils.GcsEntry.FromString(data)
                        error_data = ray.gcs_utils.ErrorTableData.FromString(
                            gcs_entry.entries[0])
                        message = error_data.error_message
                        # Strip ANSI color escape sequences.
                        message = re.sub(r"\x1b\[\d+m", "", message)
                        match = re.search(r"\(pid=(\d+), ip=(.*?)\)", message)
                        if match:
                            pid = match.group(1)
                            ip = match.group(2)
                            self._errors[ip][pid].append({
                                "message": message,
                                "timestamp": error_data.timestamp,
                                "type": error_data.type
                            })
                    elif channel == str(actor_channel):
                        gcs_entry = ray.gcs_utils.GcsEntry.FromString(data)
                        actor_data = ray.gcs_utils.ActorTableData.FromString(
                            gcs_entry.entries[0])
                        addr = (actor_data.address.ip_address,
                                str(actor_data.address.port))
                        owner_addr = (actor_data.owner_address.ip_address,
                                      str(actor_data.owner_address.port))
                        self._addr_to_owner_addr[addr] = owner_addr
                        self._addr_to_actor_id[addr] = ray.utils.binary_to_hex(
                            actor_data.actor_id)
                        self._addr_to_extra_info_dict[addr] = {
                            "jobId": ray.utils.binary_to_hex(
                                actor_data.job_id),
                            "state": actor_data.state,
                            "isDirectCall": actor_data.is_direct_call,
                            "timestamp": actor_data.timestamp
                        }
                    else:
                        # Reporter stats message: keyed by hostname.
                        data = json.loads(ray.utils.decode(data))
                        self._node_stats[data["hostname"]] = data
            except Exception:
                logger.exception(traceback.format_exc())
                continue
class RayletStats(threading.Thread):
    """Background thread that polls every raylet for node stats over gRPC.

    Keeps one NodeManagerService stub and one ReporterService stub per node,
    caches the latest GetNodeStats reply per node address (guarded by
    ``self._raylet_stats_lock``), and exposes profiling / kill-actor helpers
    built on top of those stubs.
    """
    def __init__(self, redis_address, redis_password=None):
        self.nodes_lock = threading.Lock()
        self.nodes = []
        self.stubs = {}
        self.reporter_stubs = {}
        self.redis_client = ray.services.create_redis_client(
            redis_address, password=redis_password)
        self._raylet_stats_lock = threading.Lock()
        self._raylet_stats = {}
        self._profiling_stats = {}
        self.update_nodes()
        super().__init__()
    def update_nodes(self):
        """Reconcile the gRPC stubs with the cluster's current node list."""
        with self.nodes_lock:
            self.nodes = ray.nodes()
            node_ids = [node["NodeID"] for node in self.nodes]
            # First remove node connections of disconnected nodes.
            # Iterate over a snapshot of the keys: popping from the dict
            # while iterating its live key view raises RuntimeError.
            for node_id in list(self.stubs.keys()):
                if node_id not in node_ids:
                    stub = self.stubs.pop(node_id)
                    stub.close()
                    reporter_stub = self.reporter_stubs.pop(node_id)
                    reporter_stub.close()
            # Now add node connections of new nodes.
            for node in self.nodes:
                node_id = node["NodeID"]
                if node_id not in self.stubs:
                    node_ip = node["NodeManagerAddress"]
                    channel = grpc.insecure_channel("{}:{}".format(
                        node_ip, node["NodeManagerPort"]))
                    stub = node_manager_pb2_grpc.NodeManagerServiceStub(
                        channel)
                    self.stubs[node_id] = stub
                    # Block wait until the reporter for the node starts.
                    # NOTE(review): this is a busy-wait against Redis with no
                    # sleep — consider backing off if it becomes a problem.
                    while True:
                        reporter_port = self.redis_client.get(
                            "REPORTER_PORT:{}".format(node_ip))
                        if reporter_port:
                            break
                    reporter_channel = grpc.insecure_channel("{}:{}".format(
                        node_ip, int(reporter_port)))
                    reporter_stub = reporter_pb2_grpc.ReporterServiceStub(
                        reporter_channel)
                    self.reporter_stubs[node_id] = reporter_stub
            assert len(self.stubs) == len(
                self.reporter_stubs), (self.stubs.keys(),
                                       self.reporter_stubs.keys())
    def get_raylet_stats(self) -> Dict:
        """Return a deep copy of the cached per-node stats."""
        with self._raylet_stats_lock:
            return copy.deepcopy(self._raylet_stats)
    def launch_profiling(self, node_id, pid, duration):
        """Start an async profiling run on a node; returns a profiling id
        that can later be passed to check_profiling_status/get_profiling_info.
        """
        profiling_id = str(uuid.uuid4())
        def _callback(reply_future):
            reply = reply_future.result()
            with self._raylet_stats_lock:
                self._profiling_stats[profiling_id] = reply
        reporter_stub = self.reporter_stubs[node_id]
        reply_future = reporter_stub.GetProfilingStats.future(
            reporter_pb2.GetProfilingStatsRequest(pid=pid, duration=duration))
        reply_future.add_done_callback(_callback)
        return profiling_id
    def check_profiling_status(self, profiling_id):
        """Return {"status": "pending"|"finished"|"error", ...}."""
        with self._raylet_stats_lock:
            is_present = profiling_id in self._profiling_stats
        if is_present:
            reply = self._profiling_stats[profiling_id]
            if reply.stderr:
                return {"status": "error", "error": reply.stderr}
            else:
                return {"status": "finished"}
        else:
            return {"status": "pending"}
    def get_profiling_info(self, profiling_id):
        """Return the parsed profiling result; asserts the run finished."""
        with self._raylet_stats_lock:
            profiling_stats = self._profiling_stats.get(profiling_id)
        assert profiling_stats, "profiling not finished"
        return json.loads(profiling_stats.profiling_stats)
    def kill_actor(self, actor_id, ip_address, port):
        """Fire-and-forget a KillActor RPC at the actor's core worker."""
        channel = grpc.insecure_channel("{}:{}".format(ip_address, int(port)))
        stub = core_worker_pb2_grpc.CoreWorkerServiceStub(channel)
        def _callback(reply_future):
            _ = reply_future.result()
        reply_future = stub.KillActor.future(
            core_worker_pb2.KillActorRequest(
                intended_actor_id=ray.utils.hex_to_binary(actor_id)))
        reply_future.add_done_callback(_callback)
        return {}
    def run(self):
        """Poll every known raylet once per second and cache the replies."""
        counter = 0
        while True:
            time.sleep(1.0)
            replies = {}
            for node in self.nodes:
                node_id = node["NodeID"]
                stub = self.stubs[node_id]
                reply = stub.GetNodeStats(
                    node_manager_pb2.GetNodeStatsRequest(), timeout=2)
                reply_dict = MessageToDict(reply)
                reply_dict["nodeId"] = node_id
                replies[node["NodeManagerAddress"]] = reply_dict
            with self._raylet_stats_lock:
                for address, reply_dict in replies.items():
                    self._raylet_stats[address] = reply_dict
            counter += 1
            # From time to time, check if new nodes have joined the cluster
            # and update self.nodes. (The previous `if counter % 10:` was
            # inverted: it refreshed on 9 of every 10 iterations instead of
            # every 10th.)
            if counter % 10 == 0:
                self.update_nodes()
# Script entry point: parse CLI flags, configure logging, run the dashboard
# forever; on failure, push the error to all drivers via Redis and re-raise.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=("Parse Redis server for the "
                     "dashboard to connect to."))
    parser.add_argument(
        "--host",
        required=True,
        type=str,
        help="The host to use for the HTTP server.")
    parser.add_argument(
        "--port",
        required=True,
        type=int,
        help="The port to use for the HTTP server.")
    parser.add_argument(
        "--redis-address",
        required=True,
        type=str,
        help="The address to use for Redis.")
    parser.add_argument(
        "--redis-password",
        required=False,
        type=str,
        default=None,
        help="the password to use for Redis")
    parser.add_argument(
        "--logging-level",
        required=False,
        type=str,
        default=ray_constants.LOGGER_LEVEL,
        choices=ray_constants.LOGGER_LEVEL_CHOICES,
        help=ray_constants.LOGGER_LEVEL_HELP)
    parser.add_argument(
        "--logging-format",
        required=False,
        type=str,
        default=ray_constants.LOGGER_FORMAT,
        help=ray_constants.LOGGER_FORMAT_HELP)
    parser.add_argument(
        "--temp-dir",
        required=False,
        type=str,
        default=None,
        help="Specify the path of the temporary directory use by Ray process.")
    args = parser.parse_args()
    ray.utils.setup_logger(args.logging_level, args.logging_format)
    try:
        dashboard = Dashboard(
            args.host,
            args.port,
            args.redis_address,
            args.temp_dir,
            redis_password=args.redis_password,
        )
        dashboard.run()
    except Exception as e:
        # Something went wrong, so push an error to all drivers.
        redis_client = ray.services.create_redis_client(
            args.redis_address, password=args.redis_password)
        traceback_str = ray.utils.format_error_message(traceback.format_exc())
        message = ("The dashboard on node {} failed with the following "
                   "error:\n{}".format(os.uname()[1], traceback_str))
        ray.utils.push_error_to_driver_through_redis(
            redis_client, ray_constants.DASHBOARD_DIED_ERROR, message)
        raise e
|
|
import json
import model
__author__ = 'wangc31'
class Resource(object):
    """This class represents REST resource model"""
    def __init__(self, raw_resource):
        """
        This is the model for REST resources. Actually it stores the data of resource in a dictionary.
        Besides, a resource is linkable, which means it contains a list of links.
        :param raw_resource: the raw resource is a dictionary.
        """
        # The underlying resource payload (a plain dict, typically parsed JSON).
        self._raw_resource_ = raw_resource
        # model.RestLink instances extracted from the 'links' attribute.
        self._rest_links_ = []
        self._init_links()
    def _init_links(self):
        """
        Initialize the links of a resource
        :return:
        """
        if self.is_key_existing('links'):
            self._rest_links_ += [self._generate_link(link)
                                  for link in self.get('links')
                                  if self._is_valid_link(link)]
    def keys(self):
        """
        Get the top level keys of a resource
        :return: top level keys
        """
        return self._raw_resource_.keys()
    def get(self, key):
        """
        Get the attribute value according to the key
        :param key: attribute key
        :return: attribute value
        """
        return self._raw_resource_.get(key)
    def put(self, key, value):
        """
        Add the attribute key and value
        :param key: attribute key
        :param value: attribute value
        :return:
        """
        self._raw_resource_[key] = value
    def all_links(self):
        """
        Get all the links
        :return: links
        """
        return self._rest_links_
    def find_link(self, link_rel, title=None):
        """
        Get the link according to link relation and title
        :param link_rel: link relation
        :param title: link title; when None, the first rel match wins
        :return: matched link, or None when nothing matches
        """
        # A link matches when both its rel and its href/template flavor agree.
        for link in self._rest_links_:
            if link.rel == link_rel.rel and link.hreftemplate == link_rel.hreftemplate and (
                    title is None or title == link.title):
                return link
        return None
    def entry_count(self):
        """
        Get count of the entries if the resource is a collection
        :return: count of entries
        """
        return len(self.get('entries'))
    def get_entry(self, index):
        """
        Get an entry at position index
        :param index: entry index
        :return: entry
        """
        entry = Resource(self.get('entries')[index])
        return entry
    def get_entries(self):
        """
        Get the collection of entries
        :return: entries; empty list when the resource has no 'entries'
        """
        if not self.is_key_existing('entries'):
            return []
        return [Resource(raw_entry)
                for raw_entry in self.get('entries')]
    def is_key_existing(self, key):
        """
        Check whether an attribute key exists
        :param key: the attribute key
        :return: True if existing
        """
        return key in self._raw_resource_
    def raw_resource(self):
        """
        Get the raw resource, which is actually a dictionary
        :return: raw resource
        """
        return self._raw_resource_
    def representation(self, indent=4):
        """
        Get the representation of the raw resource, which is in JSON
        :param indent: indent of JSON representation
        :return: JSON representation
        """
        return json.dumps(self._raw_resource_, indent=indent)
    def reference(self, indent=4):
        """
        Get the reference of a resource in JSON, like:
        {"reference": "http://localhost:8080/dctm-rest/repositories/REPO/objects/090000058000251a"}
        :param indent: indent of JSON representation
        :return: reference in JSON
        """
        # NOTE(review): assumes a 'self' link exists; find_link returning
        # None would raise AttributeError here — confirm callers guarantee it.
        reference = {'href': self.find_link(model.RestLink.REL_SELF).href}
        return json.dumps(reference, indent=indent)
    @staticmethod
    def _generate_link(link_dict):
        """
        Generate instance of model.Link from link in raw resource
        :param link_dict: the link in the raw resource
        :return: instance of model.Link; hreftemplate links have no title
        """
        if 'href' in link_dict:
            return model.RestLink.Link(link_dict['rel'], link_dict['href'], False,
                                       link_dict['title'] if 'title' in link_dict else None)
        elif 'hreftemplate' in link_dict:
            return model.RestLink.Link(link_dict['rel'], link_dict['hreftemplate'], True)
        return None
    @staticmethod
    def _is_valid_link(link_dict):
        """
        Check whether a link in raw resource is valid
        :param link_dict: link in raw resource
        :return: True if the link is valid (has a rel and an href/template)
        """
        if 'rel' in link_dict and ('href' in link_dict or 'hreftemplate' in link_dict):
            return True
        else:
            return False
    def __repr__(self):
        return 'Resource(%r)' % self._raw_resource_
    def __str__(self):
        return self.representation()
class Home(Resource):
    """
    This is the model of home resource
    """
    def __init__(self, resource):
        """
        Initialize the home resource
        :param resource: data in response of the entry url
        """
        Resource.__init__(self, resource.raw_resource())
    def get_home_entry_link(self, link_rel):
        """
        Get link from home resource according to link relation
        :param link_rel: link relation
        :return: matched link
        """
        for key in self.get('resources'):
            if key == link_rel.rel:
                return model.RestLink.Link(key, self.get('resources').get(key).get('href'))
    def get_product_info_link(self):
        """Get the link to the product info (about) resource."""
        return self.get_home_entry_link(model.RestLink.REL_ABOUT)
    def get_home_entry_methods(self, rel):
        """
        Get supported HTTP methods for a link specified by the link relation
        :param rel: link relation
        :return: array of support HTTP methods
        """
        for key in self.get('resources'):
            # Compare against rel.rel, consistent with get_home_entry_link and
            # Resource.find_link; the previous `rel.link_rel` was a typo that
            # would raise AttributeError on link-relation objects.
            if key == rel.rel:
                return self.get('resources').get(key).get('hints').get('allow')
    def get_home_entry_media_types(self, rel):
        """
        Get supported media types for a link specified by the link relation
        :param rel: link relation
        :return: array supported media types
        """
        for key in self.get('resources'):
            if key == rel.rel:
                return self.get('resources').get(key).get('hints').get('representations')
    def __repr__(self):
        return 'Home(%r)' % self._raw_resource_
    def __str__(self):
        return super(Home, self).__str__()
|
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.crystal
~~~~~~~~~~~~~~~~~~~~~~~
Lexer for Crystal.
:copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import ExtendedRegexLexer, include, \
bygroups, default, LexerContext, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error
__all__ = ['CrystalLexer']
line_re = re.compile('.*?\n')
CRYSTAL_OPERATORS = [
'!=', '!~', '!', '%', '&&', '&', '**', '*', '+', '-', '/', '<=>', '<<', '<=', '<',
'===', '==', '=~', '=', '>=', '>>', '>', '[]=', '[]?', '[]', '^', '||', '|', '~'
]
class CrystalLexer(ExtendedRegexLexer):
"""
For `Crystal <http://crystal-lang.org>`_ source code.
.. versionadded:: 2.2
"""
name = 'Crystal'
aliases = ['cr', 'crystal']
filenames = ['*.cr']
mimetypes = ['text/x-crystal']
flags = re.DOTALL | re.MULTILINE
    def heredoc_callback(self, match, ctx):
        """Tokenize a Crystal heredoc opener and its (possibly stacked) bodies.

        Emits the opener tokens, lexes the rest of the line (which may open
        further heredocs), and once the outermost opener's line is done,
        scans forward line by line emitting each pending heredoc body.
        """
        # okay, this is the hardest part of parsing Crystal...
        # match: 1 = <<-?, 2 = quote? 3 = name 4 = quote? 5 = rest of line
        start = match.start(1)
        yield start, Operator, match.group(1)  # <<-?
        yield match.start(2), String.Heredoc, match.group(2)  # quote ", ', `
        yield match.start(3), String.Delimiter, match.group(3)  # heredoc name
        yield match.start(4), String.Heredoc, match.group(4)  # quote again
        # Stack of (tolerant, name): <<- allows the terminator to be indented.
        heredocstack = ctx.__dict__.setdefault('heredocstack', [])
        outermost = not bool(heredocstack)
        heredocstack.append((match.group(1) == '<<-', match.group(3)))
        ctx.pos = match.start(5)
        ctx.end = match.end(5)
        # this may find other heredocs
        yield from self.get_tokens_unprocessed(context=ctx)
        ctx.pos = match.end()
        if outermost:
            # this is the outer heredoc again, now we can process them all
            for tolerant, hdname in heredocstack:
                lines = []
                # NOTE: `match` is deliberately rebound to each body line.
                for match in line_re.finditer(ctx.text, ctx.pos):
                    if tolerant:
                        check = match.group().strip()
                    else:
                        check = match.group().rstrip()
                    if check == hdname:
                        # Terminator found: emit the buffered body lines.
                        for amatch in lines:
                            yield amatch.start(), String.Heredoc, amatch.group()
                        yield match.start(), String.Delimiter, match.group()
                        ctx.pos = match.end()
                        break
                    else:
                        lines.append(match)
                else:
                    # end of heredoc not found -- error!
                    for amatch in lines:
                        yield amatch.start(), Error, amatch.group()
            ctx.end = len(ctx.text)
            del heredocstack[:]
def gen_crystalstrings_rules():
def intp_regex_callback(self, match, ctx):
yield match.start(1), String.Regex, match.group(1) # begin
nctx = LexerContext(match.group(3), 0, ['interpolated-regex'])
for i, t, v in self.get_tokens_unprocessed(context=nctx):
yield match.start(3)+i, t, v
yield match.start(4), String.Regex, match.group(4) # end[imsx]*
ctx.pos = match.end()
def intp_string_callback(self, match, ctx):
yield match.start(1), String.Other, match.group(1)
nctx = LexerContext(match.group(3), 0, ['interpolated-string'])
for i, t, v in self.get_tokens_unprocessed(context=nctx):
yield match.start(3)+i, t, v
yield match.start(4), String.Other, match.group(4) # end
ctx.pos = match.end()
states = {}
states['strings'] = [
(r'\:@{0,2}[a-zA-Z_]\w*[!?]?', String.Symbol),
(words(CRYSTAL_OPERATORS, prefix=r'\:@{0,2}'), String.Symbol),
(r":'(\\\\|\\'|[^'])*'", String.Symbol),
# This allows arbitrary text after '\ for simplicity
(r"'(\\\\|\\'|[^']|\\[^'\\]+)'", String.Char),
(r':"', String.Symbol, 'simple-sym'),
# Crystal doesn't have "symbol:"s but this simplifies function args
(r'([a-zA-Z_]\w*)(:)(?!:)', bygroups(String.Symbol, Punctuation)),
(r'"', String.Double, 'simple-string'),
(r'(?<!\.)`', String.Backtick, 'simple-backtick'),
]
# double-quoted string and symbol
for name, ttype, end in ('string', String.Double, '"'), \
('sym', String.Symbol, '"'), \
('backtick', String.Backtick, '`'):
states['simple-'+name] = [
include('string-escaped' if name == 'sym' else 'string-intp-escaped'),
(r'[^\\%s#]+' % end, ttype),
(r'[\\#]', ttype),
(end, ttype, '#pop'),
]
# braced quoted strings
for lbrace, rbrace, bracecc, name in \
('\\{', '\\}', '{}', 'cb'), \
('\\[', '\\]', '\\[\\]', 'sb'), \
('\\(', '\\)', '()', 'pa'), \
('<', '>', '<>', 'ab'):
states[name+'-intp-string'] = [
(r'\\' + lbrace, String.Other),
(lbrace, String.Other, '#push'),
(rbrace, String.Other, '#pop'),
include('string-intp-escaped'),
(r'[\\#' + bracecc + ']', String.Other),
(r'[^\\#' + bracecc + ']+', String.Other),
]
states['strings'].append((r'%' + lbrace, String.Other,
name+'-intp-string'))
states[name+'-string'] = [
(r'\\[\\' + bracecc + ']', String.Other),
(lbrace, String.Other, '#push'),
(rbrace, String.Other, '#pop'),
(r'[\\#' + bracecc + ']', String.Other),
(r'[^\\#' + bracecc + ']+', String.Other),
]
# http://crystal-lang.org/docs/syntax_and_semantics/literals/array.html
states['strings'].append((r'%[wi]' + lbrace, String.Other,
name+'-string'))
states[name+'-regex'] = [
(r'\\[\\' + bracecc + ']', String.Regex),
(lbrace, String.Regex, '#push'),
(rbrace + '[imsx]*', String.Regex, '#pop'),
include('string-intp'),
(r'[\\#' + bracecc + ']', String.Regex),
(r'[^\\#' + bracecc + ']+', String.Regex),
]
states['strings'].append((r'%r' + lbrace, String.Regex,
name+'-regex'))
# these must come after %<brace>!
states['strings'] += [
# %r regex
(r'(%r([\W_]))((?:\\\2|(?!\2).)*)(\2[imsx]*)',
intp_regex_callback),
# regular fancy strings with qsw
(r'(%[wi]([\W_]))((?:\\\2|(?!\2).)*)(\2)',
intp_string_callback),
# special forms of fancy strings after operators or
# in method calls with braces
(r'(?<=[-+/*%=<>&!^|~,(])(\s*)(%([\t ])(?:(?:\\\3|(?!\3).)*)\3)',
bygroups(Text, String.Other, None)),
# and because of fixed width lookbehinds the whole thing a
# second time for line startings...
(r'^(\s*)(%([\t ])(?:(?:\\\3|(?!\3).)*)\3)',
bygroups(Text, String.Other, None)),
# all regular fancy strings without qsw
(r'(%([\[{(<]))((?:\\\2|(?!\2).)*)(\2)',
intp_string_callback),
]
return states
tokens = {
'root': [
(r'#.*?$', Comment.Single),
# keywords
(words('''
abstract asm as begin break case do else elsif end ensure extend ifdef if
include instance_sizeof next of pointerof private protected rescue return
require sizeof super then typeof unless until when while with yield
'''.split(), suffix=r'\b'), Keyword),
(words(['true', 'false', 'nil'], suffix=r'\b'), Keyword.Constant),
# start of function, class and module names
(r'(module|lib)(\s+)([a-zA-Z_]\w*(?:::[a-zA-Z_]\w*)*)',
bygroups(Keyword, Text, Name.Namespace)),
(r'(def|fun|macro)(\s+)((?:[a-zA-Z_]\w*::)*)',
bygroups(Keyword, Text, Name.Namespace), 'funcname'),
(r'def(?=[*%&^`~+-/\[<>=])', Keyword, 'funcname'),
(r'(class|struct|union|type|alias|enum)(\s+)((?:[a-zA-Z_]\w*::)*)',
bygroups(Keyword, Text, Name.Namespace), 'classname'),
(r'(self|out|uninitialized)\b|(is_a|responds_to)\?', Keyword.Pseudo),
# macros
(words('''
debugger record pp assert_responds_to spawn parallel
getter setter property delegate def_hash def_equals def_equals_and_hash
forward_missing_to
'''.split(), suffix=r'\b'), Name.Builtin.Pseudo),
(r'getter[!?]|property[!?]|__(DIR|FILE|LINE)__\b', Name.Builtin.Pseudo),
# builtins
# http://crystal-lang.org/api/toplevel.html
(words('''
Object Value Struct Reference Proc Class Nil Symbol Enum Void
Bool Number Int Int8 Int16 Int32 Int64 UInt8 UInt16 UInt32 UInt64
Float Float32 Float64 Char String
Pointer Slice Range Exception Regex
Mutex StaticArray Array Hash Set Tuple Deque Box Process File
Dir Time Channel Concurrent Scheduler
abort at_exit caller delay exit fork future get_stack_top gets
lazy loop main p print printf puts
raise rand read_line sleep sprintf system with_color
'''.split(), prefix=r'(?<!\.)', suffix=r'\b'), Name.Builtin),
# normal heredocs
(r'(?<!\w)(<<-?)(["`\']?)([a-zA-Z_]\w*)(\2)(.*?\n)',
heredoc_callback),
# empty string heredocs
(r'(<<-?)("|\')()(\2)(.*?\n)', heredoc_callback),
(r'__END__', Comment.Preproc, 'end-part'),
# multiline regex (after keywords or assignments)
(r'(?:^|(?<=[=<>~!:])|'
r'(?<=(?:\s|;)when\s)|'
r'(?<=(?:\s|;)or\s)|'
r'(?<=(?:\s|;)and\s)|'
r'(?<=\.index\s)|'
r'(?<=\.scan\s)|'
r'(?<=\.sub\s)|'
r'(?<=\.sub!\s)|'
r'(?<=\.gsub\s)|'
r'(?<=\.gsub!\s)|'
r'(?<=\.match\s)|'
r'(?<=(?:\s|;)if\s)|'
r'(?<=(?:\s|;)elsif\s)|'
r'(?<=^when\s)|'
r'(?<=^index\s)|'
r'(?<=^scan\s)|'
r'(?<=^sub\s)|'
r'(?<=^gsub\s)|'
r'(?<=^sub!\s)|'
r'(?<=^gsub!\s)|'
r'(?<=^match\s)|'
r'(?<=^if\s)|'
r'(?<=^elsif\s)'
r')(\s*)(/)', bygroups(Text, String.Regex), 'multiline-regex'),
# multiline regex (in method calls or subscripts)
(r'(?<=\(|,|\[)/', String.Regex, 'multiline-regex'),
# multiline regex (this time the funny no whitespace rule)
(r'(\s+)(/)(?![\s=])', bygroups(Text, String.Regex),
'multiline-regex'),
# lex numbers and ignore following regular expressions which
# are division operators in fact (grrrr. i hate that. any
# better ideas?)
# since pygments 0.7 we also eat a "?" operator after numbers
# so that the char operator does not work. Chars are not allowed
# there so that you can use the ternary operator.
# stupid example:
# x>=0?n[x]:""
(r'(0o[0-7]+(?:_[0-7]+)*(?:_?[iu][0-9]+)?)\b(\s*)([/?])?',
bygroups(Number.Oct, Text, Operator)),
(r'(0x[0-9A-Fa-f]+(?:_[0-9A-Fa-f]+)*(?:_?[iu][0-9]+)?)\b(\s*)([/?])?',
bygroups(Number.Hex, Text, Operator)),
(r'(0b[01]+(?:_[01]+)*(?:_?[iu][0-9]+)?)\b(\s*)([/?])?',
bygroups(Number.Bin, Text, Operator)),
# 3 separate expressions for floats because any of the 3 optional
# parts makes it a float
(r'((?:0(?![0-9])|[1-9][\d_]*)(?:\.\d[\d_]*)(?:e[+-]?[0-9]+)?'
r'(?:_?f[0-9]+)?)(\s*)([/?])?',
bygroups(Number.Float, Text, Operator)),
(r'((?:0(?![0-9])|[1-9][\d_]*)(?:\.\d[\d_]*)?(?:e[+-]?[0-9]+)'
r'(?:_?f[0-9]+)?)(\s*)([/?])?',
bygroups(Number.Float, Text, Operator)),
(r'((?:0(?![0-9])|[1-9][\d_]*)(?:\.\d[\d_]*)?(?:e[+-]?[0-9]+)?'
r'(?:_?f[0-9]+))(\s*)([/?])?',
bygroups(Number.Float, Text, Operator)),
(r'(0\b|[1-9][\d]*(?:_\d+)*(?:_?[iu][0-9]+)?)\b(\s*)([/?])?',
bygroups(Number.Integer, Text, Operator)),
# Names
(r'@@[a-zA-Z_]\w*', Name.Variable.Class),
(r'@[a-zA-Z_]\w*', Name.Variable.Instance),
(r'\$\w+', Name.Variable.Global),
(r'\$[!@&`\'+~=/\\,;.<>_*$?:"^-]', Name.Variable.Global),
(r'\$-[0adFiIlpvw]', Name.Variable.Global),
(r'::', Operator),
include('strings'),
# chars
(r'\?(\\[MC]-)*' # modifiers
r'(\\([\\befnrtv#"\']|x[a-fA-F0-9]{1,2}|[0-7]{1,3})|\S)'
r'(?!\w)',
String.Char),
(r'[A-Z][A-Z_]+\b', Name.Constant),
# macro expansion
(r'\{%', String.Interpol, 'in-macro-control'),
(r'\{\{', String.Interpol, 'in-macro-expr'),
# attributes
(r'(@\[)(\s*)([A-Z]\w*)',
bygroups(Operator, Text, Name.Decorator), 'in-attr'),
# this is needed because Crystal attributes can look
# like keywords (class) or like this: ` ?!?
(words(CRYSTAL_OPERATORS, prefix=r'(\.|::)'),
bygroups(Operator, Name.Operator)),
(r'(\.|::)([a-zA-Z_]\w*[!?]?|[*%&^`~+\-/\[<>=])',
bygroups(Operator, Name)),
# Names can end with [!?] unless it's "!="
(r'[a-zA-Z_]\w*(?:[!?](?!=))?', Name),
(r'(\[|\]\??|\*\*|<=>?|>=|<<?|>>?|=~|===|'
r'!~|&&?|\|\||\.{1,3})', Operator),
(r'[-+/*%=<>&!^|~]=?', Operator),
(r'[(){};,/?:\\]', Punctuation),
(r'\s+', Text)
],
'funcname': [
(r'(?:([a-zA-Z_]\w*)(\.))?'
r'([a-zA-Z_]\w*[!?]?|\*\*?|[-+]@?|'
r'[/%&|^`~]|\[\]=?|<<|>>|<=?>|>=?|===?)',
bygroups(Name.Class, Operator, Name.Function), '#pop'),
default('#pop')
],
'classname': [
(r'[A-Z_]\w*', Name.Class),
(r'(\()(\s*)([A-Z_]\w*)(\s*)(\))',
bygroups(Punctuation, Text, Name.Class, Text, Punctuation)),
default('#pop')
],
'in-intp': [
(r'\{', String.Interpol, '#push'),
(r'\}', String.Interpol, '#pop'),
include('root'),
],
'string-intp': [
(r'#\{', String.Interpol, 'in-intp'),
],
'string-escaped': [
(r'\\([\\befnstv#"\']|x[a-fA-F0-9]{1,2}|[0-7]{1,3})', String.Escape)
],
'string-intp-escaped': [
include('string-intp'),
include('string-escaped'),
],
'interpolated-regex': [
include('string-intp'),
(r'[\\#]', String.Regex),
(r'[^\\#]+', String.Regex),
],
'interpolated-string': [
include('string-intp'),
(r'[\\#]', String.Other),
(r'[^\\#]+', String.Other),
],
'multiline-regex': [
include('string-intp'),
(r'\\\\', String.Regex),
(r'\\/', String.Regex),
(r'[\\#]', String.Regex),
(r'[^\\/#]+', String.Regex),
(r'/[imsx]*', String.Regex, '#pop'),
],
'end-part': [
(r'.+', Comment.Preproc, '#pop')
],
'in-macro-control': [
(r'\{%', String.Interpol, '#push'),
(r'%\}', String.Interpol, '#pop'),
(r'for\b|in\b', Keyword),
include('root'),
],
'in-macro-expr': [
(r'\{\{', String.Interpol, '#push'),
(r'\}\}', String.Interpol, '#pop'),
include('root'),
],
'in-attr': [
(r'\[', Operator, '#push'),
(r'\]', Operator, '#pop'),
include('root'),
],
}
tokens.update(gen_crystalstrings_rules())
|
|
#!/usr/bin/env python
#
# $Id$
#
# Copyright (c) 2009, Jay Loden, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""OSX platform implementation."""
import errno
import os
import sys
import _psutil_osx
import _psutil_posix
from psutil import _psposix
from psutil.error import AccessDenied, NoSuchProcess, TimeoutExpired
from psutil._compat import namedtuple
from psutil._common import *
__extra__all__ = []
# --- constants
NUM_CPUS = _psutil_osx.get_num_cpus()  # number of logical CPUs
BOOT_TIME = _psutil_osx.get_system_boot_time()  # system boot time (epoch seconds)
TOTAL_PHYMEM = _psutil_osx.get_virtual_mem()[0]  # total physical memory in bytes
# memory page size; used to convert page counts returned by the C
# extension into bytes
_PAGESIZE = os.sysconf("SC_PAGE_SIZE")
_cputimes_ntuple = namedtuple('cputimes', 'user nice system idle')
# --- functions
# return type of virtual_memory(): cross-platform fields first, then
# the OSX-specific ones
nt_virtmem_info = namedtuple('vmem', ' '.join([
    # all platforms
    'total', 'available', 'percent', 'used', 'free',
    # OSX specific
    'active',
    'inactive',
    'wired']))
def virtual_memory():
    """Return statistics about system virtual memory as a namedtuple."""
    mem = _psutil_osx.get_virtual_mem()
    total, active, inactive, wired, free = mem
    # inactive pages can be reclaimed, so they count as available
    avail = free + inactive
    used = active + inactive + wired
    pct = usage_percent(total - avail, total, _round=1)
    return nt_virtmem_info(
        total, avail, pct, used, free, active, inactive, wired)
def swap_memory():
    """Return swap memory stats (total, used, free, percent, sin, sout)."""
    stats = _psutil_osx.get_swap_mem()
    total, used, free, sin, sout = stats
    return nt_swapmeminfo(total, used, free,
                          usage_percent(used, total, _round=1),
                          sin, sout)
def get_system_cpu_times():
    """Return system-wide CPU times as a (user, nice, system, idle)
    namedtuple."""
    return _cputimes_ntuple(*_psutil_osx.get_system_cpu_times())
def get_system_per_cpu_times():
    """Return a list of CPU-times namedtuples, one per logical CPU."""
    return [_cputimes_ntuple(*cpu_t)
            for cpu_t in _psutil_osx.get_system_per_cpu_times()]
def disk_partitions(all=False):
    """Return mounted disk partitions as a list of namedtuples.

    When *all* is False, pseudo devices (device path not absolute or
    not existing on disk) are filtered out.
    """
    retlist = []
    for device, mountpoint, fstype, opts in _psutil_osx.get_disk_partitions():
        if device == 'none':
            device = ''
        if not all and not (os.path.isabs(device) and os.path.exists(device)):
            continue
        retlist.append(nt_partition(device, mountpoint, fstype, opts))
    return retlist
def get_system_users():
    """Return currently connected users as a list of namedtuples."""
    retlist = []
    for user, tty, hostname, tstamp in _psutil_osx.get_system_users():
        # a tty of '~' marks a reboot/shutdown utmp record, not a login;
        # entries without a timestamp are skipped as well
        if tty == '~' or not tstamp:
            continue
        retlist.append(nt_user(user, tty or None, hostname or None, tstamp))
    return retlist
# Bind public API names directly to their platform-specific
# implementations (C extension / shared posix helpers).
get_pid_list = _psutil_osx.get_pid_list
pid_exists = _psposix.pid_exists
get_disk_usage = _psposix.get_disk_usage
network_io_counters = _psutil_osx.get_network_io_counters
disk_io_counters = _psutil_osx.get_disk_io_counters
# --- decorator
def wrap_exceptions(fun):
    """Decorator which translates OSError raised by *fun* into the
    appropriate psutil exception:

    - ESRCH          -> NoSuchProcess
    - EPERM / EACCES -> AccessDenied
    - anything else is re-raised unchanged.

    Fixes: the parameter was previously named ``callable``, shadowing
    the builtin; the wrapper also now preserves the wrapped function's
    metadata via functools.wraps.
    """
    import functools  # local import to keep module-level deps unchanged

    @functools.wraps(fun)
    def wrapper(self, *args, **kwargs):
        try:
            return fun(self, *args, **kwargs)
        except OSError:
            # sys.exc_info() keeps py2/py3 source compatibility
            err = sys.exc_info()[1]
            if err.errno == errno.ESRCH:
                raise NoSuchProcess(self.pid, self._process_name)
            if err.errno in (errno.EPERM, errno.EACCES):
                raise AccessDenied(self.pid, self._process_name)
            raise
    return wrapper
# Map BSD-style process status codes returned by the C extension to
# the cross-platform STATUS_* constants from psutil._common.
_status_map = {
    _psutil_osx.SIDL : STATUS_IDLE,
    _psutil_osx.SRUN : STATUS_RUNNING,
    _psutil_osx.SSLEEP : STATUS_SLEEPING,
    _psutil_osx.SSTOP : STATUS_STOPPED,
    _psutil_osx.SZOMB : STATUS_ZOMBIE,
}
class Process(object):
    """Wrapper class around underlying C implementation."""
    __slots__ = ["pid", "_process_name"]
    def __init__(self, pid):
        self.pid = pid
        self._process_name = None
    @wrap_exceptions
    def get_process_name(self):
        """Return process name as a string of limited len (15)."""
        return _psutil_osx.get_process_name(self.pid)
    @wrap_exceptions
    def get_process_exe(self):
        """Return the path of the process executable."""
        return _psutil_osx.get_process_exe(self.pid)
    @wrap_exceptions
    def get_process_cmdline(self):
        """Return process cmdline as a list of arguments."""
        # the C call may not distinguish a dead pid; check explicitly
        if not pid_exists(self.pid):
            raise NoSuchProcess(self.pid, self._process_name)
        return _psutil_osx.get_process_cmdline(self.pid)
    @wrap_exceptions
    def get_process_ppid(self):
        """Return process parent pid."""
        return _psutil_osx.get_process_ppid(self.pid)
    @wrap_exceptions
    def get_process_cwd(self):
        """Return the process current working directory."""
        return _psutil_osx.get_process_cwd(self.pid)
    @wrap_exceptions
    def get_process_uids(self):
        """Return real, effective and saved user ids as a namedtuple."""
        real, effective, saved = _psutil_osx.get_process_uids(self.pid)
        return nt_uids(real, effective, saved)
    @wrap_exceptions
    def get_process_gids(self):
        """Return real, effective and saved group ids as a namedtuple."""
        real, effective, saved = _psutil_osx.get_process_gids(self.pid)
        return nt_gids(real, effective, saved)
    @wrap_exceptions
    def get_process_terminal(self):
        """Return the terminal associated with the process, if any,
        else None."""
        tty_nr = _psutil_osx.get_process_tty_nr(self.pid)
        tmap = _psposix._get_terminal_map()
        try:
            return tmap[tty_nr]
        except KeyError:
            # process is not attached to a terminal
            return None
    @wrap_exceptions
    def get_memory_info(self):
        """Return a tuple with the process' RSS and VMS size."""
        rss, vms = _psutil_osx.get_process_memory_info(self.pid)[:2]
        return nt_meminfo(rss, vms)
    # extended memory info; pfaults/pageins come from the C extension as
    # page counts and are converted to bytes in get_ext_memory_info()
    _nt_ext_mem = namedtuple('meminfo', 'rss vms pfaults pageins')
    @wrap_exceptions
    def get_ext_memory_info(self):
        """Return a namedtuple with the process' RSS and VMS size plus
        page faults and pageins, the latter two expressed in bytes."""
        rss, vms, pfaults, pageins = _psutil_osx.get_process_memory_info(self.pid)
        return self._nt_ext_mem(rss, vms,
                                pfaults * _PAGESIZE,
                                pageins * _PAGESIZE)
    @wrap_exceptions
    def get_cpu_times(self):
        """Return user and system CPU times as a namedtuple."""
        user, system = _psutil_osx.get_process_cpu_times(self.pid)
        return nt_cputimes(user, system)
    @wrap_exceptions
    def get_process_create_time(self):
        """Return the start time of the process as a number of seconds since
        the epoch."""
        return _psutil_osx.get_process_create_time(self.pid)
    @wrap_exceptions
    def get_num_ctx_switches(self):
        """Return voluntary and involuntary context switches."""
        return nt_ctxsw(*_psutil_osx.get_process_num_ctx_switches(self.pid))
    @wrap_exceptions
    def get_process_num_threads(self):
        """Return the number of threads belonging to the process."""
        return _psutil_osx.get_process_num_threads(self.pid)
    @wrap_exceptions
    def get_open_files(self):
        """Return files opened by process."""
        if self.pid == 0:
            # kernel process: cannot be queried
            return []
        files = []
        rawlist = _psutil_osx.get_process_open_files(self.pid)
        for path, fd in rawlist:
            # filter out entries which are not regular files
            if isfile_strict(path):
                ntuple = nt_openfile(path, fd)
                files.append(ntuple)
        return files
    @wrap_exceptions
    def get_connections(self, kind='inet'):
        """Return network connections opened by a process as a list of
        namedtuples.
        """
        if kind not in conn_tmap:
            raise ValueError("invalid %r kind argument; choose between %s"
                             % (kind, ', '.join([repr(x) for x in conn_tmap])))
        families, types = conn_tmap[kind]
        ret = _psutil_osx.get_process_connections(self.pid, families, types)
        return [nt_connection(*conn) for conn in ret]
    @wrap_exceptions
    def get_num_fds(self):
        """Return the number of file descriptors opened by this process."""
        if self.pid == 0:
            return 0
        return _psutil_osx.get_process_num_fds(self.pid)
    @wrap_exceptions
    def process_wait(self, timeout=None):
        """Wait for process termination; on timeout re-raise
        TimeoutExpired enriched with pid and process name."""
        try:
            return _psposix.wait_pid(self.pid, timeout)
        except TimeoutExpired:
            raise TimeoutExpired(self.pid, self._process_name)
    @wrap_exceptions
    def get_process_nice(self):
        """Return process niceness (priority)."""
        return _psutil_posix.getpriority(self.pid)
    @wrap_exceptions
    def set_process_nice(self, value):
        """Set process niceness (priority)."""
        return _psutil_posix.setpriority(self.pid, value)
    @wrap_exceptions
    def get_process_status(self):
        """Return the process status as one of the STATUS_* constants."""
        code = _psutil_osx.get_process_status(self.pid)
        if code in _status_map:
            return _status_map[code]
        # unknown status code reported by the kernel
        return constant(-1, "?")
    @wrap_exceptions
    def get_process_threads(self):
        """Return threads belonging to the process as a list of
        (id, user_time, system_time) namedtuples."""
        rawlist = _psutil_osx.get_process_threads(self.pid)
        retlist = []
        for thread_id, utime, stime in rawlist:
            ntuple = nt_thread(thread_id, utime, stime)
            retlist.append(ntuple)
        return retlist
    # return types for get_memory_maps() consumers (grouped vs extended)
    nt_mmap_grouped = namedtuple('mmap',
        'path rss private swapped dirtied ref_count shadow_depth')
    nt_mmap_ext = namedtuple('mmap',
        'addr perms path rss private swapped dirtied ref_count shadow_depth')
    @wrap_exceptions
    def get_memory_maps(self):
        """Return the process' memory mappings."""
        return _psutil_osx.get_process_memory_maps(self.pid)
|
|
# Copyright (c) 2016 Alex Meade. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
NetApp Data ONTAP data motion library.
This library handles transferring data from a source to a destination. Its
responsibility is to handle this as efficiently as possible given the
location of the data's source and destination. This includes cloning,
SnapMirror, and copy-offload as improvements to brute force data transfer.
"""
from oslo_log import log
from oslo_utils import excutils
from cinder import exception
from cinder import utils
from cinder.i18n import _, _LE, _LI
from cinder.objects import fields
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap.utils import utils as config_utils
from cinder.volume import utils as volume_utils
LOG = log.getLogger(__name__)
# Substring present in ONTAP API error messages when a SnapMirror entry
# is already gone; used to tolerate double-deletes.
ENTRY_DOES_NOT_EXIST = "(entry doesn't exist)"
# Seconds between polls while waiting for a SnapMirror to quiesce.
QUIESCE_RETRY_INTERVAL = 5
class DataMotionMixin(object):
def get_replication_backend_names(self, config):
"""Get the backend names for all configured replication targets."""
backend_names = []
replication_devices = config.safe_get('replication_device')
if replication_devices:
for replication_device in replication_devices:
backend_id = replication_device.get('backend_id')
if backend_id:
backend_names.append(backend_id)
return backend_names
def get_replication_backend_stats(self, config):
"""Get the driver replication info for merging into volume stats."""
backend_names = self.get_replication_backend_names(config)
if len(backend_names) > 0:
stats = {
'replication_enabled': True,
'replication_count': len(backend_names),
'replication_targets': backend_names,
'replication_type': 'async',
}
else:
stats = {'replication_enabled': False}
return stats
def _get_replication_aggregate_map(self, src_backend_name,
target_backend_name):
"""Get the aggregate mapping config between src and destination."""
aggregate_map = {}
config = config_utils.get_backend_configuration(src_backend_name)
all_replication_aggregate_maps = config.safe_get(
'netapp_replication_aggregate_map')
if all_replication_aggregate_maps:
for replication_aggregate_map in all_replication_aggregate_maps:
if (replication_aggregate_map.get('backend_id') ==
target_backend_name):
replication_aggregate_map.pop('backend_id')
aggregate_map = replication_aggregate_map
break
return aggregate_map
def get_snapmirrors(self, src_backend_name, dest_backend_name,
src_flexvol_name=None, dest_flexvol_name=None):
"""Get info regarding SnapMirror relationship/s for given params."""
dest_backend_config = config_utils.get_backend_configuration(
dest_backend_name)
dest_vserver = dest_backend_config.netapp_vserver
dest_client = config_utils.get_client_for_backend(
dest_backend_name, vserver_name=dest_vserver)
src_backend_config = config_utils.get_backend_configuration(
src_backend_name)
src_vserver = src_backend_config.netapp_vserver
snapmirrors = dest_client.get_snapmirrors(
src_vserver, src_flexvol_name,
dest_vserver, dest_flexvol_name,
desired_attributes=[
'relationship-status',
'mirror-state',
'source-vserver',
'source-volume',
'destination-vserver',
'destination-volume',
'last-transfer-end-timestamp',
'lag-time',
])
return snapmirrors
    def create_snapmirror(self, src_backend_name, dest_backend_name,
                          src_flexvol_name, dest_flexvol_name):
        """Set up a SnapMirror relationship b/w two FlexVols (cinder pools)
        1. Create SnapMirror relationship
        2. Initialize data transfer asynchronously
        If a SnapMirror relationship already exists and is broken off or
        quiesced, resume and re-sync the mirror.
        """
        dest_backend_config = config_utils.get_backend_configuration(
            dest_backend_name)
        dest_vserver = dest_backend_config.netapp_vserver
        dest_client = config_utils.get_client_for_backend(
            dest_backend_name, vserver_name=dest_vserver)
        source_backend_config = config_utils.get_backend_configuration(
            src_backend_name)
        src_vserver = source_backend_config.netapp_vserver
        # 1. Create destination 'dp' FlexVol if it doesn't exist
        if not dest_client.flexvol_exists(dest_flexvol_name):
            self.create_destination_flexvol(src_backend_name,
                                            dest_backend_name,
                                            src_flexvol_name,
                                            dest_flexvol_name)
        # 2. Check if SnapMirror relationship exists
        existing_mirrors = dest_client.get_snapmirrors(
            src_vserver, src_flexvol_name, dest_vserver, dest_flexvol_name)
        msg_payload = {
            'src_vserver': src_vserver,
            'src_volume': src_flexvol_name,
            'dest_vserver': dest_vserver,
            'dest_volume': dest_flexvol_name,
        }
        # 3. Create and initialize SnapMirror if it doesn't already exist
        if not existing_mirrors:
            # TODO(gouthamr): Change the schedule from hourly to a config value
            msg = ("Creating a SnapMirror relationship between "
                   "%(src_vserver)s:%(src_volume)s and %(dest_vserver)s:"
                   "%(dest_volume)s.")
            LOG.debug(msg, msg_payload)
            dest_client.create_snapmirror(src_vserver,
                                          src_flexvol_name,
                                          dest_vserver,
                                          dest_flexvol_name,
                                          schedule='hourly')
            msg = ("Initializing SnapMirror transfers between "
                   "%(src_vserver)s:%(src_volume)s and %(dest_vserver)s:"
                   "%(dest_volume)s.")
            LOG.debug(msg, msg_payload)
            # Initialize async transfer of the initial data
            dest_client.initialize_snapmirror(src_vserver,
                                              src_flexvol_name,
                                              dest_vserver,
                                              dest_flexvol_name)
        # 4. Try to repair SnapMirror if existing
        else:
            snapmirror = existing_mirrors[0]
            if snapmirror.get('mirror-state') != 'snapmirrored':
                # best-effort repair: failures are logged, not raised
                try:
                    msg = ("SnapMirror between %(src_vserver)s:%(src_volume)s "
                           "and %(dest_vserver)s:%(dest_volume)s is in "
                           "'%(state)s' state. Attempting to repair it.")
                    msg_payload['state'] = snapmirror.get('mirror-state')
                    LOG.debug(msg, msg_payload)
                    dest_client.resume_snapmirror(src_vserver,
                                                  src_flexvol_name,
                                                  dest_vserver,
                                                  dest_flexvol_name)
                    dest_client.resync_snapmirror(src_vserver,
                                                  src_flexvol_name,
                                                  dest_vserver,
                                                  dest_flexvol_name)
                except netapp_api.NaApiError:
                    LOG.exception(_LE("Could not re-sync SnapMirror."))
    def delete_snapmirror(self, src_backend_name, dest_backend_name,
                          src_flexvol_name, dest_flexvol_name, release=True):
        """Ensure all information about a SnapMirror relationship is removed.
        1. Abort SnapMirror
        2. Delete the SnapMirror
        3. Release SnapMirror to cleanup SnapMirror metadata and snapshots
        """
        dest_backend_config = config_utils.get_backend_configuration(
            dest_backend_name)
        dest_vserver = dest_backend_config.netapp_vserver
        dest_client = config_utils.get_client_for_backend(
            dest_backend_name, vserver_name=dest_vserver)
        source_backend_config = config_utils.get_backend_configuration(
            src_backend_name)
        src_vserver = source_backend_config.netapp_vserver
        # 1. Abort any ongoing transfers
        try:
            dest_client.abort_snapmirror(src_vserver,
                                         src_flexvol_name,
                                         dest_vserver,
                                         dest_flexvol_name,
                                         clear_checkpoint=False)
        except netapp_api.NaApiError:
            # Snapmirror is already deleted
            pass
        # 2. Delete SnapMirror Relationship and cleanup destination snapshots
        try:
            dest_client.delete_snapmirror(src_vserver,
                                          src_flexvol_name,
                                          dest_vserver,
                                          dest_flexvol_name)
        except netapp_api.NaApiError as e:
            # tolerate a relationship that is already gone; any other
            # error is re-raised by the saved exception context
            with excutils.save_and_reraise_exception() as exc_context:
                if (e.code == netapp_api.EOBJECTNOTFOUND or
                        e.code == netapp_api.ESOURCE_IS_DIFFERENT or
                        ENTRY_DOES_NOT_EXIST in e.message):
                    LOG.info(_LI('No SnapMirror relationship to delete.'))
                    exc_context.reraise = False
        if release:
            # If the source is unreachable, do not perform the release
            try:
                src_client = config_utils.get_client_for_backend(
                    src_backend_name, vserver_name=src_vserver)
            except Exception:
                src_client = None
            # 3. Cleanup SnapMirror relationship on source
            try:
                if src_client:
                    src_client.release_snapmirror(src_vserver,
                                                  src_flexvol_name,
                                                  dest_vserver,
                                                  dest_flexvol_name)
            except netapp_api.NaApiError as e:
                with excutils.save_and_reraise_exception() as exc_context:
                    if (e.code == netapp_api.EOBJECTNOTFOUND or
                            e.code == netapp_api.ESOURCE_IS_DIFFERENT or
                            ENTRY_DOES_NOT_EXIST in e.message):
                        # Handle the case where the SnapMirror is already
                        # cleaned up
                        exc_context.reraise = False
def update_snapmirror(self, src_backend_name, dest_backend_name,
src_flexvol_name, dest_flexvol_name):
"""Schedule a SnapMirror update on the backend."""
dest_backend_config = config_utils.get_backend_configuration(
dest_backend_name)
dest_vserver = dest_backend_config.netapp_vserver
dest_client = config_utils.get_client_for_backend(
dest_backend_name, vserver_name=dest_vserver)
source_backend_config = config_utils.get_backend_configuration(
src_backend_name)
src_vserver = source_backend_config.netapp_vserver
# Update SnapMirror
dest_client.update_snapmirror(src_vserver,
src_flexvol_name,
dest_vserver,
dest_flexvol_name)
    def quiesce_then_abort(self, src_backend_name, dest_backend_name,
                           src_flexvol_name, dest_flexvol_name):
        """Quiesce a SnapMirror and wait with retries before aborting."""
        dest_backend_config = config_utils.get_backend_configuration(
            dest_backend_name)
        dest_vserver = dest_backend_config.netapp_vserver
        dest_client = config_utils.get_client_for_backend(
            dest_backend_name, vserver_name=dest_vserver)
        source_backend_config = config_utils.get_backend_configuration(
            src_backend_name)
        src_vserver = source_backend_config.netapp_vserver
        # 1. Attempt to quiesce, then abort
        dest_client.quiesce_snapmirror(src_vserver,
                                       src_flexvol_name,
                                       dest_vserver,
                                       dest_flexvol_name)
        # NOTE(review): '/' yields a float under Python 3; confirm that
        # utils.retry accepts a non-integer retries value, or use '//'.
        retries = (source_backend_config.netapp_snapmirror_quiesce_timeout /
                   QUIESCE_RETRY_INTERVAL)
        @utils.retry(exception.NetAppDriverException,
                     interval=QUIESCE_RETRY_INTERVAL,
                     retries=retries, backoff_rate=1)
        def wait_for_quiesced():
            # poll until the relationship reports 'quiesced'; raising the
            # exception makes the retry decorator try again
            snapmirror = dest_client.get_snapmirrors(
                src_vserver, src_flexvol_name, dest_vserver,
                dest_flexvol_name,
                desired_attributes=['relationship-status', 'mirror-state'])[0]
            if snapmirror.get('relationship-status') != 'quiesced':
                msg = _("SnapMirror relationship is not quiesced.")
                raise exception.NetAppDriverException(reason=msg)
        try:
            wait_for_quiesced()
        except exception.NetAppDriverException:
            # transfers did not drain within the timeout: force-abort them
            dest_client.abort_snapmirror(src_vserver,
                                         src_flexvol_name,
                                         dest_vserver,
                                         dest_flexvol_name,
                                         clear_checkpoint=False)
def break_snapmirror(self, src_backend_name, dest_backend_name,
src_flexvol_name, dest_flexvol_name):
"""Break SnapMirror relationship.
1. Quiesce any ongoing SnapMirror transfers
2. Wait until SnapMirror finishes transfers and enters quiesced state
3. Break SnapMirror
4. Mount the destination volume so it is given a junction path
"""
dest_backend_config = config_utils.get_backend_configuration(
dest_backend_name)
dest_vserver = dest_backend_config.netapp_vserver
dest_client = config_utils.get_client_for_backend(
dest_backend_name, vserver_name=dest_vserver)
source_backend_config = config_utils.get_backend_configuration(
src_backend_name)
src_vserver = source_backend_config.netapp_vserver
# 1. Attempt to quiesce, then abort
self.quiesce_then_abort(src_backend_name, dest_backend_name,
src_flexvol_name, dest_flexvol_name)
# 2. Break SnapMirror
dest_client.break_snapmirror(src_vserver,
src_flexvol_name,
dest_vserver,
dest_flexvol_name)
# 3. Mount the destination volume and create a junction path
dest_client.mount_flexvol(dest_flexvol_name)
def resync_snapmirror(self, src_backend_name, dest_backend_name,
src_flexvol_name, dest_flexvol_name):
"""Re-sync (repair / re-establish) SnapMirror relationship."""
dest_backend_config = config_utils.get_backend_configuration(
dest_backend_name)
dest_vserver = dest_backend_config.netapp_vserver
dest_client = config_utils.get_client_for_backend(
dest_backend_name, vserver_name=dest_vserver)
source_backend_config = config_utils.get_backend_configuration(
src_backend_name)
src_vserver = source_backend_config.netapp_vserver
dest_client.resync_snapmirror(src_vserver,
src_flexvol_name,
dest_vserver,
dest_flexvol_name)
def resume_snapmirror(self, src_backend_name, dest_backend_name,
src_flexvol_name, dest_flexvol_name):
"""Resume SnapMirror relationship from a quiesced state."""
dest_backend_config = config_utils.get_backend_configuration(
dest_backend_name)
dest_vserver = dest_backend_config.netapp_vserver
dest_client = config_utils.get_client_for_backend(
dest_backend_name, vserver_name=dest_vserver)
source_backend_config = config_utils.get_backend_configuration(
src_backend_name)
src_vserver = source_backend_config.netapp_vserver
dest_client.resume_snapmirror(src_vserver,
src_flexvol_name,
dest_vserver,
dest_flexvol_name)
def create_destination_flexvol(self, src_backend_name, dest_backend_name,
src_flexvol_name, dest_flexvol_name):
"""Create a SnapMirror mirror target FlexVol for a given source."""
dest_backend_config = config_utils.get_backend_configuration(
dest_backend_name)
dest_vserver = dest_backend_config.netapp_vserver
dest_client = config_utils.get_client_for_backend(
dest_backend_name, vserver_name=dest_vserver)
source_backend_config = config_utils.get_backend_configuration(
src_backend_name)
src_vserver = source_backend_config.netapp_vserver
src_client = config_utils.get_client_for_backend(
src_backend_name, vserver_name=src_vserver)
provisioning_options = (
src_client.get_provisioning_options_from_flexvol(
src_flexvol_name)
)
# Remove size and volume_type
size = provisioning_options.pop('size', None)
if not size:
msg = _("Unable to read the size of the source FlexVol (%s) "
"to create a SnapMirror destination.")
raise exception.NetAppDriverException(msg % src_flexvol_name)
provisioning_options.pop('volume_type', None)
source_aggregate = provisioning_options.pop('aggregate')
aggregate_map = self._get_replication_aggregate_map(
src_backend_name, dest_backend_name)
if not aggregate_map.get(source_aggregate):
msg = _("Unable to find configuration matching the source "
"aggregate (%s) and the destination aggregate. Option "
"netapp_replication_aggregate_map may be incorrect.")
raise exception.NetAppDriverException(
message=msg % source_aggregate)
destination_aggregate = aggregate_map[source_aggregate]
# NOTE(gouthamr): The volume is intentionally created as a Data
# Protection volume; junction-path will be added on breaking
# the mirror.
dest_client.create_flexvol(dest_flexvol_name,
destination_aggregate,
size,
volume_type='dp',
**provisioning_options)
def ensure_snapmirrors(self, config, src_backend_name, src_flexvol_names):
"""Ensure all the SnapMirrors needed for whole-backend replication."""
backend_names = self.get_replication_backend_names(config)
for dest_backend_name in backend_names:
for src_flexvol_name in src_flexvol_names:
dest_flexvol_name = src_flexvol_name
self.create_snapmirror(src_backend_name,
dest_backend_name,
src_flexvol_name,
dest_flexvol_name)
def break_snapmirrors(self, config, src_backend_name, src_flexvol_names,
chosen_target):
"""Break all existing SnapMirror relationships for a given back end."""
failed_to_break = []
backend_names = self.get_replication_backend_names(config)
for dest_backend_name in backend_names:
for src_flexvol_name in src_flexvol_names:
dest_flexvol_name = src_flexvol_name
try:
self.break_snapmirror(src_backend_name,
dest_backend_name,
src_flexvol_name,
dest_flexvol_name)
except netapp_api.NaApiError:
msg = _("Unable to break SnapMirror between FlexVol "
"%(src)s and Flexvol %(dest)s. Associated volumes "
"will have their replication state set to error.")
payload = {
'src': ':'.join([src_backend_name, src_flexvol_name]),
'dest': ':'.join([dest_backend_name,
dest_flexvol_name]),
}
if dest_backend_name == chosen_target:
failed_to_break.append(src_flexvol_name)
LOG.exception(msg, payload)
return failed_to_break
def update_snapmirrors(self, config, src_backend_name, src_flexvol_names):
"""Update all existing SnapMirror relationships on a given back end."""
backend_names = self.get_replication_backend_names(config)
for dest_backend_name in backend_names:
for src_flexvol_name in src_flexvol_names:
dest_flexvol_name = src_flexvol_name
try:
self.update_snapmirror(src_backend_name,
dest_backend_name,
src_flexvol_name,
dest_flexvol_name)
except netapp_api.NaApiError:
# Ignore any errors since the current source may be
# unreachable
pass
def _choose_failover_target(self, backend_name, flexvols,
replication_targets):
target_lag_times = []
for target in replication_targets:
all_target_mirrors = self.get_snapmirrors(
backend_name, target, None, None)
flexvol_mirrors = self._filter_and_sort_mirrors(
all_target_mirrors, flexvols)
if not flexvol_mirrors:
msg = ("Ignoring replication target %(target)s because no "
"SnapMirrors were found for any of the flexvols "
"in (%(flexvols)s).")
payload = {
'flexvols': ', '.join(flexvols),
'target': target,
}
LOG.debug(msg, payload)
continue
target_lag_times.append(
{
'target': target,
'highest-lag-time': flexvol_mirrors[0]['lag-time'],
}
)
# The best target is one with the least 'worst' lag time.
best_target = (sorted(target_lag_times,
key=lambda x: int(x['highest-lag-time']))[0]
if len(target_lag_times) > 0 else {})
return best_target.get('target')
def _filter_and_sort_mirrors(self, mirrors, flexvols):
"""Return mirrors reverse-sorted by lag time.
The 'slowest' mirror determines the best update that occurred on a
given replication target.
"""
filtered_mirrors = list(filter(lambda x: x.get('destination-volume')
in flexvols, mirrors))
sorted_mirrors = sorted(filtered_mirrors,
key=lambda x: int(x.get('lag-time')),
reverse=True)
return sorted_mirrors
def _complete_failover(self, source_backend_name, replication_targets,
flexvols, volumes, failover_target=None):
"""Failover a backend to a secondary replication target."""
volume_updates = []
active_backend_name = failover_target or self._choose_failover_target(
source_backend_name, flexvols, replication_targets)
if active_backend_name is None:
msg = _("No suitable host was found to failover.")
raise exception.NetAppDriverException(msg)
source_backend_config = config_utils.get_backend_configuration(
source_backend_name)
# 1. Start an update to try to get a last minute transfer before we
# quiesce and break
self.update_snapmirrors(source_backend_config, source_backend_name,
flexvols)
# 2. Break SnapMirrors
failed_to_break = self.break_snapmirrors(source_backend_config,
source_backend_name,
flexvols, active_backend_name)
# 3. Update cinder volumes within this host
for volume in volumes:
replication_status = fields.ReplicationStatus.FAILED_OVER
volume_pool = volume_utils.extract_host(volume['host'],
level='pool')
if volume_pool in failed_to_break:
replication_status = 'error'
volume_update = {
'volume_id': volume['id'],
'updates': {
'replication_status': replication_status,
},
}
volume_updates.append(volume_update)
return active_backend_name, volume_updates
def _failover_host(self, volumes, secondary_id=None):
if secondary_id == self.backend_name:
msg = _("Cannot failover to the same host as the primary.")
raise exception.InvalidReplicationTarget(reason=msg)
replication_targets = self.get_replication_backend_names(
self.configuration)
if not replication_targets:
msg = _("No replication targets configured for backend "
"%s. Cannot failover.")
raise exception.InvalidReplicationTarget(reason=msg % self.host)
elif secondary_id and secondary_id not in replication_targets:
msg = _("%(target)s is not among replication targets configured "
"for back end %(host)s. Cannot failover.")
payload = {
'target': secondary_id,
'host': self.host,
}
raise exception.InvalidReplicationTarget(reason=msg % payload)
flexvols = self.ssc_library.get_ssc_flexvol_names()
try:
active_backend_name, volume_updates = self._complete_failover(
self.backend_name, replication_targets, flexvols, volumes,
failover_target=secondary_id)
except exception.NetAppDriverException as e:
msg = _("Could not complete failover: %s") % e
raise exception.UnableToFailOver(reason=msg)
# Update the ZAPI client to the backend we failed over to
self._update_zapi_client(active_backend_name)
self.failed_over = True
self.failed_over_backend_name = active_backend_name
return active_backend_name, volume_updates
|
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'AuthProvider.config'
db.alter_column('sentry_authprovider', 'config', self.gf('jsonfield.fields.JSONField')())
def backwards(self, orm):
# Changing field 'AuthProvider.config'
db.alter_column('sentry_authprovider', 'config', self.gf('sentry.db.models.fields.gzippeddict.GzippedDictField')())
models = {
'sentry.accessgroup': {
'Meta': {'unique_together': "(('team', 'name'),)", 'object_name': 'AccessGroup'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.User']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'symmetrical': 'False'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}),
'type': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '50'})
},
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Event']", 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.alert': {
'Meta': {'object_name': 'Alert'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'related_groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_alerts'", 'symmetrical': 'False', 'through': "orm['sentry.AlertRelatedGroup']", 'to': "orm['sentry.Group']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.alertrelatedgroup': {
'Meta': {'unique_together': "(('group', 'alert'),)", 'object_name': 'AlertRelatedGroup'},
'alert': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Alert']"}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.apikey': {
'Meta': {'object_name': 'ApiKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.auditlogentry': {
'Meta': {'object_name': 'AuditLogEntry'},
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'audit_actors'", 'to': "orm['sentry.User']"}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.authidentity': {
'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'},
'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authprovider': {
'Meta': {'object_name': 'AuthProvider'},
'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'badge': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group', 'datetime'),)"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.file': {
'Meta': {'unique_together': "(('name', 'checksum'),)", 'object_name': 'File'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'storage': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'storage_options': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.helppage': {
'Meta': {'object_name': 'HelpPage'},
'content': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True'}),
'priority': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'object_name': 'Organization'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}),
'type': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '50'})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'), ('organization', 'slug'))", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'user_added': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.release': {
'Meta': {'unique_together': "(('project', 'version'),)", 'object_name': 'Release'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}),
'type': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '50'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
}
}
complete_apps = ['sentry']
|
|
from __future__ import unicode_literals
from django.utils import six
from djblets.db.query import LocalDataQuerySet
from djblets.util.decorators import augment_method_from
from djblets.webapi.decorators import webapi_request_fields
from djblets.webapi.responses import WebAPIResponsePaginated
from reviewboard.hostingsvcs.repository import RemoteRepository
from reviewboard.webapi.base import WebAPIResource
from reviewboard.webapi.resources import resources
class RemoteRepositoryResponsePaginated(WebAPIResponsePaginated):
    """Provides paginated responses for lists of RemoteRepository objects.

    This is a specialization of WebAPIResponsePaginated designed to
    return lists of RemoteRepository objects and to handle pagination in
    a way that's compatible with the pagination models of HostingService.
    """

    def __init__(self, request, queryset, *args, **kwargs):
        # The incoming "queryset" is a one-element LocalDataQuerySet
        # wrapping the hosting service's own paginator; keep a handle on
        # the paginator and let the parent run without a real queryset.
        self.paginator = queryset[0]

        super(RemoteRepositoryResponsePaginated, self).__init__(
            request, queryset=None, *args, **kwargs)

    def has_prev(self):
        # Delegate to the hosting service's paginator state.
        return self.paginator.has_prev

    def has_next(self):
        return self.paginator.has_next

    def get_prev_index(self):
        # ``start`` here is a 0-based page index, so clamp at page 0.
        return max(0, self.start - 1)

    def get_next_index(self):
        return self.start + 1

    def get_results(self):
        return self.paginator.page_data

    def get_total_results(self):
        # Total counts are not available consistently across hosting
        # services, so no count is reported.
        return None

    def build_pagination_url(self, full_path, start, max_results,
                             query_parameters):
        return '%s?start=%s%s' % (full_path, start, query_parameters)
class RemoteRepositoryResource(WebAPIResource):
    """Returns information on remote repositories on a hosting service.

    This can be used to look up the information needed to connect to a
    remote repository or to add a repository to Review Board. Only remote
    repositories that are accessible to the linked hosting service account
    (i.e., that of the parent resource) will be provided by this resource.
    """
    name = 'remote_repository'
    name_plural = 'remote_repositories'
    model = RemoteRepository
    model_object_key = 'id'
    model_parent_key = 'hosting_service_account'
    uri_object_key = 'repository_id'
    uri_object_key_regex = r'[A-Za-z0-9_./-]+'
    paginated_cls = RemoteRepositoryResponsePaginated
    fields = {
        'id': {
            'type': six.text_type,
            'description': 'The unique ID for this repository on the '
                           'hosting service.',
        },
        'name': {
            'type': six.text_type,
            'description': 'The name of the repository.',
        },
        'owner': {
            'type': six.text_type,
            'description': 'The owner of the repository, which may be a user '
                           'account or an organization, depending on the '
                           'service.',
        },
        'scm_type': {
            'type': six.text_type,
            'description': 'The type of repository, mapping to registered '
                           'SCMTools on Review Board.',
        },
        'path': {
            'type': six.text_type,
            'description': 'The repository path as recommended by the hosting '
                           'service.',
        },
        'mirror_path': {
            'type': six.text_type,
            'description': 'A secondary path that can be used to reach the '
                           'repository.',
        },
    }
    # NOTE: a second, redundant ``uri_object_key = 'repository_id'``
    # assignment previously appeared here; it duplicated the one above and
    # has been removed.
    autogenerate_etags = True
    allowed_methods = ('GET',)

    def has_list_access_permissions(self, request, *args, **kwargs):
        """Return whether the user may list this account's repositories."""
        account = resources.hosting_service_account.get_object(
            request, *args, **kwargs)

        # Only allow administrators or those with the ability to modify the
        # account to see what repositories are listed.
        return account.is_mutable_by(request.user)

    def has_access_permissions(self, request, remote_repository,
                               *args, **kwargs):
        """Return whether the user may view this remote repository."""
        # Only allow administrators or those with the ability to modify the
        # account to see what repositories are listed.
        return remote_repository.hosting_service_account.is_mutable_by(
            request.user)

    def get_queryset(self, request, start=None, is_list=False,
                     repository_id=None, *args, **kwargs):
        """Return a queryset-like wrapper over hosting service results.

        For list requests, this performs a paginated lookup on the hosting
        service (optionally filtered by ``owner``, ``owner-type`` and
        ``filter-type``); for item requests, it fetches one repository by ID.
        """
        account = resources.hosting_service_account.get_object(
            request, *args, **kwargs)

        if is_list:
            # Wrap the paginator in a LocalDataQuerySet, so that we can get
            # to it later in RemoteRepositoryResponsePaginated.
            lookup_kwargs = {}

            for name in ('owner', 'owner-type', 'filter-type'):
                if kwargs.get(name):
                    # Query-string names use dashes; service API uses
                    # underscores.
                    arg = name.replace('-', '_')
                    lookup_kwargs[arg] = kwargs[name]

            result = account.service.get_remote_repositories(start=start,
                                                             **lookup_kwargs)
        else:
            result = account.service.get_remote_repository(repository_id)

        return LocalDataQuerySet([result])

    def get_serializer_for_object(self, obj):
        """Serialize RemoteRepository objects with this resource itself."""
        if isinstance(obj, RemoteRepository):
            return self

        return super(RemoteRepositoryResource, self).get_serializer_for_object(
            obj)

    @webapi_request_fields(
        optional={
            'owner': {
                'type': six.text_type,
                'description': 'The owner (user account or organization) '
                               'to look up repositories for. Defaults to '
                               'the owner of the hosting service account.',
            },
            'owner-type': {
                'type': six.text_type,
                'description': 'Indicates what sort of account the owner '
                               'represents. This may be required by some '
                               'services, and the values are dependent on '
                               'that service.',
            },
            'filter-type': {
                'type': six.text_type,
                'description': 'Filters the list of results. Allowed values '
                               'are dependent on the hosting service. '
                               'Unexpected values will be ignored.',
            },
            'start': {
                'type': int,
                'description': 'The 0-based index of the first page of '
                               'results to fetch.',
            },
        },
        allow_unknown=True
    )
    @augment_method_from(WebAPIResource)
    def get_list(self, request, *args, **kwargs):
        """Returns the list of remote repositories on the hosting service.

        Different hosting service backends have different criteria for
        performing the lookups. Some hosting services have multiple types of
        owners, specified by passing ``owner-type``. Filtering may also be
        possible by passing ``filter-type``. Performing lookups requires
        knowing the possible values for the service ahead of time and passing
        the proper parameters in the query string.

        Pagination works a bit differently for this resource than most.
        Instead of ``?start=`` taking an index into the number of results,
        this resource's ``?start=`` takes a 0-based index of the page of
        results.

        ``?max-results=`` and ``?counts-only=`` are not supported, as they're
        not compatible with all hosting services.

        Callers should always use the ``next`` and ``prev`` links for
        navigation, and should not build page indexes themselves.
        """
        pass

    @augment_method_from(WebAPIResource)
    def get(self, *args, **kwargs):
        """Provides information on a particular remote repository.

        If the remote repository exists and is accessible by the linked
        hosting service account (that of the parent resource), then the
        details of that repository will be returned in the payload.

        The ID expected for the lookup in the URL is specific to the type
        of hosting service.
        """
        pass
# Module-level singleton instance of the resource, created at import time.
remote_repository_resource = RemoteRepositoryResource()
|
|
from __future__ import absolute_import, division, print_function
from .expressions import ElemWise, schema_method_list, method_properties
import datashape
from datashape import dshape, isdatelike, isnumeric
__all__ = ['DateTime', 'Date', 'date', 'Year', 'year', 'Month', 'month', 'Day',
'day', 'Hour', 'hour', 'Second', 'second', 'Millisecond',
'millisecond', 'Microsecond', 'microsecond', 'Date', 'date', 'Time',
'time', 'UTCFromTimestamp', 'DateTimeTruncate']
class DateTime(ElemWise):
    """Superclass for datetime accessors."""

    __slots__ = '_hash', '_child',

    @property
    def attr(self):
        # Accessor name is derived from the subclass name, e.g. Year -> 'year'.
        return type(self).__name__.lower()

    @property
    def schema(self):
        return dshape(self._dtype)

    @property
    def _name(self):
        return '%s_%s' % (self._child._name, self.attr)

    def __str__(self):
        # Equivalent to the lowered subclass name suffixed onto the child.
        return '%s.%s' % (self._child, self.attr)
class Date(DateTime):
    """Accessor extracting the date component of a datetime column."""
    _dtype = datashape.date_
def date(expr):
    """Return a ``Date`` accessor expression over *expr*."""
    return Date(expr)
class Year(DateTime):
    """Accessor extracting the year of a datetime column."""
    # NOTE(review): int32 here while the other numeric accessors use int64 --
    # presumably intentional upstream; confirm.
    _dtype = datashape.int32
def year(expr):
    """Return a ``Year`` accessor expression over *expr*."""
    return Year(expr)
class Month(DateTime):
    """Accessor extracting the month of a datetime column."""
    _dtype = datashape.int64
def month(expr):
    """Return a ``Month`` accessor expression over *expr*."""
    return Month(expr)
class Day(DateTime):
    """Accessor extracting the day of a datetime column."""
    _dtype = datashape.int64
def day(expr):
    """Return a ``Day`` accessor expression over *expr*."""
    return Day(expr)
class Time(DateTime):
    """Accessor extracting the time-of-day component of a datetime column."""
    _dtype = datashape.time_
def time(expr):
    """Return a ``Time`` accessor expression over *expr*."""
    return Time(expr)
class Hour(DateTime):
    """Accessor extracting the hour of a datetime column."""
    _dtype = datashape.int64
def hour(expr):
    """Return an ``Hour`` accessor expression over *expr*."""
    return Hour(expr)
class Minute(DateTime):
    """Accessor extracting the minute of a datetime column."""
    _dtype = datashape.int64
def minute(expr):
    """Return a ``Minute`` accessor expression over *expr*."""
    return Minute(expr)
class Second(DateTime):
    """Accessor extracting the second of a datetime column."""
    _dtype = datashape.int64
def second(expr):
    """Return a ``Second`` accessor expression over *expr*."""
    return Second(expr)
class Millisecond(DateTime):
    """Accessor extracting the millisecond of a datetime column."""
    _dtype = datashape.int64
def millisecond(expr):
    """Return a ``Millisecond`` accessor expression over *expr*."""
    return Millisecond(expr)
class Microsecond(DateTime):
    """Accessor extracting the microsecond of a datetime column."""
    _dtype = datashape.int64
def microsecond(expr):
    """Return a ``Microsecond`` accessor expression over *expr*."""
    return Microsecond(expr)
class UTCFromTimestamp(DateTime):
    # Applies to numeric columns (see schema_method_list registration below);
    # presumably interprets values as POSIX timestamps yielding UTC datetimes
    # -- confirm against the compute backends.
    _dtype = datashape.datetime_
def utcfromtimestamp(expr):
    """Return a ``UTCFromTimestamp`` accessor expression over *expr*."""
    return UTCFromTimestamp(expr)
# Canonical time units, ordered from coarsest to finest. The ordering is
# significant: DateTimeTruncate compares positions in this list.
units = ['year', 'month', 'week', 'day', 'hour', 'minute', 'second',
         'millisecond', 'microsecond', 'nanosecond']

# Short spellings accepted as input and mapped to canonical unit names.
# ('m' is deliberately absent: it would be ambiguous between minute/month.)
_unit_aliases = {
    'y': 'year',
    'w': 'week',
    'd': 'day',
    'date': 'day',
    'h': 'hour',
    's': 'second',
    'ms': 'millisecond',
    'us': 'microsecond',
    'ns': 'nanosecond'
}


def normalize_time_unit(s):
    """ Normalize time input to one of 'year', 'second', 'millisecond', etc..

    Examples
    --------

    >>> normalize_time_unit('milliseconds')
    'millisecond'

    >>> normalize_time_unit('ms')
    'millisecond'
    """
    s = s.lower().strip()
    while True:
        if s in units:
            return s
        if s in _unit_aliases:
            return _unit_aliases[s]
        if s[-1] != 's':
            raise ValueError("Do not understand time unit %s" % s)
        # Drop trailing plural 's' characters and retry (e.g. 'weeks').
        s = s.rstrip('s')
class DateTimeTruncate(DateTime):
    """Truncate a datetime expression to a multiple of a time unit."""

    __slots__ = '_hash', '_child', 'measure', 'unit'

    @property
    def _dtype(self):
        # Truncating at day granularity or coarser yields a plain date;
        # finer units keep full datetime precision.
        if units.index(self.unit) <= units.index('day'):
            return datashape.date_
        return datashape.datetime_

    @property
    def _name(self):
        return self._child._name

    def __str__(self):
        return '%s.truncate(%ss=%g)' % (self._child, self.unit, self.measure)
def truncate(expr, *args, **kwargs):
    """ Truncate datetime expression

    Accepts either two positional arguments ``(measure, unit)`` or a single
    keyword argument such as ``weeks=2``.

    Examples
    --------

    >>> from blaze import symbol, compute
    >>> from datetime import datetime
    >>> s = symbol('s', 'datetime')
    >>> expr = s.truncate(10, 'minutes')
    >>> compute(expr, datetime(2000, 6, 25, 12, 35, 10))
    datetime.datetime(2000, 6, 25, 12, 30)

    >>> expr = s.truncate(1, 'week')
    >>> compute(expr, datetime(2000, 6, 25, 12, 35, 10))
    datetime.date(2000, 6, 25)

    Alternatively use keyword arguments to specify unit and measure

    >>> # expr = s.truncate(2, 'weeks')
    >>> expr = s.truncate(weeks=2)
    """
    if not (args or kwargs):
        raise TypeError('truncate takes exactly 2 positional arguments, '
                        'e.g., truncate(2, "days") or 1 keyword argument, '
                        'e.g., truncate(days=2)')

    if args:
        assert not kwargs, ('only positional arguments allowed if any are '
                            'passed in')
        measure, unit = args
    else:
        assert not args, 'only keyword arguments allowed if any are passed in'
        # Exactly one keyword pair is expected, e.g. weeks=2.
        [(unit, measure)] = kwargs.items()

    return DateTimeTruncate(expr, measure, normalize_time_unit(unit))
# Register the accessors: datetime-valued columns get the component
# accessors and truncate(); numeric columns get utcfromtimestamp().
schema_method_list.extend([
    (isdatelike, {year, month, day, hour, minute, date, time, second,
                  millisecond, microsecond, truncate}),
    (isnumeric, {utcfromtimestamp}),
])

# Also expose these accessors through the method-property mechanism.
method_properties |= {year, month, day, hour, minute, second, millisecond,
                      microsecond, date, time, utcfromtimestamp}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.