# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import copy
import datetime
import sys
import inspect
from botocore.vendored import six
if six.PY3:
from six.moves import http_client
class HTTPHeaders(http_client.HTTPMessage):
pass
from urllib.parse import quote
from urllib.parse import urlencode
from urllib.parse import unquote
from urllib.parse import unquote_plus
from urllib.parse import urlsplit
from urllib.parse import urlunsplit
from urllib.parse import urljoin
from urllib.parse import parse_qsl
from urllib.parse import parse_qs
from http.client import HTTPResponse
from io import IOBase as _IOBase
from base64 import encodebytes
from email.utils import formatdate
from itertools import zip_longest
file_type = _IOBase
zip = zip
# In python3, unquote takes a str() object, url decodes it,
# then takes the bytestring and decodes it to utf-8.
# In Python 2 we'll have to do this ourselves (see below).
unquote_str = unquote_plus
def set_socket_timeout(http_response, timeout):
"""Set the timeout of the socket from an HTTPResponse.
:param http_response: An instance of ``httplib.HTTPResponse``
"""
http_response._fp.fp.raw._sock.settimeout(timeout)
def accepts_kwargs(func):
# In python3.4.1, there's backwards incompatible
# changes when using getargspec with functools.partials.
return inspect.getfullargspec(func)[2]
else:
from urllib import quote
from urllib import urlencode
from urllib import unquote
from urllib import unquote_plus
from urlparse import urlsplit
from urlparse import urlunsplit
from urlparse import urljoin
from urlparse import parse_qsl
from urlparse import parse_qs
from email.message import Message
from email.Utils import formatdate
file_type = file
from itertools import izip as zip
from itertools import izip_longest as zip_longest
from httplib import HTTPResponse
from base64 import encodestring as encodebytes
class HTTPHeaders(Message):
# The __iter__ method is not available in python2.x, so we have
# to port the py3 version.
def __iter__(self):
for field, value in self._headers:
yield field
def unquote_str(value, encoding='utf-8'):
# In python2, unquote() gives us a string back that has the urldecoded
# bits, but not the unicode parts. We need to decode this manually.
# unquote has special logic in which if it receives a unicode object it
# will decode it to latin1. This is hard coded. To avoid this, we'll
# encode the string with the passed in encoding before trying to
# unquote it.
byte_string = value.encode(encoding)
return unquote_plus(byte_string).decode(encoding)
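# A hedged sketch of the py2 behaviour above (assumes the value is
# URL-encoded UTF-8; the input literal is illustrative):
#   >>> unquote_str(u'key%20with%20%C3%A9')
#   u'key with \xe9'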
def set_socket_timeout(http_response, timeout):
"""Set the timeout of the socket from an HTTPResponse.
:param http_response: An instance of ``httplib.HTTPResponse``
"""
http_response._fp.fp._sock.settimeout(timeout)
def accepts_kwargs(func):
return inspect.getargspec(func)[2]
try:
from collections import OrderedDict
except ImportError:
# Python2.6 we use the 3rd party back port.
from ordereddict import OrderedDict
if sys.version_info[:2] == (2, 6):
import simplejson as json
# In py26, invalid xml parsed by element tree
# will raise a plain old SyntaxError instead of
# a real exception, so we need to abstract this change.
XMLParseError = SyntaxError
# Handle https://github.com/shazow/urllib3/issues/497 for py2.6. In
# python2.6, there is a known issue where sometimes we cannot read the SAN
# from an SSL cert (http://bugs.python.org/issue13034). However, newer
# versions of urllib3 will warn you when there is no SAN. While we could
# just turn off this warning in urllib3 altogether, we _do_ want warnings
# when they're legitimate warnings. This method tries to scope the warning
# filter to be as specific as possible.
def filter_ssl_san_warnings():
import warnings
from botocore.vendored.requests.packages.urllib3 import exceptions
warnings.filterwarnings(
'ignore',
message="Certificate has no.*subjectAltName.*",
category=exceptions.SecurityWarning,
module=".*urllib3\.connection")
else:
import xml.etree.cElementTree
XMLParseError = xml.etree.cElementTree.ParseError
import json
def filter_ssl_san_warnings():
# Noop for non-py26 versions. We will parse the SAN
# appropriately.
pass
@classmethod
def from_dict(cls, d):
new_instance = cls()
for key, value in d.items():
new_instance[key] = value
return new_instance
@classmethod
def from_pairs(cls, pairs):
new_instance = cls()
for key, value in pairs:
new_instance[key] = value
return new_instance
HTTPHeaders.from_dict = from_dict
HTTPHeaders.from_pairs = from_pairs
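# Example usage (a minimal sketch): lookups are case-insensitive on both
# Python versions because HTTPHeaders derives from the email Message
# classes.
#   >>> headers = HTTPHeaders.from_dict({'Content-Type': 'text/plain'})
#   >>> headers['content-type']
#   'text/plain'
#   >>> HTTPHeaders.from_pairs([('X-Amz-Date', '20140101T000000Z')])['x-amz-date']
#   '20140101T000000Z'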
def copy_kwargs(kwargs):
"""
There is a bug in Python versions < 2.6.5 that prevents you
from passing unicode keyword args (#4978). This function
takes a dictionary of kwargs and returns a copy. If you are
using Python < 2.6.5, it also encodes the keys to avoid this bug.
Oh, and version_info wasn't a namedtuple back then, either!
"""
vi = sys.version_info
if vi[0] == 2 and vi[1] <= 6 and vi[2] < 5:  # micro version is index 2
copy_kwargs = {}
for key in kwargs:
copy_kwargs[key.encode('utf-8')] = kwargs[key]
else:
copy_kwargs = copy.copy(kwargs)
return copy_kwargs
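# Example (sketch): on interpreters older than 2.6.5 the keys come back
# UTF-8 encoded; everywhere else this is just a shallow copy.
#   >>> copy_kwargs({u'Bucket': u'mybucket'})
#   {u'Bucket': u'mybucket'}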
def total_seconds(delta):
"""
Returns the total seconds in a ``datetime.timedelta``.
Python 2.6 does not have ``timedelta.total_seconds()``, so we have
to calculate this ourselves. On 2.7 or better, we'll take advantage of the
built-in method.
The math was pulled from the ``datetime`` docs
(http://docs.python.org/2.7/library/datetime.html#datetime.timedelta.total_seconds).
:param delta: The timedelta object
:type delta: ``datetime.timedelta``
"""
if sys.version_info[:2] != (2, 6):
return delta.total_seconds()
day_in_seconds = delta.days * 24 * 3600.0
micro_in_seconds = delta.microseconds / 10.0**6
return day_in_seconds + delta.seconds + micro_in_seconds
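# Example (sketch of the 2.6 fallback arithmetic):
#   >>> total_seconds(datetime.timedelta(days=1, seconds=30,
#   ...                                  microseconds=500000))
#   86430.5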
"""This is a fairly straightforward fork of the python2.6 pprint module.
While the standard pprint uses a fairly odd and nonstandard indentation scheme,
we strive to output with an indentation that's suitable for python code written
in a standard style.
"""
from .version import __version__
__version__ = __version__ # pyflakes
###
# Author: Fred L. Drake, Jr.
# fdrake@acm.org
#
# This is a simple little module I wrote to make life easier. I didn't
# see anything quite like it in the library, though I may have overlooked
# something. I wrote this when I was trying to read some heavily nested
# tuples with fairly non-descriptive content. This is modeled very much
# after Lisp/Scheme - style pretty-printing of lists. If you find it
# useful, thank small children who sleep at night.
"""Support to pretty-print lists, tuples, & dictionaries recursively.
Very simple, but useful, especially in debugging data structures.
Classes
-------
PrettyPrinter()
Handle pretty-printing operations onto a stream using a configured
set of formatting parameters.
Functions
---------
pformat()
Format a Python object into a pretty-printed representation.
pprint()
Pretty-print a Python object to a stream [default is sys.stdout].
saferepr()
Generate a 'standard' repr()-like value, but protect against recursive
data structures.
"""
import sys as _sys
import warnings
from cStringIO import StringIO as _StringIO
__all__ = ["pprint","pformat","isreadable","isrecursive","saferepr",
"PrettyPrinter"]
# cache these for faster access:
_commajoin = ", ".join
_id = id
_len = len
_type = type
def pprint(object, stream=None, indent=4, width=80, depth=None):
"""Pretty-print a Python object to a stream [default is sys.stdout]."""
printer = PrettyPrinter(
stream=stream, indent=indent, width=width, depth=depth)
printer.pprint(object)
def pformat(object, indent=4, width=80, depth=None):
"""Format a Python object into a pretty-printed representation."""
return PrettyPrinter(indent=indent, width=width, depth=depth).pformat(object)
def saferepr(object):
"""Version of repr() which can handle recursive data structures."""
return _safe_repr(object, {}, None, 0)[0]
def isreadable(object):
"""Determine if saferepr(object) is readable by eval()."""
return _safe_repr(object, {}, None, 0)[1]
def isrecursive(object):
"""Determine if object requires a recursive representation."""
return _safe_repr(object, {}, None, 0)[2]
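# Example usage of the module-level helpers (a minimal sketch):
#   >>> saferepr({'a': [1, 2]})
#   "{'a': [1, 2]}"
#   >>> isreadable({'a': [1, 2]})
#   True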
def _sorted(iterable):
with warnings.catch_warnings():
if _sys.py3kwarning:
warnings.filterwarnings("ignore", "comparing unequal types "
"not supported", DeprecationWarning)
return sorted(iterable)
class PrettyPrinter:
def __init__(self, indent=4, width=80, depth=None, stream=None):
"""Handle pretty printing operations onto a stream using a set of
configured parameters.
indent
Number of spaces to indent for each level of nesting.
width
Attempted maximum number of columns in the output.
depth
The maximum depth to print out nested structures.
stream
The desired output stream. If omitted (or false), the standard
output stream available at construction will be used.
"""
indent = int(indent)
width = int(width)
assert indent >= 0, "indent must be >= 0"
assert depth is None or depth > 0, "depth must be > 0"
assert width, "width must be != 0"
self._depth = depth
self._indent_per_level = indent
self._width = width
if stream is not None:
self._stream = stream
else:
self._stream = _sys.stdout
def pprint(self, object):
self._format(object, self._stream, 0, 0, {}, 0)
self._stream.write("\n")
def pformat(self, object):
sio = _StringIO()
self._format(object, sio, 0, 0, {}, 0)
return sio.getvalue()
def isrecursive(self, object):
return self.format(object, {}, 0, 0)[2]
def isreadable(self, object):
s, readable, recursive = self.format(object, {}, 0, 0)
return readable and not recursive
def _format(self, object, stream, indent, allowance, context, level):
"""The core pretty-printing function.
Input:
object -- The value to be pretty-printed.
stream -- The file-like output of this pretty-print.
State variables, used in recursion:
indent -- The "current" indentation level, as an integer count of columns.
allowance -- The number of columns already "used up" on the current line,
not counting indentation. It seems that this value was totally broken
in stdlib cpython pprint.
context -- The set of all nested objects above this one, used for
cycle detection.
level -- The count of how many objects are nested "above" this one.
This is used in implementing the "depth" feature of PrettyPrinter.
"""
level = level + 1
objid = _id(object)
write = stream.write
if objid in context:
rep = _recursion(object)
write(rep)
replen = _len(rep)
allowance += replen
self._recursive = True
self._readable = False
return allowance
rep = self._repr(object, context, level - 1)
typ = _type(object)
sepLines = _len(rep) + indent + allowance + 1 >= self._width
if self._depth and level > self._depth:
write(rep)
replen = _len(rep)
return allowance + replen
r = getattr(typ, "__repr__", None)
if issubclass(typ, dict) and r is dict.__repr__:
write('{')
allowance += 1
if sepLines:
write('\n' + (indent + self._indent_per_level) * ' ')
allowance = 0
length = _len(object)
if length:
context[objid] = 1
indent += self._indent_per_level
items = _sorted(object.items())
key, ent = items[0]
rep = self._repr(key, context, level)
write(rep)
write(': ')
replen = _len(rep)
allowance += replen + 2
allowance = self._format(ent, stream, indent, allowance, context, level)
if length > 1:
for key, ent in items[1:]:
rep = self._repr(key, context, level)
replen = _len(rep)
if sepLines:
write(',\n%s%s: ' % (' '*indent, rep))
allowance = replen + 2
else:
write(', %s: ' % rep)
allowance += replen + 4
allowance = self._format(ent, stream, indent, allowance, context, level)
indent -= self._indent_per_level
del context[objid]
if sepLines:
write(',\n' + indent * ' ')
allowance = 0
write('}')
allowance += 1
return allowance
elif ((issubclass(typ, list) and r is list.__repr__) or
(issubclass(typ, tuple) and r is tuple.__repr__) or
(issubclass(typ, set) and r is set.__repr__) or
(issubclass(typ, frozenset) and r is frozenset.__repr__)
):
length = _len(object)
if issubclass(typ, list):
write('[')
allowance += 1
endchar = ']'
elif issubclass(typ, set):
if not length:
write('set()')
return allowance + 5
write('set([')
allowance += 5
endchar = '])'
object = _sorted(object)
elif issubclass(typ, frozenset):
if not length:
write('frozenset()')
return allowance + 11
write('frozenset([')
allowance += 11
endchar = '])'
object = _sorted(object)
else:
write('(')
allowance += 1
endchar = ')'
if sepLines:
write('\n' + (indent + self._indent_per_level) * ' ')
allowance = 0
if length:
context[objid] = 1
indent = indent + self._indent_per_level
allowance = self._format(object[0], stream, indent, allowance, context, level)
if length > 1:
for ent in object[1:]:
if sepLines:
write(',\n' + ' '*indent)
allowance = 0
else:
write(', ')
allowance += 2
allowance = self._format(ent, stream, indent, allowance, context, level)
indent = indent - self._indent_per_level
del context[objid]
if sepLines:
write(',\n' + indent * ' ')
allowance = 0
elif issubclass(typ, tuple) and length == 1:
write(',')
allowance += 1
write(endchar)
return allowance
write(rep)
return allowance
def _repr(self, object, context, level):
repr, readable, recursive = self.format(object, context.copy(),
self._depth, level)
if not readable:
self._readable = False
if recursive:
self._recursive = True
return repr
def format(self, object, context, maxlevels, level):
"""Format object for a specific context, returning a string
and flags indicating whether the representation is 'readable'
and whether the object represents a recursive construct.
"""
return _safe_repr(object, context, maxlevels, level)
# Return triple (repr_string, isreadable, isrecursive).
def _safe_repr(object, context, maxlevels, level):
typ = _type(object)
if typ is str:
if 'locale' not in _sys.modules:
return repr(object), True, False
if "'" in object and '"' not in object:
closure = '"'
quotes = {'"': '\\"'}
else:
closure = "'"
quotes = {"'": "\\'"}
qget = quotes.get
sio = _StringIO()
write = sio.write
for char in object:
if char.isalpha():
write(char)
else:
write(qget(char, repr(char)[1:-1]))
return ("%s%s%s" % (closure, sio.getvalue(), closure)), True, False
r = getattr(typ, "__repr__", None)
if issubclass(typ, dict) and r is dict.__repr__:
if not object:
return "{}", True, False
objid = _id(object)
if maxlevels and level >= maxlevels:
return "{...}", False, objid in context
if objid in context:
return _recursion(object), False, True
context[objid] = 1
readable = True
recursive = False
components = []
append = components.append
level += 1
saferepr = _safe_repr
for k, v in _sorted(object.items()):
krepr, kreadable, krecur = saferepr(k, context, maxlevels, level)
vrepr, vreadable, vrecur = saferepr(v, context, maxlevels, level)
append("%s: %s" % (krepr, vrepr))
readable = readable and kreadable and vreadable
if krecur or vrecur:
recursive = True
del context[objid]
return "{%s}" % _commajoin(components), readable, recursive
if (issubclass(typ, list) and r is list.__repr__) or \
(issubclass(typ, tuple) and r is tuple.__repr__):
if issubclass(typ, list):
if not object:
return "[]", True, False
format = "[%s]"
elif _len(object) == 1:
format = "(%s,)"
else:
if not object:
return "()", True, False
format = "(%s)"
objid = _id(object)
if maxlevels and level >= maxlevels:
return format % "...", False, objid in context
if objid in context:
return _recursion(object), False, True
context[objid] = 1
readable = True
recursive = False
components = []
append = components.append
level += 1
for o in object:
orepr, oreadable, orecur = _safe_repr(o, context, maxlevels, level)
append(orepr)
if not oreadable:
readable = False
if orecur:
recursive = True
del context[objid]
return format % _commajoin(components), readable, recursive
rep = repr(object)
return rep, (rep and not rep.startswith('<')), False
def _recursion(object):
return ("<Recursion on %s with id=%s>"
% (_type(object).__name__, _id(object)))
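# A self-referential structure exercises _recursion() (sketch; the id in
# the output varies per run):
#   >>> l = [1, 2]; l.append(l)
#   >>> isrecursive(l)
#   True
#   >>> saferepr(l)
#   '[1, 2, <Recursion on list with id=...>]'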
def _perfcheck(object=None):
import time
if object is None:
object = [("string", (1, 2), [3, 4], {5: 6, 7: 8})] * 100000
p = PrettyPrinter()
t1 = time.time()
_safe_repr(object, {}, None, 0)
t2 = time.time()
p.pformat(object)
t3 = time.time()
print "_safe_repr:", t2 - t1
print "pformat:", t3 - t2
if __name__ == "__main__":
_perfcheck()
# vim:et:sts=4:sw=4:
# Copyright 2002-2011 Nick Mathewson. See LICENSE for licensing information.
"""mixminion.BuildMessage
Code to fragment and reassemble messages."""
import binascii
import math
import time
import mixminion._minionlib
import mixminion.Filestore
from mixminion.Crypto import ceilDiv, getCommonPRNG, sha1, whiten, unwhiten
from mixminion.Common import disp64, LOG, previousMidnight, MixError, \
MixFatalError
from mixminion.Packet import ENC_FWD_OVERHEAD, PAYLOAD_LEN, \
FRAGMENT_PAYLOAD_OVERHEAD
__all__ = [ "FragmentPool", "FragmentationParams" ]
# Largest number of allowed fragments in a single chunk. Must be a power
# of two.
MAX_FRAGMENTS_PER_CHUNK = 16
# Minimum proportion of extra packets to add to each chunk.
EXP_FACTOR = 1.3333333333333333
class FragmentationParams:
"""Class to track the padding, chunking, and fragmentation required
for a message of a given length to be packed into fragments of a
given capacity."""
## Fields:
# length -- size (in octets) of the original message.
# k -- number of input packets for each chunk (also number of packets
# from a chunk required to reconstruct it.)
# n -- number of output packets for each chunk.
# fragCapacity -- number of bytes we can fit in a single fragment.
# (28KB - overhead)
# chunkSize -- number of input bytes in a single chunk. Equal to
# k*fragmentCapacity.
# nChunks -- number of total chunks in message.
# paddingLen -- bytes added to message before fragmentation
# paddedLen -- length of message after padding; equal to chunkSize*nChunks
# fec -- mixminion._minionlib.FEC object to encode/decode chunks;
# lazy-initialized by getFEC()
def __init__(self, length, overhead):
assert overhead in (0, ENC_FWD_OVERHEAD)
self.length = length
self.fragCapacity = PAYLOAD_LEN - FRAGMENT_PAYLOAD_OVERHEAD - overhead
# minimum number of payloads to hold msg, without fragmentation
# or padding.
minFragments = ceilDiv(length, self.fragCapacity)
assert minFragments >= 2
# Number of data fragments per chunk.
self.k = 2
while self.k < minFragments and self.k < MAX_FRAGMENTS_PER_CHUNK:
self.k *= 2
# Number of chunks.
self.nChunks = ceilDiv(minFragments, self.k)
# Number of total fragments per chunk.
self.n = int(math.ceil(EXP_FACTOR * self.k))
# Data in a single chunk
self.chunkSize = self.fragCapacity * self.k
# Length of data to fill chunks
self.paddedLen = self.nChunks * self.fragCapacity * self.k
# Length of padding needed to fill all chunks with data.
self.paddingLen = self.paddedLen - length
# FEC object
self.fec = None
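# Worked example (sketch): with overhead 0, fragCapacity is
# PAYLOAD_LEN - FRAGMENT_PAYLOAD_OVERHEAD. For a message of length
# 3*fragCapacity: minFragments = 3, so k doubles from 2 to 4;
# nChunks = ceilDiv(3, 4) = 1; n = ceil(4 * 4/3) = 6;
# chunkSize = paddedLen = 4*fragCapacity; and paddingLen = fragCapacity.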
def getFEC(self):
"""Return a FEC object to fragment or defragment messages with
these parameters"""
if self.fec is None:
self.fec = _getFEC(self.k, self.n)
return self.fec
def getPosition(self, index):
"""Return a chunk,index-within-chunk tuple for a packet with index
'index'"""
chunk, pos = divmod(index, self.n)
return chunk, pos
def getFragments(self, s, paddingPRNG=None):
"""Given a string of length self.length, whiten it, pad it,
and fragment it. Return a list of the fragments, in order.
(Note -- after building the fragment packets, be sure to shuffle
them into a random order.)"""
if paddingPRNG is None:
paddingPRNG = getCommonPRNG()
self.getFEC()
assert len(s) == self.length
s = whiten(s)
s += paddingPRNG.getBytes(self.paddingLen)
assert len(s) == self.paddedLen
chunks = []
for i in xrange(self.nChunks):
chunks.append( s[i*self.chunkSize:(i+1)*self.chunkSize] )
del s
fragments = []
for i in xrange(self.nChunks):
blocks = []
for j in xrange(self.k):
blocks.append( chunks[i][j*self.fragCapacity:
(j+1)*self.fragCapacity] )
chunks[i] = None
for j in xrange(self.n):
fragments.append( self.fec.encode(j, blocks) )
return fragments
# ======================================================================
class FragmentPool:
"""Class to hold and manage fragmented messages as they are
reconstructed."""
## Fields:
# states -- map from messageid to MessageState. Reconstructed by
# rescan().
# db -- instance of FragmentDB.
# store -- instance of StringMetadataStore. The messages are either
# the contents of individual fragments or reconstructed chunks.
# The metadata are instances of FragmentMetadata.
def __init__(self, dir):
"""Open a FragmentPool storing fragments in 'dir' and records of
old messages in 'dir_db'.
"""
self.store = mixminion.Filestore.StringMetadataStore(
dir,create=1)
self.db = FragmentDB(dir+"_db")
self.rescan()
def cleanQueue(self, deleteFn=None):
"""Expunge all removed fragments from disk. See Filestore.cleanQueue"""
self.store.cleanQueue(deleteFn)
def sync(self):
"""Flush pending changes to disk."""
self.db.sync()
def close(self):
"""Release open resources for this pool."""
self.db.close()
del self.db
del self.store
del self.states
def addFragment(self, fragmentPacket, nym=None, now=None, verbose=0):
"""Given an instance of mixminion.Packet.FragmentPayload, record
the fragment if appropriate and update the state of the
fragment pool if necessary. Returns the message ID that was
updated, or None if the fragment was redundant or malformed.
fragmentPacket -- the new fragment to add.
nym -- a string representing the identity that received this
fragment. [Tracking nyms is important, to prevent an
attack where we send 2 fragments to 'MarkTwain' and 2
fragments to 'SClemens', and see that the message is
reconstructed.]
now -- the time (in seconds since the epoch) to record for this
fragment; defaults to the current time.
verbose -- if true, log information at the INFO level;
otherwise, log at DEBUG.
"""
if verbose:
say = LOG.info
else:
say = LOG.debug
if now is None:
now = time.time()
today = previousMidnight(now)
# If the message has already been rejected or completed, we can
# drop this packet.
s = self.db.getStatusAndTime(fragmentPacket.msgID)
if s:
say("Dropping fragment of %s message %r",
s[0].lower(), disp64(fragmentPacket.msgID,12))
return None
# Otherwise, create a new metadata object for this fragment...
meta = FragmentMetadata(messageid=fragmentPacket.msgID,
idx=fragmentPacket.index,
size=fragmentPacket.msgLen,
isChunk=0,
chunkNum=None,
overhead=fragmentPacket.getOverhead(),
insertedDate=today,
nym=nym,
digest=sha1(fragmentPacket.data))
# ... and allocate or find the MessageState for this message.
state = self._getState(meta)
try:
# Check whether we can/should add this message, but do not
# add it.
state.addFragment(None, meta, noop=1)
# No exception was thrown; queue the message.
h = self.store.queueMessageAndMetadata(fragmentPacket.data, meta)
# And *now* update the message state.
state.addFragment(h, meta)
say("Stored fragment %s of message %s",
fragmentPacket.index+1, disp64(fragmentPacket.msgID,12))
return fragmentPacket.msgID
except MismatchedFragment, s:
# Remove the other fragments, mark msgid as bad.
LOG.warn("Found inconsistent fragment %s in message %s: %s",
fragmentPacket.index+1, disp64(fragmentPacket.msgID,12),
s)
self._deleteMessageIDs({ meta.messageid : 1}, "REJECTED", now)
return None
except UnneededFragment:
# Discard this fragment; we don't need it.
say("Dropping unneeded fragment %s of message %s",
fragmentPacket.index+1, disp64(fragmentPacket.msgID,12))
return None
def getReadyMessage(self, msgid):
"""Return the complete message associated with messageid 'msgid'.
(If no such complete message is found, return None.) The
resulting message is unwhitened, but not uncompressed."""
s = self.states.get(msgid)
if not s or not s.isDone():
return None
hs = s.getChunkHandles()
msg = "".join([self.store.messageContents(h) for h in hs])
msg = unwhiten(msg[:s.params.length])
return msg
def markMessageCompleted(self, msgid, rejected=0):
"""Release all resources associated with the messageid 'msgid', and
reject future packets for that messageid. If 'rejected', the
message has been abandoned and not sent; otherwise, the message
has been sent.
"""
s = self.states.get(msgid)
if not s or not s.isDone():
return None
if rejected:
self._deleteMessageIDs({msgid: 1}, "REJECTED")
else:
self._deleteMessageIDs({msgid: 1}, "COMPLETED")
def listReadyMessages(self):
"""Return a list of all messageIDs that have been completely
reconstructed."""
return [ msgid
for msgid,state in self.states.items()
if state.isDone() ]
def unchunkMessages(self):
"""If any messages are ready for partial or full reconstruction,
reconstruct as many of their chunks as possible."""
for msgid, state in self.states.items():
if not state.hasReadyChunks():
continue
state.reconstruct(self.store)
def expireMessages(self, cutoff):
"""Remove all pending messages that were first inserted before
'cutoff'. """
expiredMessageIDs = {}
for s in self.states.values():
if s.inserted < cutoff:
expiredMessageIDs[s.messageid] = 1
self._deleteMessageIDs(expiredMessageIDs, "REJECTED")
def rescan(self):
"""Check all fragment metadata objects on disk, and reconstruct our
internal view of message states.
"""
# Delete all internal state; reload FragmentMetadatas from disk.
self.store.loadAllMetadata(lambda: None)
meta = self.store._metadata_cache
self.states = {}
badMessageIDs = {} # map from bad messageID to 1
unneededHandles = [] # list of handles that aren't needed.
for h, fm in meta.items():
if not fm:
LOG.debug("Removing fragment %s with missing metadata", h)
self.store.removeMessage(h)
continue
try:
mid = fm.messageid
if badMessageIDs.has_key(mid):
# We've already decided to reject fragments with this ID.
pass
else:
# All is well; try to register the fragment/chunk. If it's
# redundant or inconsistent, raise an exception.
state = self._getState(fm)
if fm.isChunk:
state.addChunk(h, fm)
else:
state.addFragment(h, fm)
except MismatchedFragment:
# Mark the message ID for this fragment as inconsistent.
badMessageIDs[mid] = 1
except UnneededFragment:
LOG.warn("Found redundant fragment %s in pool", h)
# Remember that this message is unneeded.
unneededHandles.append(h)
# Check for fragments superseded by chunks -- those are unneeded too.
for s in self.states.values():
unneededHandles.extend(s.getUnneededFragmentHandles())
# Delete unneeded fragments.
for h in unneededHandles:
try:
fm = meta[h]
except KeyError:
continue
LOG.debug("Removing unneeded fragment %s from message ID %r",
fm.idx, fm.messageid)
self.store.removeMessage(h)
# Now nuke inconsistent messages.
self._deleteMessageIDs(badMessageIDs, "REJECTED")
def _deleteMessageIDs(self, messageIDSet, why, today=None):
"""Helper function. Remove all the fragments and chunks associated
with a given message, and mark the message as delivered or
undeliverable.
messageIDSet -- a map from 20-byte messageID to 1.
why -- 'REJECTED' or 'COMPLETED' or '?'
"""
assert why in ("REJECTED", "COMPLETED", "?")
if not messageIDSet:
return
if today is None:
today = time.time()
today = previousMidnight(today)
if why == 'REJECTED':
LOG.debug("Removing bogus messages by IDs: %s",
messageIDSet.keys())
elif why == "COMPLETED":
LOG.debug("Removing completed messages by IDs: %s",
messageIDSet.keys())
else:
LOG.debug("Removing messages by IDs: %s",
messageIDSet.keys())
for mid in messageIDSet.keys():
if why == "?":
state = self.states[mid]
if state.isDone():
whythis = "COMPLETED"
else:
whythis = "REJECTED"
else:
whythis = why
self.db.markStatus(mid, whythis, today)
try:
del self.states[mid]
except KeyError:
pass
for h, fm in self.store._metadata_cache.items():
if messageIDSet.has_key(fm.messageid):
self.store.removeMessage(h)
def _getState(self, fm):
"""Helper function. Return the MessageState object associated with
a given FragmentMetadata; allocate it if necessary."""
try:
return self.states[fm.messageid]
except KeyError:
state = MessageState(messageid=fm.messageid,
length=fm.size,
overhead=fm.overhead,
inserted=fm.insertedDate,
nym=fm.nym)
self.states[fm.messageid] = state
return state
def getStateByMsgID(self, msgid):
"""Given a message ID (either a 20-byte full ID or a 12-byte
pretty-printed ID prefix), return a MessageState object for
the corresponding message, or None if the message is not
recognized."""
if len(msgid) == 20:
return self.states.get(msgid, None)
elif len(msgid) == 12:
target = binascii.a2b_base64(msgid)
for i in self.states.keys():
if i.startswith(target):
return self.states[i]
return None
def listMessages(self):
"""Return a map from pretty-printed message ID to dicts mapping:
'size' to the size of the message, in bytes
'nym' to the pseudonym receiving the message
'have' to the number of packets we have so far
'need' to the number of additional packets we need.
"""
result = {}
for msgid in self.states.keys():
state = self.states[msgid]
have, need = state.getCompleteness()
result[disp64(msgid,12)] = {
'size' : state.params.length,
'nym' : state.nym,
'have' : have,
'need' : need
}
return result
# ======================================================================
class MismatchedFragment(Exception):
"""Exception raised when a fragment isn't compatible with the other
fragments with a given message ID. Because fragments are
integrity-checked on their way in, inconsistent fragments mean the
message is corrupt."""
pass
class UnneededFragment(Exception):
"""Exception raised when a fragment is unneeded, and doesn't need to be
stored to disk."""
pass
class FragmentMetadata:
"""Persistent metadata object to hold the state of a given fragment or
reconstructed chunk."""
## Fields
# messageid -- unique 20-byte identifier for the message this fragment
# comes from.
# idx -- index of the fragment within the message. In the case of a
# chunk, it's equal to chunkNum.
# size -- total length of the message.
# isChunk -- true iff this is a reconstructed chunk.
# chunkNum -- number of the chunk to which this fragment belongs.
# overhead -- Payload overhead for this fragment. Equal to 0 or
# ENC_FWD_OVERHEAD.
# insertedDate -- Midnight GMT before the day this fragment was received.
# nym -- name of the identity that received this fragment.
# digest -- digest of the fragment/chunk; None for pre-0.0.7
def __init__(self, messageid, idx, size, isChunk, chunkNum, overhead,
insertedDate, nym, digest):
self.messageid = messageid
self.idx = idx
self.size = size
self.isChunk = isChunk
self.chunkNum = chunkNum
self.overhead = overhead
self.insertedDate = insertedDate
self.nym = nym
self.digest = digest
def __getstate__(self):
return ("V1", self.messageid, self.idx, self.size,
self.isChunk, self.chunkNum, self.overhead, self.insertedDate,
self.nym, self.digest)
def __setstate__(self, state):
if state[0] == 'V0':
(_, self.messageid, self.idx, self.size,
self.isChunk, self.chunkNum, self.overhead, self.insertedDate,
self.nym) = state
self.digest = None
elif state[0] == 'V1':
(_, self.messageid, self.idx, self.size,
self.isChunk, self.chunkNum, self.overhead, self.insertedDate,
self.nym,self.digest) = state
else:
raise MixFatalError("Unrecognized fragment state")
class MessageState:
"""Helper class. Tracks the status of the reconstruction of a
single message. MessageState objects are not persistent, and must
be reconstructed from FragmentMetadata objects whenever a
fragment pool is rescanned.
"""
## Fields:
# messageid -- the 20-byte message ID of this message.
# overhead -- the payload overhead for fragments of this message.
# inserted -- the midnight (GMT) of the day on which the first packet
# associated with this message was inserted.
# nym -- the name of the identity receiving this message. Used to
# prevent linkage attacks.
#
# params -- an instance of FragmentationParams for this message.
# chunks -- a map from chunk number to tuples of (handle within the pool,
# FragmentMetadata object). For completed chunks.
# fragmentsByChunk -- a list mapping chunk number to maps from
# index-within-chunk to (handle,FragmentMetadata)
# readyChunks -- a map whose keys are the numbers of chunks that
# are ready for reconstruction, but haven't been reconstructed
# yet.
def __init__(self, messageid, length, overhead, inserted, nym):
"""Create a new MessageState.
"""
self.messageid = messageid
self.overhead = overhead
self.inserted = inserted
self.nym = nym
# chunkno -> handle,fragmentmeta
self.chunks = {}
# chunkno -> idxwithinchunk -> (handle,fragmentmeta)
self.fragmentsByChunk = []
self.params = FragmentationParams(length, overhead)
for _ in xrange(self.params.nChunks):
self.fragmentsByChunk.append({})
# chunkset: ready chunk num -> 1
self.readyChunks = {}
def isDone(self):
"""Return true iff we have reconstructed all the chunks for this
message."""
return len(self.chunks) == self.params.nChunks
def getChunkHandles(self):
"""Requires self.isDone(). Return an in-order list for the handles
of the reconstructed chunks of this message."""
assert self.isDone()
return [ self.chunks[i][0] for i in xrange(self.params.nChunks) ]
def getCompleteness(self):
"""Return a tuple of (have,need), where 'need' is the final number
of packets needed to reconstruct the message, and 'have' is the
number we have so far."""
need = self.params.k * self.params.nChunks
have = self.params.k * len(self.chunks)
for d in self.fragmentsByChunk:
have += min(len(d),self.params.k)
return have, need
def addChunk(self, h, fm):
"""Register a chunk with handle h and FragmentMetadata fm. If the
chunk is inconsistent with other fragments of this message,
raise MismatchedFragment."""
assert fm.isChunk
assert fm.messageid == self.messageid
if fm.size != self.params.length:
raise MismatchedFragment("Mismatched message length")
if fm.overhead != self.overhead:
raise MismatchedFragment("Mismatched packet overhead")
if self.chunks.has_key(fm.chunkNum):
raise MismatchedFragment("Duplicate chunks")
if fm.nym != self.nym:
raise MismatchedFragment("Fragments received for differing identities")
if self.inserted > fm.insertedDate:
self.inserted = fm.insertedDate
self.chunks[fm.chunkNum] = (h,fm)
if self.fragmentsByChunk[fm.chunkNum]:
LOG.warn("Found a chunk with unneeded fragments for message %r",
self.messageid)
if self.readyChunks.get(fm.chunkNum):
del self.readyChunks[fm.chunkNum]
def addFragment(self, h, fm, noop=0):
"""Register a fragment with handle h and FragmentMetadata fm. If the
fragment is inconsistent with the other fragments of this message,
raise MismatchedFragment. If the fragment isn't needed (because
enough fragments for its chunk have already been received),
raise UnneededFragment. If 'noop' is true, do not add this
fragment--just raise exceptions as needed.
assert fm.messageid == self.messageid
if fm.size != self.params.length:
raise MismatchedFragment("mismatched message size")
if fm.overhead != self.overhead:
raise MismatchedFragment("mismatched fragment payload size")
if fm.nym != self.nym:
raise MismatchedFragment("mismatched identities")
chunkNum, pos = self.params.getPosition(fm.idx)
if chunkNum >= self.params.nChunks:
raise MismatchedFragment
if (self.chunks.has_key(chunkNum) or
len(self.fragmentsByChunk[chunkNum]) >= self.params.k):
raise UnneededFragment
if self.fragmentsByChunk[chunkNum].has_key(pos):
previous = self.fragmentsByChunk[chunkNum][pos][1]
if previous.digest is None or previous.digest == fm.digest:
raise UnneededFragment("already seen this fragment")
else:
raise MismatchedFragment("multiple fragments for one position")
if noop:
return
assert h
if self.inserted > fm.insertedDate:
self.inserted = fm.insertedDate
self.fragmentsByChunk[chunkNum][pos] = (h, fm)
if len(self.fragmentsByChunk[chunkNum]) >= self.params.k:
self.readyChunks[chunkNum] = 1
def hasReadyChunks(self):
"""Return true iff some of the chunks in this message are pending
reconstruction."""
return len(self.readyChunks) != 0
def reconstruct(self, store):
"""If any of the chunks in this message are pending reconstruction,
reconstruct them in a given store."""
if not self.readyChunks:
return
for chunkno in self.readyChunks.keys():
# Get the first K fragments in the chunk. (list of h,fm)
ch = self.fragmentsByChunk[chunkno].values()[:self.params.k]
minDate = min([fm.insertedDate for h, fm in ch])
# Build a list of (position-within-chunk, fragment-contents).
frags = [(self.params.getPosition(fm.idx)[1],
store.messageContents(h)) for h,fm in ch]
chunkText = "".join(self.params.getFEC().decode(frags))
del frags
fm2 = FragmentMetadata(messageid=self.messageid,
idx=chunkno, size=self.params.length,
isChunk=1, chunkNum=chunkno,
overhead=self.overhead,
insertedDate=minDate, nym=self.nym,
digest=sha1(chunkText))
# Queue the chunk.
h2 = store.queueMessageAndMetadata(chunkText, fm2)
del chunkText
# Remove superseded fragments.
for h, fm in ch:
store.removeMessage(h)
# Update this MessageState object.
self.fragmentsByChunk[chunkno] = {}
del self.readyChunks[chunkno]
self.addChunk(h2, fm2)
def getUnneededFragmentHandles(self):
"""Returns any handles for fragments that have been superceded by
chunks."""
r = []
for chunkno in self.chunks.keys():
r.extend([ h for h,_ in self.fragmentsByChunk[chunkno].values()])
return r
class FragmentDB(mixminion.Filestore.DBBase):
"""Internal class. Uses a database background (such as dbm, berkely db,
gdbm, etc.) to remember which message IDs have already been
reconstructed or noted as corrupt
"""
def __init__(self, location):
"""Open a new FragmentDB; stores its data in files beginning with
'location'."""
mixminion.Filestore.DBBase.__init__(self, location, "fragment")
self.sync()
def markStatus(self, msgid, status, today=None):
"""Note fragments for a message with message ID 'msgid' should no
longer be stored. 'status' is one of 'COMPLETED' or 'REJECTED',
depending on whether the message was delivered or undeliverable."""
assert status in ("COMPLETED", "REJECTED")
if today is None:
today = time.time()
today = previousMidnight(today)
self[msgid] = (status, today)
def getStatusAndTime(self, msgid):
"""Given a messageID, return a 2-tuple of status,resolutiondate.
Return None if the message is still deliverable."""
return self.get(msgid, None)
def _encodeKey(self, k):
return binascii.b2a_hex(k)
def _encodeVal(self, v):
status, tm = v
return "%s-%s"%(
{"COMPLETED":"C", "REJECTED":"R"}[status], str(tm))
def _decodeVal(self, v):
status = {"C":"COMPLETED", "R":"REJECTED"}[v[0]]
tm = int(v[2:])
return status, tm
# ======================================================================
# Internal lazily generated cache from (k,n) to _minionlib.FEC objects.
# Note that only a limited set of (k,n) values is ever used.
def _blankFECtable():
"""Return a map from permissible k,n tuples to FEC objects"""
f = {}
k = 2
while k <= MAX_FRAGMENTS_PER_CHUNK:
f[(k, int(math.ceil(EXP_FACTOR*k)))] = None
k *= 2
return f
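# With MAX_FRAGMENTS_PER_CHUNK == 16 and EXP_FACTOR == 4/3, the
# permissible (k, n) keys above work out to (2, 3), (4, 6), (8, 11),
# and (16, 22).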
# global map.
_fectab = _blankFECtable()
def _getFEC(k,n):
"""Given k and n parameters, return a FEC object to fragment and
reconstruct messages given those parameters."""
# There's a possible race condition here where two threads note
# that a given set of parameters haven't been generated, and both
# generate them. This is harmless.
f = _fectab[(k,n)]
if f is None:
f = _fectab[(k,n)] = mixminion._minionlib.FEC_generate(k,n)
return f
# Copyright 2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=protected-access
import os
from devlib import AndroidTarget, TargetError
from devlib.target import KernelConfig, KernelVersion, Cpuinfo
from devlib.utils.android import AndroidProperties
from wa.framework.configuration.core import settings
from wa.framework.exception import ConfigError
from wa.utils.serializer import read_pod, write_pod, Podable
from wa.utils.misc import atomic_write_path
def cpuinfo_from_pod(pod):
cpuinfo = Cpuinfo('')
cpuinfo.sections = pod['cpuinfo']
lines = []
for section in cpuinfo.sections:
for key, value in section.items():
line = '{}: {}'.format(key, value)
lines.append(line)
lines.append('')
cpuinfo.text = '\n'.join(lines)
return cpuinfo
def kernel_version_from_pod(pod):
release_string = pod['kernel_release']
version_string = pod['kernel_version']
if release_string:
if version_string:
kernel_string = '{} #{}'.format(release_string, version_string)
else:
kernel_string = release_string
else:
kernel_string = '#{}'.format(version_string)
return KernelVersion(kernel_string)
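# Example (sketch): a pod produced by TargetInfo.to_pod() stores the
# release and version strings separately, e.g.
#   kernel_version_from_pod({'kernel_release': '4.9.0',
#                            'kernel_version': '1 SMP PREEMPT'})
# reassembles the uname-style string '4.9.0 #1 SMP PREEMPT' before
# handing it to KernelVersion.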
def kernel_config_from_pod(pod):
config = KernelConfig('')
config.typed_config._config = pod['kernel_config']
lines = []
for key, value in config.items():
if value == 'n':
lines.append('# {} is not set'.format(key))
else:
lines.append('{}={}'.format(key, value))
config.text = '\n'.join(lines)
return config
class CpufreqInfo(Podable):
_pod_serialization_version = 1
@staticmethod
def from_pod(pod):
pod = CpufreqInfo._upgrade_pod(pod)
return CpufreqInfo(**pod)
def __init__(self, **kwargs):
super(CpufreqInfo, self).__init__()
self.available_frequencies = kwargs.pop('available_frequencies', [])
self.available_governors = kwargs.pop('available_governors', [])
self.related_cpus = kwargs.pop('related_cpus', [])
self.driver = kwargs.pop('driver', None)
self._pod_version = kwargs.pop('_pod_version', self._pod_serialization_version)
def to_pod(self):
pod = super(CpufreqInfo, self).to_pod()
pod.update(self.__dict__)
return pod
@staticmethod
def _pod_upgrade_v1(pod):
pod['_pod_version'] = pod.get('_pod_version', 1)
return pod
def __repr__(self):
return 'Cpufreq({} {})'.format(self.driver, self.related_cpus)
__str__ = __repr__
class IdleStateInfo(Podable):
_pod_serialization_version = 1
@staticmethod
def from_pod(pod):
pod = IdleStateInfo._upgrade_pod(pod)
return IdleStateInfo(**pod)
def __init__(self, **kwargs):
super(IdleStateInfo, self).__init__()
self.name = kwargs.pop('name', None)
self.desc = kwargs.pop('desc', None)
self.power = kwargs.pop('power', None)
self.latency = kwargs.pop('latency', None)
self._pod_version = kwargs.pop('_pod_version', self._pod_serialization_version)
def to_pod(self):
pod = super(IdleStateInfo, self).to_pod()
pod.update(self.__dict__)
return pod
@staticmethod
def _pod_upgrade_v1(pod):
pod['_pod_version'] = pod.get('_pod_version', 1)
return pod
def __repr__(self):
return 'IdleState({}/{})'.format(self.name, self.desc)
__str__ = __repr__
class CpuidleInfo(Podable):
_pod_serialization_version = 1
@staticmethod
def from_pod(pod):
pod = CpuidleInfo._upgrade_pod(pod)
instance = CpuidleInfo()
instance._pod_version = pod['_pod_version']
instance.governor = pod['governor']
instance.driver = pod['driver']
instance.states = [IdleStateInfo.from_pod(s) for s in pod['states']]
return instance
@property
def num_states(self):
return len(self.states)
def __init__(self):
super(CpuidleInfo, self).__init__()
self.governor = None
self.driver = None
self.states = []
def to_pod(self):
pod = super(CpuidleInfo, self).to_pod()
pod['governor'] = self.governor
pod['driver'] = self.driver
pod['states'] = [s.to_pod() for s in self.states]
return pod
@staticmethod
def _pod_upgrade_v1(pod):
pod['_pod_version'] = pod.get('_pod_version', 1)
return pod
def __repr__(self):
return 'Cpuidle({}/{} {} states)'.format(
self.governor, self.driver, self.num_states)
__str__ = __repr__
class CpuInfo(Podable):
_pod_serialization_version = 1
@staticmethod
def from_pod(pod):
instance = super(CpuInfo, CpuInfo).from_pod(pod)
instance.id = pod['id']
instance.name = pod['name']
instance.architecture = pod['architecture']
instance.features = pod['features']
instance.cpufreq = CpufreqInfo.from_pod(pod['cpufreq'])
instance.cpuidle = CpuidleInfo.from_pod(pod['cpuidle'])
return instance
def __init__(self):
super(CpuInfo, self).__init__()
self.id = None
self.name = None
self.architecture = None
self.features = []
self.cpufreq = CpufreqInfo()
self.cpuidle = CpuidleInfo()
def to_pod(self):
pod = super(CpuInfo, self).to_pod()
pod['id'] = self.id
pod['name'] = self.name
pod['architecture'] = self.architecture
pod['features'] = self.features
pod['cpufreq'] = self.cpufreq.to_pod()
pod['cpuidle'] = self.cpuidle.to_pod()
return pod
@staticmethod
def _pod_upgrade_v1(pod):
pod['_pod_version'] = pod.get('_pod_version', 1)
return pod
def __repr__(self):
return 'Cpu({} {})'.format(self.id, self.name)
__str__ = __repr__
def get_target_info(target):
info = TargetInfo()
info.target = target.__class__.__name__
info.modules = target.modules
info.os = target.os
info.os_version = target.os_version
info.system_id = target.system_id
info.abi = target.abi
info.is_rooted = target.is_rooted
info.kernel_version = target.kernel_version
info.kernel_config = target.config
info.hostname = target.hostname
info.hostid = target.hostid
try:
info.sched_features = target.read_value('/sys/kernel/debug/sched_features').split()
except TargetError:
# best effort -- debugfs might not be mounted
pass
for i, name in enumerate(target.cpuinfo.cpu_names):
cpu = CpuInfo()
cpu.id = i
cpu.name = name
cpu.features = target.cpuinfo.get_cpu_features(i)
cpu.architecture = target.cpuinfo.architecture
if target.has('cpufreq'):
cpu.cpufreq.available_governors = target.cpufreq.list_governors(i)
cpu.cpufreq.available_frequencies = target.cpufreq.list_frequencies(i)
cpu.cpufreq.related_cpus = target.cpufreq.get_related_cpus(i)
cpu.cpufreq.driver = target.cpufreq.get_driver(i)
if target.has('cpuidle'):
cpu.cpuidle.driver = target.cpuidle.get_driver()
cpu.cpuidle.governor = target.cpuidle.get_governor()
for state in target.cpuidle.get_states(i):
state_info = IdleStateInfo()
state_info.name = state.name
state_info.desc = state.desc
state_info.power = state.power
state_info.latency = state.latency
cpu.cpuidle.states.append(state_info)
info.cpus.append(cpu)
info.page_size_kb = target.page_size_kb
if isinstance(target, AndroidTarget):
info.screen_resolution = target.screen_resolution
info.prop = target.getprop()
info.android_id = target.android_id
return info
def read_target_info_cache():
if not os.path.exists(settings.cache_directory):
os.makedirs(settings.cache_directory)
if not os.path.isfile(settings.target_info_cache_file):
return {}
return read_pod(settings.target_info_cache_file)
def write_target_info_cache(cache):
if not os.path.exists(settings.cache_directory):
os.makedirs(settings.cache_directory)
with atomic_write_path(settings.target_info_cache_file) as at_path:
write_pod(cache, at_path)
def get_target_info_from_cache(system_id, cache=None):
if cache is None:
cache = read_target_info_cache()
pod = cache.get(system_id, None)
if not pod:
return None
_pod_version = pod.get('_pod_version', 0)
if _pod_version != TargetInfo._pod_serialization_version:
msg = 'Target info version mismatch. Expected {}, but found {}.\nTry deleting {}'
raise ConfigError(msg.format(TargetInfo._pod_serialization_version, _pod_version,
settings.target_info_cache_file))
return TargetInfo.from_pod(pod)
def cache_target_info(target_info, overwrite=False, cache=None):
if cache is None:
cache = read_target_info_cache()
if target_info.system_id in cache and not overwrite:
raise ValueError('TargetInfo for {} is already in cache.'.format(target_info.system_id))
cache[target_info.system_id] = target_info.to_pod()
write_target_info_cache(cache)
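# A hedged usage sketch tying the cache helpers together (``target`` is
# assumed to be an already-connected devlib target):
#
#   info = get_target_info_from_cache(target.system_id)
#   if info is None:
#       info = get_target_info(target)
#       cache_target_info(info)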
class TargetInfo(Podable):
_pod_serialization_version = 5
@staticmethod
def from_pod(pod):
instance = super(TargetInfo, TargetInfo).from_pod(pod)
instance.target = pod['target']
instance.modules = pod['modules']
instance.abi = pod['abi']
instance.cpus = [CpuInfo.from_pod(c) for c in pod['cpus']]
instance.os = pod['os']
instance.os_version = pod['os_version']
instance.system_id = pod['system_id']
instance.hostid = pod['hostid']
instance.hostname = pod['hostname']
instance.abi = pod['abi']
instance.is_rooted = pod['is_rooted']
instance.kernel_version = kernel_version_from_pod(pod)
instance.kernel_config = kernel_config_from_pod(pod)
instance.sched_features = pod['sched_features']
instance.page_size_kb = pod.get('page_size_kb')
if instance.os == 'android':
instance.screen_resolution = pod['screen_resolution']
instance.prop = AndroidProperties('')
instance.prop._properties = pod['prop']
instance.android_id = pod['android_id']
return instance
def __init__(self):
super(TargetInfo, self).__init__()
self.target = None
self.modules = []
self.cpus = []
self.os = None
self.os_version = None
self.system_id = None
self.hostid = None
self.hostname = None
self.abi = None
self.is_rooted = None
self.kernel_version = None
self.kernel_config = None
self.sched_features = None
self.screen_resolution = None
self.prop = None
self.android_id = None
self.page_size_kb = None
def to_pod(self):
pod = super(TargetInfo, self).to_pod()
pod['target'] = self.target
pod['modules'] = self.modules
pod['abi'] = self.abi
pod['cpus'] = [c.to_pod() for c in self.cpus]
pod['os'] = self.os
pod['os_version'] = self.os_version
pod['system_id'] = self.system_id
pod['hostid'] = self.hostid
pod['hostname'] = self.hostname
pod['abi'] = self.abi
pod['is_rooted'] = self.is_rooted
pod['kernel_release'] = self.kernel_version.release
pod['kernel_version'] = self.kernel_version.version
pod['kernel_config'] = dict(self.kernel_config.iteritems())
pod['sched_features'] = self.sched_features
pod['page_size_kb'] = self.page_size_kb
if self.os == 'android':
pod['screen_resolution'] = self.screen_resolution
pod['prop'] = self.prop._properties
pod['android_id'] = self.android_id
return pod
@staticmethod
def _pod_upgrade_v1(pod):
pod['_pod_version'] = pod.get('_pod_version', 1)
pod['cpus'] = pod.get('cpus', [])
pod['system_id'] = pod.get('system_id')
pod['hostid'] = pod.get('hostid')
pod['hostname'] = pod.get('hostname')
pod['sched_features'] = pod.get('sched_features')
pod['screen_resolution'] = pod.get('screen_resolution', (0, 0))
pod['prop'] = pod.get('prop')
pod['android_id'] = pod.get('android_id')
return pod
@staticmethod
def _pod_upgrade_v2(pod):
pod['page_size_kb'] = pod.get('page_size_kb')
pod['_pod_version'] = pod.get('format_version', 0)
return pod
@staticmethod
def _pod_upgrade_v3(pod):
config = {}
for key, value in pod['kernel_config'].items():
config[key.upper()] = value
pod['kernel_config'] = config
return pod
@staticmethod
def _pod_upgrade_v4(pod):
return TargetInfo._pod_upgrade_v3(pod)
@staticmethod
def _pod_upgrade_v5(pod):
pod['modules'] = pod.get('modules') or []
return pod
#!/usr/bin/env python
# coding: utf-8
"""Test environment for client library tests.
This module has functions for creating keyspaces, tablets for the client
library test.
"""
import hashlib
import random
import struct
import threading
import time
import traceback
import unittest
import environment
import tablet
import utils
from clientlib_tests import topo_schema
from clientlib_tests import db_class_unsharded
from clientlib_tests import db_class_sharded
from clientlib_tests import db_class_lookup
from vtdb import database_context
from vtdb import db_object
from vtdb import keyrange
from vtdb import keyrange_constants
from vtdb import keyspace
from vtdb import dbexceptions
from vtdb import shard_constants
from vtdb import vtdb_logger
from vtdb import vtgatev2
from vtdb import vtgate_cursor
conn_class = vtgatev2
__tablets = None
shard_names = ['-80', '80-']
shard_kid_map = {'-80': [527875958493693904, 626750931627689502,
345387386794260318, 332484755310826578,
1842642426274125671, 1326307661227634652,
1761124146422844620, 1661669973250483744,
3361397649937244239, 2444880764308344533],
'80-': [9767889778372766922, 9742070682920810358,
10296850775085416642, 9537430901666854108,
10440455099304929791, 11454183276974683945,
11185910247776122031, 10460396697869122981,
13379616110062597001, 12826553979133932576],
}
pack_kid = struct.Struct('!Q').pack
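# Example (sketch): keyspace ids are packed to 8-byte big-endian
# unsigned ints before being handed to the vtgate API.
#   >>> pack_kid(1).encode('hex')
#   '0000000000000001'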
def setUpModule():
try:
environment.topo_server().setup()
setup_topology()
# start mysql instance external to the test
global __tablets
setup_procs = []
for tablet in __tablets:
setup_procs.append(tablet.init_mysql())
utils.wait_procs(setup_procs)
create_db()
start_tablets()
utils.VtGate().start()
except:
tearDownModule()
raise
def tearDownModule():
global __tablets
if utils.options.skip_teardown:
return
if __tablets is not None:
tablet.kill_tablets(__tablets)
teardown_procs = []
for t in __tablets:
teardown_procs.append(t.teardown_mysql())
utils.wait_procs(teardown_procs, raise_on_error=False)
environment.topo_server().teardown()
utils.kill_sub_processes()
utils.remove_tmp_files()
if __tablets is not None:
for t in __tablets:
t.remove_tree()
def setup_topology():
global __tablets
if __tablets is None:
__tablets = []
keyspaces = topo_schema.keyspaces
for ks in keyspaces:
ks_name = ks[0]
ks_type = ks[1]
utils.run_vtctl(['CreateKeyspace', ks_name])
if ks_type == shard_constants.UNSHARDED:
shard_master = tablet.Tablet()
shard_replica = tablet.Tablet()
shard_master.init_tablet('master', keyspace=ks_name, shard='0')
__tablets.append(shard_master)
shard_replica.init_tablet('replica', keyspace=ks_name, shard='0')
__tablets.append(shard_replica)
elif ks_type == shard_constants.RANGE_SHARDED:
utils.run_vtctl(['SetKeyspaceShardingInfo', '-force', ks_name,
'keyspace_id', 'uint64'])
for shard_name in shard_names:
shard_master = tablet.Tablet()
shard_replica = tablet.Tablet()
shard_master.init_tablet('master', keyspace=ks_name, shard=shard_name)
__tablets.append(shard_master)
shard_replica.init_tablet('replica', keyspace=ks_name, shard=shard_name)
__tablets.append(shard_replica)
utils.run_vtctl(['RebuildKeyspaceGraph', ks_name], auto_log=True)
def create_db():
global __tablets
for t in __tablets:
t.create_db(t.dbname)
ks_name = t.keyspace
for table_tuple in topo_schema.keyspace_table_map[ks_name]:
t.mquery(t.dbname, table_tuple[1])
def start_tablets():
global __tablets
# start tablets
for t in __tablets:
t.start_vttablet(wait_for_state=None)
# wait for them to come in serving state
for t in __tablets:
t.wait_for_vttablet_state('SERVING')
# InitShardMaster for master tablets
for t in __tablets:
if t.tablet_type == 'master':
utils.run_vtctl(['InitShardMaster', t.keyspace+'/'+t.shard,
t.tablet_alias], auto_log=True)
for ks in topo_schema.keyspaces:
ks_name = ks[0]
ks_type = ks[1]
utils.run_vtctl(['RebuildKeyspaceGraph', ks_name],
auto_log=True)
if ks_type == shard_constants.RANGE_SHARDED:
utils.check_srv_keyspace('test_nj', ks_name,
'Partitions(master): -80 80-\n' +
'Partitions(rdonly): -80 80-\n' +
'Partitions(replica): -80 80-\n')
def get_connection(user=None, password=None):
timeout = 10.0
conn = None
vtgate_addrs = {"vt": [utils.vtgate.addr(),]}
conn = conn_class.connect(vtgate_addrs, timeout,
user=user, password=password)
return conn
def get_keyrange(shard_name):
kr = None
if shard_name == keyrange_constants.SHARD_ZERO:
kr = keyrange.KeyRange(keyrange_constants.NON_PARTIAL_KEYRANGE)
else:
kr = keyrange.KeyRange(shard_name)
return kr
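# Example (sketch): the unsharded shard name '0' (SHARD_ZERO) maps to the
# complete keyrange, while range-shard names such as '-80' or '80-' are
# parsed into their keyspace-id bounds by keyrange.KeyRange.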
def _delete_all(keyspace, shard_name, table_name):
vtgate_conn = get_connection()
# This write is to set up the test with fresh insert
# and hence performing it directly on the connection.
vtgate_conn.begin()
vtgate_conn._execute("delete from %s" % table_name, {},
keyspace, 'master',
keyranges=[get_keyrange(shard_name)])
vtgate_conn.commit()
def restart_vtgate(extra_args={}):
port = utils.vtgate.port
utils.vtgate.kill()
utils.VtGate(port=port).start(extra_args=extra_args)
def populate_table():
keyspace = "KS_UNSHARDED"
_delete_all(keyspace, keyrange_constants.SHARD_ZERO, 'vt_unsharded')
vtgate_conn = get_connection()
cursor = vtgate_conn.cursor(keyspace, 'master', keyranges=[get_keyrange(keyrange_constants.SHARD_ZERO)], writable=True)
cursor.begin()
for x in xrange(10):
cursor.execute("insert into vt_unsharded (id, msg) values (%s, '%s')" % (str(x), 'msg'), {})
cursor.commit()
class TestUnshardedTable(unittest.TestCase):
def setUp(self):
self.vtgate_addrs = {"vt": [utils.vtgate.addr(),]}
self.dc = database_context.DatabaseContext(self.vtgate_addrs)
self.all_ids = []
with database_context.WriteTransaction(self.dc) as context:
for x in xrange(20):
ret_id = db_class_unsharded.VtUnsharded.insert(context.get_cursor(),
msg="test message")
self.all_ids.append(ret_id)
def tearDown(self):
_delete_all("KS_UNSHARDED", "0", 'vt_unsharded')
def test_read(self):
id_val = self.all_ids[0]
with database_context.ReadFromMaster(self.dc) as context:
rows = db_class_unsharded.VtUnsharded.select_by_id(
context.get_cursor(), id_val)
expected = 1
self.assertEqual(len(rows), expected, "wrong number of rows fetched %d, expected %d" % (len(rows), expected))
self.assertEqual(rows[0].id, id_val, "wrong row fetched")
def test_update_and_read(self):
id_val = self.all_ids[0]
where_column_value_pairs = [('id', id_val)]
with database_context.WriteTransaction(self.dc) as context:
update_cols = [('msg', "test update"),]
db_class_unsharded.VtUnsharded.update_columns(context.get_cursor(),
where_column_value_pairs,
update_column_value_pairs=update_cols)
with database_context.ReadFromMaster(self.dc) as context:
rows = db_class_unsharded.VtUnsharded.select_by_id(context.get_cursor(), id_val)
self.assertEqual(len(rows), 1, "wrong number of rows fetched")
self.assertEqual(rows[0].msg, "test update", "wrong row fetched")
def test_delete_and_read(self):
id_val = self.all_ids[-1]
where_column_value_pairs = [('id', id_val)]
with database_context.WriteTransaction(self.dc) as context:
db_class_unsharded.VtUnsharded.delete_by_columns(context.get_cursor(),
where_column_value_pairs)
with database_context.ReadFromMaster(self.dc) as context:
rows = db_class_unsharded.VtUnsharded.select_by_id(context.get_cursor(), id_val)
self.assertEqual(len(rows), 0, "wrong number of rows fetched")
self.all_ids = self.all_ids[:-1]
def test_count(self):
with database_context.ReadFromMaster(self.dc) as context:
count = db_class_unsharded.VtUnsharded.get_count(
context.get_cursor(), msg="test message")
expected = len(self.all_ids)
self.assertEqual(count, expected, "wrong count fetched; expected %d got %d" % (expected, count))
def test_min_id(self):
with database_context.ReadFromMaster(self.dc) as context:
min_id = db_class_unsharded.VtUnsharded.get_min(
context.get_cursor())
expected = min(self.all_ids)
self.assertEqual(min_id, expected, "wrong min value fetched; expected %d got %d" % (expected, min_id))
def test_max_id(self):
with database_context.ReadFromMaster(self.dc) as context:
max_id = db_class_unsharded.VtUnsharded.get_max(
context.get_cursor())
self.all_ids.sort()
expected = max(self.all_ids)
self.assertEqual(max_id, expected, "wrong max value fetched; expected %d got %d" % (expected, max_id))
class TestRangeSharded(unittest.TestCase):
def populate_tables(self):
self.user_id_list = []
self.song_id_list = []
self.user_song_map = {}
r = random.Random()
# This should create the lookup entries and sharding key.
with database_context.WriteTransaction(self.dc) as context:
for x in xrange(20):
# vt_user - EntityRangeSharded; creates username:user_id lookup
user_id = db_class_sharded.VtUser.insert(context.get_cursor(),
username="user%s" % x, msg="test message")
self.user_id_list.append(user_id)
# vt_user_email - RangeSharded; references user_id:keyspace_id hash
email = 'user%s@google.com' % x
m = hashlib.md5()
m.update(email)
email_hash = m.digest()
entity_id_map={'user_id':user_id}
db_class_sharded.VtUserEmail.insert(
context.get_cursor(entity_id_map=entity_id_map),
user_id=user_id, email=email,
email_hash=email_hash)
# vt_song - EntityRangeSharded; creates song_id:user_id lookup
num_songs_for_user = r.randint(1, 5)
for i in xrange(num_songs_for_user):
song_id = db_class_sharded.VtSong.insert(context.get_cursor(),
user_id=user_id, title="Test Song")
self.song_id_list.append(song_id)
self.user_song_map.setdefault(user_id, []).append(song_id)
# vt_song_detail - RangeSharded; references song_id:user_id lookup
entity_id_map = {'song_id':song_id}
db_class_sharded.VtSongDetail.insert(context.get_cursor(entity_id_map=entity_id_map),
song_id=song_id, album_name="Test album",
artist="Test artist")
def setUp(self):
self.vtgate_addrs = {"vt": [utils.vtgate.addr(),]}
self.dc = database_context.DatabaseContext(self.vtgate_addrs)
self.populate_tables()
def tearDown(self):
with database_context.WriteTransaction(self.dc) as context:
for uid in self.user_id_list:
try:
db_class_sharded.VtUser.delete_by_columns(context.get_cursor(entity_id_map={'id':uid}),
[('id', uid),])
db_class_sharded.VtUserEmail.delete_by_columns(context.get_cursor(entity_id_map={'user_id':uid}),
[('user_id', uid),])
db_class_sharded.VtSong.delete_by_columns(context.get_cursor(entity_id_map={'user_id':uid}),
[('user_id', uid),])
song_id_list = self.user_song_map[uid]
for sid in song_id_list:
db_class_sharded.VtSongDetail.delete_by_columns(context.get_cursor(entity_id_map={'song_id':sid}),
[('song_id', sid),])
        except dbexceptions.DatabaseError as e:
          # Rows may already have been deleted by the test; anything else
          # is a real failure.
          if str(e) != "DB Row not found":
            raise
def test_sharding_key_read(self):
user_id = self.user_id_list[0]
with database_context.ReadFromMaster(self.dc) as context:
where_column_value_pairs = [('id', user_id),]
entity_id_map = dict(where_column_value_pairs)
rows = db_class_sharded.VtUser.select_by_columns(
context.get_cursor(entity_id_map=entity_id_map),
where_column_value_pairs)
self.assertEqual(len(rows), 1, "wrong number of rows fetched")
where_column_value_pairs = [('user_id', user_id),]
entity_id_map = dict(where_column_value_pairs)
rows = db_class_sharded.VtUserEmail.select_by_columns(
context.get_cursor(entity_id_map=entity_id_map),
where_column_value_pairs)
self.assertEqual(len(rows), 1, "wrong number of rows fetched")
where_column_value_pairs = [('user_id', user_id),]
entity_id_map = dict(where_column_value_pairs)
rows = db_class_sharded.VtSong.select_by_columns(
context.get_cursor(entity_id_map=entity_id_map),
where_column_value_pairs)
self.assertEqual(len(rows), len(self.user_song_map[user_id]), "wrong number of rows fetched")
def test_entity_id_read(self):
user_id = self.user_id_list[0]
with database_context.ReadFromMaster(self.dc) as context:
entity_id_map = {'username': 'user0'}
rows = db_class_sharded.VtUser.select_by_columns(
context.get_cursor(entity_id_map=entity_id_map),
[('id', user_id),])
self.assertEqual(len(rows), 1, "wrong number of rows fetched")
where_column_value_pairs = [('id', self.user_song_map[user_id][0]),]
entity_id_map = dict(where_column_value_pairs)
rows = db_class_sharded.VtSong.select_by_columns(
context.get_cursor(entity_id_map=entity_id_map),
where_column_value_pairs)
self.assertEqual(len(rows), 1, "wrong number of rows fetched")
where_column_value_pairs = [('song_id', self.user_song_map[user_id][0]),]
entity_id_map = dict(where_column_value_pairs)
rows = db_class_sharded.VtSongDetail.select_by_columns(
context.get_cursor(entity_id_map=entity_id_map),
where_column_value_pairs)
self.assertEqual(len(rows), 1, "wrong number of rows fetched")
def test_in_clause_read(self):
with database_context.ReadFromMaster(self.dc) as context:
user_id_list = [self.user_id_list[0], self.user_id_list[1]]
where_column_value_pairs = (('id', user_id_list),)
entity_id_map = dict(where_column_value_pairs)
rows = db_class_sharded.VtUser.select_by_ids(
context.get_cursor(entity_id_map=entity_id_map),
where_column_value_pairs)
self.assertEqual(len(rows), 2, "wrong number of rows fetched")
got = [row.id for row in rows]
got.sort()
self.assertEqual(user_id_list, got, "wrong rows fetched; expected %s got %s" % (user_id_list, got))
username_list = [row.username for row in rows]
username_list.sort()
where_column_value_pairs = (('username', username_list),)
entity_id_map = dict(where_column_value_pairs)
rows = db_class_sharded.VtUser.select_by_ids(
context.get_cursor(entity_id_map=entity_id_map),
where_column_value_pairs)
self.assertEqual(len(rows), 2, "wrong number of rows fetched")
got = [row.username for row in rows]
got.sort()
self.assertEqual(username_list, got, "wrong rows fetched; expected %s got %s" % (username_list, got))
where_column_value_pairs = (('user_id', user_id_list),)
entity_id_map = dict(where_column_value_pairs)
rows = db_class_sharded.VtUserEmail.select_by_ids(
context.get_cursor(entity_id_map=entity_id_map),
where_column_value_pairs)
self.assertEqual(len(rows), 2, "wrong number of rows fetched")
got = [row.user_id for row in rows]
got.sort()
self.assertEqual(user_id_list, got, "wrong rows fetched; expected %s got %s" % (user_id_list, got))
song_id_list = []
for user_id in user_id_list:
song_id_list.extend(self.user_song_map[user_id])
song_id_list.sort()
where_column_value_pairs = [('id', song_id_list),]
entity_id_map = dict(where_column_value_pairs)
rows = db_class_sharded.VtSong.select_by_columns(
context.get_cursor(entity_id_map=entity_id_map),
where_column_value_pairs)
got = [row.id for row in rows]
got.sort()
self.assertEqual(song_id_list, got, "wrong rows fetched %s got %s" % (song_id_list, got))
where_column_value_pairs = [('song_id', song_id_list),]
entity_id_map = dict(where_column_value_pairs)
rows = db_class_sharded.VtSongDetail.select_by_columns(
context.get_cursor(entity_id_map=entity_id_map),
where_column_value_pairs)
got = [row.song_id for row in rows]
got.sort()
self.assertEqual(song_id_list, got, "wrong rows fetched %s got %s" % (song_id_list, got))
def test_keyrange_read(self):
where_column_value_pairs = []
with database_context.ReadFromMaster(self.dc) as context:
rows1 = db_class_sharded.VtUser.select_by_columns(
context.get_cursor(keyrange='-80'), where_column_value_pairs)
rows2 = db_class_sharded.VtUser.select_by_columns(
context.get_cursor(keyrange='80-'), where_column_value_pairs)
fetched_rows = len(rows1) + len(rows2)
expected = len(self.user_id_list)
self.assertEqual(fetched_rows, expected, "wrong number of rows fetched expected:%d got:%d" % (expected, fetched_rows))
def test_scatter_read(self):
where_column_value_pairs = []
with database_context.ReadFromMaster(self.dc) as context:
rows = db_class_sharded.VtUser.select_by_columns(
context.get_cursor(keyrange=keyrange_constants.NON_PARTIAL_KEYRANGE),
where_column_value_pairs)
self.assertEqual(len(rows), len(self.user_id_list), "wrong number of rows fetched, expecting %d got %d" % (len(self.user_id_list), len(rows)))
def test_streaming_read(self):
where_column_value_pairs = []
with database_context.ReadFromMaster(self.dc) as context:
rows = db_class_sharded.VtUser.select_by_columns_streaming(
context.get_cursor(keyrange=keyrange_constants.NON_PARTIAL_KEYRANGE),
where_column_value_pairs)
got_user_id_list = []
for r in rows:
got_user_id_list.append(r.id)
self.assertEqual(len(got_user_id_list), len(self.user_id_list), "wrong number of rows fetched")
  def test_update_columns(self):
with database_context.WriteTransaction(self.dc) as context:
user_id = self.user_id_list[1]
where_column_value_pairs = [('id', user_id),]
entity_id_map = {'id': user_id}
new_username = 'new_user%s' % user_id
update_cols = [('username', new_username),]
db_class_sharded.VtUser.update_columns(context.get_cursor(entity_id_map=entity_id_map),
where_column_value_pairs,
update_column_value_pairs=update_cols)
# verify the updated value.
where_column_value_pairs = [('id', user_id),]
rows = db_class_sharded.VtUser.select_by_columns(
context.get_cursor(entity_id_map={'id': user_id}),
where_column_value_pairs)
self.assertEqual(len(rows), 1, "wrong number of rows fetched")
self.assertEqual(new_username, rows[0].username)
where_column_value_pairs = [('user_id', user_id),]
entity_id_map = {'user_id': user_id}
new_email = 'new_user%s@google.com' % user_id
m = hashlib.md5()
m.update(new_email)
email_hash = m.digest()
update_cols = [('email', new_email), ('email_hash', email_hash)]
db_class_sharded.VtUserEmail.update_columns(context.get_cursor(entity_id_map={'user_id':user_id}),
where_column_value_pairs,
update_column_value_pairs=update_cols)
# verify the updated value.
with database_context.ReadFromMaster(self.dc) as context:
where_column_value_pairs = [('user_id', user_id),]
entity_id_map = dict(where_column_value_pairs)
rows = db_class_sharded.VtUserEmail.select_by_ids(
context.get_cursor(entity_id_map=entity_id_map),
where_column_value_pairs)
self.assertEqual(len(rows), 1, "wrong number of rows fetched")
self.assertEqual(new_email, rows[0].email)
self.user_id_list.sort()
  def test_delete_columns(self):
user_id = self.user_id_list[-1]
with database_context.WriteTransaction(self.dc) as context:
where_column_value_pairs = [('id', user_id),]
entity_id_map = {'id': user_id}
db_class_sharded.VtUser.delete_by_columns(context.get_cursor(entity_id_map=entity_id_map),
where_column_value_pairs)
where_column_value_pairs = [('user_id', user_id),]
entity_id_map = {'user_id': user_id}
db_class_sharded.VtUserEmail.delete_by_columns(context.get_cursor(entity_id_map=entity_id_map),
where_column_value_pairs)
with database_context.ReadFromMaster(self.dc) as context:
rows = db_class_sharded.VtUser.select_by_columns(
context.get_cursor(entity_id_map=entity_id_map),
where_column_value_pairs)
self.assertEqual(len(rows), 0, "wrong number of rows fetched")
rows = db_class_sharded.VtUserEmail.select_by_ids(
context.get_cursor(entity_id_map=entity_id_map),
where_column_value_pairs)
self.assertEqual(len(rows), 0, "wrong number of rows fetched")
self.user_id_list = self.user_id_list[:-1]
self.user_id_list.sort()
def test_count(self):
with database_context.ReadFromMaster(self.dc) as context:
count = db_class_sharded.VtUser.get_count(
context.get_cursor(keyrange=keyrange_constants.NON_PARTIAL_KEYRANGE),
msg="test message")
expected = len(self.user_id_list)
self.assertEqual(count, expected, "wrong count fetched; expected %d got %d" % (expected, count))
def test_min_id(self):
with database_context.ReadFromMaster(self.dc) as context:
min_id = db_class_sharded.VtUser.get_min(
context.get_cursor(keyrange=keyrange_constants.NON_PARTIAL_KEYRANGE))
self.user_id_list.sort()
expected = min(self.user_id_list)
self.assertEqual(min_id, expected, "wrong min value fetched; expected %d got %d" % (expected, min_id))
def test_max_id(self):
with database_context.ReadFromMaster(self.dc) as context:
max_id = db_class_sharded.VtUser.get_max(
context.get_cursor(keyrange=keyrange_constants.NON_PARTIAL_KEYRANGE))
expected = max(self.user_id_list)
self.assertEqual(max_id, expected, "wrong max value fetched; expected %d got %d" % (expected, max_id))
if __name__ == '__main__':
utils.main()
|
|
from os import path
from .info import __VERSION__
# <p>Copyright (c) 2005-2012 Stephen John Machin, Lingfo Pty Ltd</p>
# <p>This module is part of the xlrd package, which is released under a
# BSD-style licence.</p>
from . import licences
##
# <p><b>A Python module for extracting data from MS Excel (TM) spreadsheet files.
# <br /><br />
# Version 0.7.4 -- April 2012
# </b></p>
#
# <h2>General information</h2>
#
# <h3>Acknowledgements</h3>
#
# <p>
# Development of this module would not have been possible without the document
# "OpenOffice.org's Documentation of the Microsoft Excel File Format"
# ("OOo docs" for short).
# The latest version is available from OpenOffice.org in
# <a href=http://sc.openoffice.org/excelfileformat.pdf> PDF format</a>
# and
# <a href=http://sc.openoffice.org/excelfileformat.odt> ODT format.</a>
# Small portions of the OOo docs are reproduced in this
# document. A study of the OOo docs is recommended for those who wish a
# deeper understanding of the Excel file layout than the xlrd docs can provide.
# </p>
#
# <p>Backporting to Python 2.1 was partially funded by
# <a href=http://journyx.com/>
# Journyx - provider of timesheet and project accounting solutions.
# </a>
# </p>
#
# <p>Provision of formatting information in version 0.6.1 was funded by
# <a href=http://www.simplistix.co.uk>
# Simplistix Ltd.
# </a>
# </p>
#
# <h3>Unicode</h3>
#
# <p>This module presents all text strings as Python unicode objects.
# From Excel 97 onwards, text in Excel spreadsheets has been stored as Unicode.
# Older files (Excel 95 and earlier) don't keep strings in Unicode;
# a CODEPAGE record provides a codepage number (for example, 1252) which is
# used by xlrd to derive the encoding (for same example: "cp1252") which is
# used to translate to Unicode.</p>
# <small>
# <p>If the CODEPAGE record is missing (possible if the file was created
# by third-party software), xlrd will assume that the encoding is ascii, and keep going.
# If the actual encoding is not ascii, a UnicodeDecodeError exception will be raised and
# you will need to determine the encoding yourself, and tell xlrd:
# <pre>
# book = xlrd.open_workbook(..., encoding_override="cp1252")
# </pre></p>
# <p>If the CODEPAGE record exists but is wrong (for example, the codepage
# number is 1251, but the strings are actually encoded in koi8_r),
# it can be overridden using the same mechanism.
# The supplied runxlrd.py has a corresponding command-line argument, which
# may be used for experimentation:
# <pre>
# runxlrd.py -e koi8_r 3rows myfile.xls
# </pre></p>
# <p>The first place to look for an encoding ("codec name") is
# <a href=http://docs.python.org/lib/standard-encodings.html>
# the Python documentation</a>.
# </p>
# </small>
#
# <h3>Dates in Excel spreadsheets</h3>
#
# <p>In reality, there are no such things. What you have are floating point
# numbers and pious hope.
# There are several problems with Excel dates:</p>
#
# <p>(1) Dates are not stored as a separate data type; they are stored as
# floating point numbers and you have to rely on
# (a) the "number format" applied to them in Excel and/or
# (b) knowing which cells are supposed to have dates in them.
# This module helps with (a) by inspecting the
# format that has been applied to each number cell;
# if it appears to be a date format, the cell
# is classified as a date rather than a number. Feedback on this feature,
# especially from non-English-speaking locales, would be appreciated.</p>
#
# <p>(2) Excel for Windows stores dates by default as the number of
# days (or fraction thereof) since 1899-12-31T00:00:00. Excel for
# Macintosh uses a default start date of 1904-01-01T00:00:00. The date
# system can be changed in Excel on a per-workbook basis (for example:
# Tools -> Options -> Calculation, tick the "1904 date system" box).
# This is of course a bad idea if there are already dates in the
# workbook. There is no good reason to change it even if there are no
# dates in the workbook. Which date system is in use is recorded in the
# workbook. A workbook transported from Windows to Macintosh (or vice
# versa) will work correctly with the host Excel. When using this
# module's xldate_as_tuple function to convert numbers from a workbook,
# you must use the datemode attribute of the Book object. If you guess,
# or make a judgement depending on where you believe the workbook was
# created, you run the risk of being 1462 days out of kilter.</p>
#
# <p>Reference:
# http://support.microsoft.com/default.aspx?scid=KB;EN-US;q180162</p>
#
#
# <p>(3) The Excel implementation of the Windows-default 1900-based date system works on the
# incorrect premise that 1900 was a leap year. It interprets the number 60 as meaning 1900-02-29,
# which is not a valid date. Consequently any number less than 61 is ambiguous. Example: is 59 the
# result of 1900-02-28 entered directly, or is it 1900-03-01 minus 2 days? The OpenOffice.org Calc
# program "corrects" the Microsoft problem; entering 1900-02-27 causes the number 59 to be stored.
# Save as an XLS file, then open the file with Excel -- you'll see 1900-02-28 displayed.</p>
#
# <p>Reference: http://support.microsoft.com/default.aspx?scid=kb;en-us;214326</p>
#
# <p>(4) The Macintosh-default 1904-based date system counts 1904-01-02 as day 1 and 1904-01-01 as day zero.
# Thus any number such that (0.0 <= number < 1.0) is ambiguous. Is 0.625 a time of day (15:00:00),
# independent of the calendar,
# or should it be interpreted as an instant on a particular day (1904-01-01T15:00:00)?
# The xldate_* functions in this module
# take the view that such a number is a calendar-independent time of day (like Python's datetime.time type) for both
# date systems. This is consistent with more recent Microsoft documentation
# (for example, the help file for Excel 2002 which says that the first day
# in the 1904 date system is 1904-01-02).</p>
#
# <p>(5) Usage of the Excel DATE() function may leave strange dates in a spreadsheet. Quoting the help file,
# in respect of the 1900 date system: "If year is between 0 (zero) and 1899 (inclusive),
# Excel adds that value to 1900 to calculate the year. For example, DATE(108,1,2) returns January 2, 2008 (1900+108)."
# This gimmick, semi-defensible only for arguments up to 99 and only in the pre-Y2K-awareness era,
# means that DATE(1899, 12, 31) is interpreted as 3799-12-31.</p>
#
# <p>For further information, please refer to the documentation for the xldate_* functions.</p>
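#
# <p>A minimal sketch (the file name and cell position are hypothetical) of
# the approach recommended above: test the cell's type, then convert the
# number using the workbook's datemode:
# <pre>
# import datetime, xlrd
# book = xlrd.open_workbook("dates.xls")
# sheet = book.sheet_by_index(0)
# cell = sheet.cell(0, 0)
# if cell.ctype == xlrd.XL_CELL_DATE:
#     date_tuple = xlrd.xldate_as_tuple(cell.value, book.datemode)
#     print(datetime.datetime(*date_tuple))
# </pre></p>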
#
# <h3> Named references, constants, formulas, and macros</h3>
#
# <p>
# A name is used to refer to a cell, a group of cells, a constant
# value, a formula, or a macro. Usually the scope of a name is global
# across the whole workbook. However it can be local to a worksheet.
# For example, if the sales figures are in different cells in
# different sheets, the user may define the name "Sales" in each
# sheet. There are built-in names, like "Print_Area" and
# "Print_Titles"; these two are naturally local to a sheet.
# </p><p>
# To inspect the names with a user interface like MS Excel, OOo Calc,
# or Gnumeric, click on Insert/Names/Define. This will show the global
# names, plus those local to the currently selected sheet.
# </p><p>
# A Book object provides two dictionaries (name_map and
# name_and_scope_map) and a list (name_obj_list) which allow various
# ways of accessing the Name objects. There is one Name object for
# each NAME record found in the workbook. Name objects have many
# attributes, several of which are relevant only when obj.macro is 1.
# </p><p>
# In the examples directory you will find namesdemo.xls which
# showcases the many different ways that names can be used, and
# xlrdnamesAPIdemo.py which offers 3 different queries for inspecting
# the names in your files, and shows how to extract whatever a name is
# referring to. There is currently one "convenience method",
# Name.cell(), which extracts the value in the case where the name
# refers to a single cell. More convenience methods are planned. The
# source code for Name.cell (in __init__.py) is an extra source of
# information on how the Name attributes hang together.
# </p>
#
# <p><i>Name information is <b>not</b> extracted from files older than
# Excel 5.0 (Book.biff_version < 50)</i></p>
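#
# <p>A small sketch (the looked-up name is hypothetical) using the
# convenience method mentioned above to extract a single-cell name:
# <pre>
# book = xlrd.open_workbook("namesdemo.xls")
# name_obj = book.name_map["sales"][0]  # name_map keys are lower-cased
# print(name_obj.cell().value)
# </pre></p>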
#
# <h3>Formatting</h3>
#
# <h4>Introduction</h4>
#
# <p>This collection of features, new in xlrd version 0.6.1, is intended
# to provide the information needed to (1) display/render spreadsheet contents
# (say) on a screen or in a PDF file, and (2) copy spreadsheet data to another
# file without losing the ability to display/render it.</p>
#
# <h4>The Palette; Colour Indexes</h4>
#
# <p>A colour is represented in Excel as a (red, green, blue) ("RGB") tuple
# with each component in range(256). However it is not possible to access an
# unlimited number of colours; each spreadsheet is limited to a palette of 64 different
# colours (24 in Excel 3.0 and 4.0, 8 in Excel 2.0). Colours are referenced by an index
# ("colour index") into this palette.
#
# Colour indexes 0 to 7 represent 8 fixed built-in colours: black, white, red, green, blue,
# yellow, magenta, and cyan.</p>
#
# The remaining colours in the palette (8 to 63 in Excel 5.0 and later)
# can be changed by the user. In the Excel 2003 UI, Tools/Options/Color presents a palette
# of 7 rows of 8 colours. The last two rows are reserved for use in charts.<br />
# The correspondence between this grid and the assigned
# colour indexes is NOT left-to-right top-to-bottom.<br />
# Indexes 8 to 15 correspond to changeable
# parallels of the 8 fixed colours -- for example, index 7 is forever cyan;
# index 15 starts off being cyan but can be changed by the user.<br />
#
# The default colour for each index depends on the file version; tables of the defaults
# are available in the source code. If the user changes one or more colours,
# a PALETTE record appears in the XLS file -- it gives the RGB values for *all* changeable
# indexes.<br />
# Note that colours can be used in "number formats": "[CYAN]...." and "[COLOR8]...." refer
# to colour index 7; "[COLOR16]...." will produce cyan
# unless the user changes colour index 15 to something else.<br />
#
# <p>In addition, there are several "magic" colour indexes used by Excel:<br />
# 0x18 (BIFF3-BIFF4), 0x40 (BIFF5-BIFF8): System window text colour for border lines
# (used in XF, CF, and WINDOW2 records)<br />
# 0x19 (BIFF3-BIFF4), 0x41 (BIFF5-BIFF8): System window background colour for pattern background
# (used in XF and CF records )<br />
# 0x43: System face colour (dialogue background colour)<br />
# 0x4D: System window text colour for chart border lines<br />
# 0x4E: System window background colour for chart areas<br />
# 0x4F: Automatic colour for chart border lines (seems to be always Black)<br />
# 0x50: System ToolTip background colour (used in note objects)<br />
# 0x51: System ToolTip text colour (used in note objects)<br />
# 0x7FFF: System window text colour for fonts (used in FONT and CF records)<br />
# Note 0x7FFF appears to be the *default* colour index. It appears quite often in FONT
# records.<br />
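#
# <p>A short sketch (file name hypothetical): when a workbook is opened
# with formatting_info=True, Book.colour_map maps each colour index to an
# (red, green, blue) tuple, or None:
# <pre>
# book = xlrd.open_workbook("styled.xls", formatting_info=True)
# print(book.colour_map.get(15))  # e.g. (0, 255, 255) unless changed
# </pre></p>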
#
# <h4>Default Formatting</h4>
#
# Default formatting is applied to all empty cells (those not described by a cell record).
# Firstly row default information (ROW record, Rowinfo class) is used if available.
# Failing that, column default information (COLINFO record, Colinfo class) is used if available.
# As a last resort the worksheet/workbook default cell format will be used; this
# should always be present in an Excel file,
# described by the XF record with the fixed index 15 (0-based). By default, it uses the
# worksheet/workbook default cell style, described by the very first XF record (index 0).
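#
# <p>A brief sketch of resolving a cell's effective format record
# (requires formatting_info=True; the row/column indexes are hypothetical):
# <pre>
# xfx = sheet.cell_xf_index(0, 0)
# xf = book.xf_list[xfx]
# print(book.format_map[xf.format_key].format_str)
# </pre></p>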
#
# <h4> Formatting features not included in xlrd version 0.6.1</h4>
# <ul>
# <li>Rich text i.e. strings containing partial <b>bold</b> <i>italic</i>
# and <u>underlined</u> text, change of font inside a string, etc.
# See OOo docs s3.4 and s3.2.
# <i> Rich text is included in version 0.7.2</i></li>
# <li>Asian phonetic text (known as "ruby"), used for Japanese furigana. See OOo docs
# s3.4.2 (p15)</li>
# <li>Conditional formatting. See OOo docs
# s5.12, s6.21 (CONDFMT record), s6.16 (CF record)</li>
# <li>Miscellaneous sheet-level and book-level items e.g. printing layout, screen panes. </li>
# <li>Modern Excel file versions don't keep most of the built-in
# "number formats" in the file; Excel loads formats according to the
# user's locale. Currently xlrd's emulation of this is limited to
# a hard-wired table that applies to the US English locale. This may mean
# that currency symbols, date order, thousands separator, decimals separator, etc
# are inappropriate. Note that this does not affect users who are copying XLS
# files, only those who are visually rendering cells.</li>
# </ul>
#
# <h3>Loading worksheets on demand</h3>
#
# <p>This feature, new in version 0.7.1, is governed by the on_demand argument
# to the open_workbook() function and allows saving memory and time by loading
# only those sheets that the caller is interested in, and releasing sheets
# when no longer required.</p>
#
# <p>on_demand=False (default): No change. open_workbook() loads global data
# and all sheets, releases resources no longer required (principally the
# str or mmap object containing the Workbook stream), and returns.</p>
#
# <p>on_demand=True and BIFF version < 5.0: A warning message is emitted,
# on_demand is recorded as False, and the old process is followed.</p>
#
# <p>on_demand=True and BIFF version >= 5.0: open_workbook() loads global
# data and returns without releasing resources. At this stage, the only
# information available about sheets is Book.nsheets and Book.sheet_names().</p>
#
# <p>Book.sheet_by_name() and Book.sheet_by_index() will load the requested
# sheet if it is not already loaded.</p>
#
# <p>Book.sheets() will load all/any unloaded sheets.</p>
#
# <p>The caller may save memory by calling
# Book.unload_sheet(sheet_name_or_index) when finished with the sheet.
# This applies irrespective of the state of on_demand.</p>
#
# <p>The caller may re-load an unloaded sheet by calling Book.sheet_by_xxxx()
# -- except if those required resources have been released (which will
# have happened automatically when on_demand is false). This is the only
# case where an exception will be raised.</p>
#
# <p>The caller may query the state of a sheet:
# Book.sheet_loaded(sheet_name_or_index) -> a bool</p>
#
# <p> Book.release_resources() may be used to save memory and close
# any memory-mapped file before proceeding to examine already-loaded
# sheets. Once resources are released, no further sheets can be loaded.</p>
#
# <p> When using on-demand, it is advisable to ensure that
# Book.release_resources() is always called even if an exception
# is raised in your own code; otherwise if the input file has been
# memory-mapped, the mmap.mmap object will not be closed and you will
# not be able to access the physical file until your Python process
# terminates. This can be done by calling Book.release_resources()
# explicitly in the finally suite of a try/finally block.
# New in xlrd 0.7.2: the Book object is a "context manager", so if
# using Python 2.5 or later, you can wrap your code in a "with"
# statement.</p>
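#
# <p>A minimal sketch of the pattern described above (file and sheet
# names hypothetical):
# <pre>
# book = xlrd.open_workbook("big.xls", on_demand=True)
# try:
#     sheet = book.sheet_by_name("Data")
#     # ... use sheet ...
#     book.unload_sheet("Data")
# finally:
#     book.release_resources()
# </pre></p>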
##
import sys, zipfile, pprint
from . import timemachine
from .biffh import (
XLRDError,
biff_text_from_num,
error_text_from_code,
XL_CELL_BLANK,
XL_CELL_TEXT,
XL_CELL_BOOLEAN,
XL_CELL_ERROR,
XL_CELL_EMPTY,
XL_CELL_DATE,
XL_CELL_NUMBER
)
from .formula import * # is constrained by __all__
from .book import Book, colname #### TODO #### formula also has `colname` (restricted to 256 cols)
from .sheet import empty_cell
from .xldate import XLDateError, xldate_as_tuple
if sys.version.startswith("IronPython"):
# print >> sys.stderr, "...importing encodings"
import encodings
try:
import mmap
MMAP_AVAILABLE = 1
except ImportError:
MMAP_AVAILABLE = 0
USE_MMAP = MMAP_AVAILABLE
##
#
# Open a spreadsheet file for data extraction.
#
# @param filename The path to the spreadsheet file to be opened.
#
# @param logfile An open file to which messages and diagnostics are written.
#
# @param verbosity Increases the volume of trace material written to the logfile.
#
# @param use_mmap Whether to use the mmap module is determined heuristically.
# Use this arg to override the result. Current heuristic: mmap is used if it exists.
#
# @param file_contents ... as a string or an mmap.mmap object or some other behave-alike object.
# If file_contents is supplied, filename will not be used, except (possibly) in messages.
#
# @param encoding_override Used to overcome missing or bad codepage information
# in older-version files. Refer to discussion in the <b>Unicode</b> section above.
# <br /> -- New in version 0.6.0
#
# @param formatting_info Governs provision of a reference to an XF (eXtended Format) object
# for each cell in the worksheet.
# <br /> Default is <i>False</i>. This is backwards compatible and saves memory.
# "Blank" cells (those with their own formatting information but no data) are treated as empty
# (by ignoring the file's BLANK and MULBLANK records).
# It cuts off any bottom "margin" of rows of empty (and blank) cells and
# any right "margin" of columns of empty (and blank) cells.
# Only cell_value and cell_type are available.
# <br /> <i>True</i> provides all cells, including empty and blank cells.
# XF information is available for each cell.
# <br /> -- New in version 0.6.1
#
# @param on_demand Governs whether sheets are all loaded initially or when demanded
# by the caller. Please refer back to the section "Loading worksheets on demand" for details.
# <br /> -- New in version 0.7.1
#
# @param ragged_rows False (the default) means all rows are padded out with empty cells so that all
# rows have the same size (Sheet.ncols). True means that there are no empty cells at the ends of rows.
# This can result in substantial memory savings if rows are of widely varying sizes. See also the
# Sheet.row_len() method.
# <br /> -- New in version 0.7.2
#
# @return An instance of the Book class.
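#
# <p>A typical call (file name hypothetical):
# <pre>
# book = xlrd.open_workbook("myfile.xls")
# print(book.nsheets, book.sheet_names())
# </pre></p>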
def open_workbook(filename=None,
logfile=sys.stdout,
verbosity=0,
use_mmap=USE_MMAP,
file_contents=None,
encoding_override=None,
formatting_info=False,
on_demand=False,
ragged_rows=False,
):
peeksz = 4
if file_contents:
peek = file_contents[:peeksz]
else:
f = open(filename, "rb")
peek = f.read(peeksz)
f.close()
if peek == b"PK\x03\x04": # a ZIP file
if file_contents:
zf = zipfile.ZipFile(timemachine.BYTES_IO(file_contents))
else:
zf = zipfile.ZipFile(filename)
        # Workaround for some third party files that use backslashes and
        # mixed-case component names: map each name, normalised to forward
        # slashes and lower case, to the actual filename in the zip container.
component_names = dict([(name.replace('\\', '/').lower(), name)
for name in zf.namelist()])
if verbosity:
logfile.write('ZIP component_names:\n')
pprint.pprint(component_names, logfile)
if 'xl/workbook.xml' in component_names:
from . import xlsx
bk = xlsx.open_workbook_2007_xml(
zf,
component_names,
logfile=logfile,
verbosity=verbosity,
use_mmap=use_mmap,
formatting_info=formatting_info,
on_demand=on_demand,
ragged_rows=ragged_rows,
)
return bk
if 'xl/workbook.bin' in component_names:
raise XLRDError('Excel 2007 xlsb file; not supported')
if 'content.xml' in component_names:
raise XLRDError('Openoffice.org ODS file; not supported')
raise XLRDError('ZIP file contents not a known type of workbook')
from . import book
bk = book.open_workbook_xls(
filename=filename,
logfile=logfile,
verbosity=verbosity,
use_mmap=use_mmap,
file_contents=file_contents,
encoding_override=encoding_override,
formatting_info=formatting_info,
on_demand=on_demand,
ragged_rows=ragged_rows,
)
return bk
##
# For debugging: dump an XLS file's BIFF records in char & hex.
# @param filename The path to the file to be dumped.
# @param outfile An open file, to which the dump is written.
# @param unnumbered If true, omit offsets (for meaningful diffs).
def dump(filename, outfile=sys.stdout, unnumbered=False):
from .biffh import biff_dump
bk = Book()
bk.biff2_8_load(filename=filename, logfile=outfile, )
biff_dump(bk.mem, bk.base, bk.stream_len, 0, outfile, unnumbered)
##
# For debugging and analysis: summarise the file's BIFF records.
# I.e. produce a sorted file of (record_name, count).
# @param filename The path to the file to be summarised.
# @param outfile An open file, to which the summary is written.
def count_records(filename, outfile=sys.stdout):
from .biffh import biff_count_records
bk = Book()
bk.biff2_8_load(filename=filename, logfile=outfile, )
biff_count_records(bk.mem, bk.base, bk.stream_len, outfile)
|
|
from __future__ import absolute_import, division
from django.core import urlresolvers
from django.db import connection
from django.db.models import Sum
from django.db.models.query import QuerySet
from django.http import HttpResponseNotFound, HttpRequest, HttpResponse
from django.template import RequestContext, loader
from django.utils import timezone
from django.utils.translation import ugettext as _
from jinja2 import Markup as mark_safe
from analytics.lib.counts import CountStat, process_count_stat, COUNT_STATS
from analytics.lib.time_utils import time_range
from analytics.models import BaseCount, InstallationCount, RealmCount, \
UserCount, StreamCount
from zerver.decorator import has_request_variables, REQ, zulip_internal, \
zulip_login_required, to_non_negative_int, to_utc_datetime
from zerver.lib.request import JsonableError
from zerver.lib.response import json_success
from zerver.lib.timestamp import ceiling_to_hour, ceiling_to_day, timestamp_to_datetime
from zerver.models import Realm, UserProfile, UserActivity, \
UserActivityInterval, Client
from zproject.jinja2 import render_to_response
from collections import defaultdict
from datetime import datetime, timedelta
import itertools
import json
import pytz
import re
import time
from six.moves import filter, map, range, zip
from typing import Any, Dict, List, Set, Tuple, Optional, Sequence, Callable, \
    Type, Union, Text
@zulip_login_required
def stats(request):
# type: (HttpRequest) -> HttpResponse
return render_to_response('analytics/stats.html')
@has_request_variables
def get_chart_data(request, user_profile, chart_name=REQ(),
min_length=REQ(converter=to_non_negative_int, default=None),
start=REQ(converter=to_utc_datetime, default=None),
end=REQ(converter=to_utc_datetime, default=None)):
# type: (HttpRequest, UserProfile, Text, Optional[int], Optional[datetime], Optional[datetime]) -> HttpResponse
realm = user_profile.realm
# These are implicitly relying on realm.date_created and timezone.now being in UTC.
if start is None:
start = realm.date_created
if end is None:
end = timezone.now()
if start > end:
raise JsonableError(_("Start time is later than end time. Start: %(start)s, End: %(end)s") %
{'start': start, 'end': end})
if chart_name == 'number_of_humans':
stat = COUNT_STATS['active_users:is_bot:day']
tables = [RealmCount]
subgroups = ['false', 'true']
labels = ['human', 'bot']
include_empty_subgroups = True
elif chart_name == 'messages_sent_over_time':
stat = COUNT_STATS['messages_sent:is_bot:hour']
tables = [RealmCount]
subgroups = ['false', 'true']
labels = ['human', 'bot']
include_empty_subgroups = True
elif chart_name == 'messages_sent_by_message_type':
stat = COUNT_STATS['messages_sent:message_type:day']
tables = [RealmCount, UserCount]
subgroups = ['public_stream', 'private_stream', 'private_message']
labels = None
include_empty_subgroups = True
elif chart_name == 'messages_sent_by_client':
stat = COUNT_STATS['messages_sent:client:day']
tables = [RealmCount, UserCount]
subgroups = [str(x) for x in Client.objects.values_list('id', flat=True).order_by('id')]
labels = list(Client.objects.values_list('name', flat=True).order_by('id'))
include_empty_subgroups = False
else:
raise JsonableError(_("Unknown chart name: %s") % (chart_name,))
end_times = time_range(start, end, stat.frequency, min_length)
data = {'end_times': end_times, 'frequency': stat.frequency, 'interval': stat.interval}
for table in tables:
if table == RealmCount:
data['realm'] = get_time_series_by_subgroup(
stat, RealmCount, realm.id, end_times, subgroups, labels, include_empty_subgroups)
if table == UserCount:
data['user'] = get_time_series_by_subgroup(
stat, UserCount, user_profile.id, end_times, subgroups, labels, include_empty_subgroups)
return json_success(data=data)
def table_filtered_to_id(table, key_id):
# type: (Type[BaseCount], int) -> QuerySet
if table == RealmCount:
return RealmCount.objects.filter(realm_id=key_id)
elif table == UserCount:
return UserCount.objects.filter(user_id=key_id)
elif table == StreamCount:
return StreamCount.objects.filter(stream_id=key_id)
elif table == InstallationCount:
return InstallationCount.objects.all()
else:
raise ValueError("Unknown table: %s" % (table,))
def get_time_series_by_subgroup(stat, table, key_id, end_times, subgroups, labels, include_empty_subgroups):
# type: (CountStat, Type[BaseCount], Optional[int], List[datetime], List[str], Optional[List[str]], bool) -> Dict[str, List[int]]
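    # Build one fixed-length series per label: bucket this stat's rows by
    # (subgroup, end_time), then, for each subgroup that has data (or for
    # every subgroup when include_empty_subgroups), emit a list of values
    # aligned with end_times, defaulting to 0 where no row exists.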
if labels is None:
labels = subgroups
if len(subgroups) != len(labels):
raise ValueError("subgroups and labels have lengths %s and %s, which are different." %
(len(subgroups), len(labels)))
queryset = table_filtered_to_id(table, key_id).filter(property=stat.property) \
.values_list('subgroup', 'end_time', 'value')
value_dicts = defaultdict(lambda: defaultdict(int)) # type: Dict[Optional[str], Dict[datetime, int]]
for subgroup, end_time, value in queryset:
value_dicts[subgroup][end_time] = value
value_arrays = {}
for subgroup, label in zip(subgroups, labels):
if (subgroup in value_dicts) or include_empty_subgroups:
value_arrays[label] = [value_dicts[subgroup][end_time] for end_time in end_times]
return value_arrays
eastern_tz = pytz.timezone('US/Eastern')
def make_table(title, cols, rows, has_row_class=False):
# type: (str, List[str], List[Any], bool) -> str
if not has_row_class:
def fix_row(row):
# type: (Any) -> Dict[str, Any]
return dict(cells=row, row_class=None)
rows = list(map(fix_row, rows))
data = dict(title=title, cols=cols, rows=rows)
content = loader.render_to_string(
'analytics/ad_hoc_query.html',
dict(data=data)
)
return content
def dictfetchall(cursor):
# type: (connection.cursor) -> List[Dict[str, Any]]
"Returns all rows from a cursor as a dict"
desc = cursor.description
return [
dict(list(zip([col[0] for col in desc], row)))
for row in cursor.fetchall()
]
def get_realm_day_counts():
# type: () -> Dict[str, Dict[str, str]]
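    # For each realm, count non-bot messages per day over the last 8 days,
    # then render the counts as a row of <td> cells, marking each realm's
    # minimum day "bad" and maximum day "good".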
query = '''
select
r.string_id,
(now()::date - pub_date::date) age,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
join zerver_client c on c.id = m.sending_client_id
where
(not up.is_bot)
and
pub_date > now()::date - interval '8 day'
and
c.name not in ('zephyr_mirror', 'ZulipMonitoring')
group by
r.string_id,
age
order by
r.string_id,
age
'''
cursor = connection.cursor()
cursor.execute(query)
rows = dictfetchall(cursor)
cursor.close()
counts = defaultdict(dict) # type: Dict[str, Dict[int, int]]
for row in rows:
counts[row['string_id']][row['age']] = row['cnt']
result = {}
for string_id in counts:
raw_cnts = [counts[string_id].get(age, 0) for age in range(8)]
min_cnt = min(raw_cnts)
max_cnt = max(raw_cnts)
def format_count(cnt):
# type: (int) -> str
if cnt == min_cnt:
good_bad = 'bad'
elif cnt == max_cnt:
good_bad = 'good'
else:
good_bad = 'neutral'
return '<td class="number %s">%s</td>' % (good_bad, cnt)
cnts = ''.join(map(format_count, raw_cnts))
result[string_id] = dict(cnts=cnts)
return result
def realm_summary_table(realm_minutes):
# type: (Dict[str, float]) -> str
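    # One row per realm that has had activity in the last two weeks:
    # active users in the last day, "at risk" users (last seen between 1
    # and 7 days ago), and total human and bot counts; realm_minutes (from
    # user_activity_intervals) is folded in afterwards as hours of use.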
query = '''
SELECT
realm.string_id,
coalesce(user_counts.active_user_count, 0) active_user_count,
coalesce(at_risk_counts.at_risk_count, 0) at_risk_count,
(
SELECT
count(*)
FROM zerver_userprofile up
WHERE up.realm_id = realm.id
AND is_active
AND not is_bot
) user_profile_count,
(
SELECT
count(*)
FROM zerver_userprofile up
WHERE up.realm_id = realm.id
AND is_active
AND is_bot
) bot_count
FROM zerver_realm realm
LEFT OUTER JOIN
(
SELECT
up.realm_id realm_id,
count(distinct(ua.user_profile_id)) active_user_count
FROM zerver_useractivity ua
JOIN zerver_userprofile up
ON up.id = ua.user_profile_id
WHERE
query in (
'/json/send_message',
'send_message_backend',
'/api/v1/send_message',
'/json/update_pointer',
'/json/users/me/pointer'
)
AND
last_visit > now() - interval '1 day'
AND
not is_bot
GROUP BY realm_id
) user_counts
ON user_counts.realm_id = realm.id
LEFT OUTER JOIN
(
SELECT
realm_id,
count(*) at_risk_count
FROM (
SELECT
realm.id as realm_id,
up.email
FROM zerver_useractivity ua
JOIN zerver_userprofile up
ON up.id = ua.user_profile_id
JOIN zerver_realm realm
ON realm.id = up.realm_id
WHERE up.is_active
AND (not up.is_bot)
AND
ua.query in (
'/json/send_message',
'send_message_backend',
'/api/v1/send_message',
'/json/update_pointer',
'/json/users/me/pointer'
)
GROUP by realm.id, up.email
HAVING max(last_visit) between
now() - interval '7 day' and
now() - interval '1 day'
) as at_risk_users
GROUP BY realm_id
) at_risk_counts
ON at_risk_counts.realm_id = realm.id
WHERE EXISTS (
SELECT *
FROM zerver_useractivity ua
JOIN zerver_userprofile up
ON up.id = ua.user_profile_id
WHERE
query in (
'/json/send_message',
'/api/v1/send_message',
'send_message_backend',
'/json/update_pointer',
'/json/users/me/pointer'
)
AND
up.realm_id = realm.id
AND
last_visit > now() - interval '2 week'
)
ORDER BY active_user_count DESC, string_id ASC
'''
cursor = connection.cursor()
cursor.execute(query)
rows = dictfetchall(cursor)
cursor.close()
# get messages sent per day
counts = get_realm_day_counts()
for row in rows:
try:
row['history'] = counts[row['string_id']]['cnts']
except Exception:
row['history'] = ''
# augment data with realm_minutes
total_hours = 0.0
for row in rows:
string_id = row['string_id']
minutes = realm_minutes.get(string_id, 0.0)
hours = minutes / 60.0
total_hours += hours
row['hours'] = str(int(hours))
try:
row['hours_per_user'] = '%.1f' % (hours / row['active_user_count'],)
except Exception:
pass
# formatting
for row in rows:
row['string_id'] = realm_activity_link(row['string_id'])
# Count active sites
def meets_goal(row):
# type: (Dict[str, int]) -> bool
return row['active_user_count'] >= 5
num_active_sites = len(list(filter(meets_goal, rows)))
# create totals
total_active_user_count = 0
total_user_profile_count = 0
total_bot_count = 0
total_at_risk_count = 0
for row in rows:
total_active_user_count += int(row['active_user_count'])
total_user_profile_count += int(row['user_profile_count'])
total_bot_count += int(row['bot_count'])
total_at_risk_count += int(row['at_risk_count'])
rows.append(dict(
string_id='Total',
active_user_count=total_active_user_count,
user_profile_count=total_user_profile_count,
bot_count=total_bot_count,
hours=int(total_hours),
at_risk_count=total_at_risk_count,
))
content = loader.render_to_string(
'analytics/realm_summary_table.html',
dict(rows=rows, num_active_sites=num_active_sites)
)
return content
def user_activity_intervals():
# type: () -> Tuple[mark_safe, Dict[str, float]]
day_end = timestamp_to_datetime(time.time())
day_start = day_end - timedelta(hours=24)
output = "Per-user online duration for the last 24 hours:\n"
total_duration = timedelta(0)
all_intervals = UserActivityInterval.objects.filter(
end__gte=day_start,
start__lte=day_end
).select_related(
'user_profile',
'user_profile__realm'
).only(
'start',
'end',
'user_profile__email',
'user_profile__realm__string_id'
).order_by(
'user_profile__realm__string_id',
'user_profile__email'
)
    def by_string_id(row):
        return row.user_profile.realm.string_id
    def by_email(row):
        return row.user_profile.email
realm_minutes = {}
for string_id, realm_intervals in itertools.groupby(all_intervals, by_string_id):
realm_duration = timedelta(0)
output += '<hr>%s\n' % (string_id,)
for email, intervals in itertools.groupby(realm_intervals, by_email):
duration = timedelta(0)
for interval in intervals:
start = max(day_start, interval.start)
end = min(day_end, interval.end)
duration += end - start
total_duration += duration
realm_duration += duration
output += " %-*s%s\n" % (37, email, duration)
realm_minutes[string_id] = realm_duration.total_seconds() / 60
output += "\nTotal Duration: %s\n" % (total_duration,)
output += "\nTotal Duration in minutes: %s\n" % (total_duration.total_seconds() / 60.,)
output += "Total Duration amortized to a month: %s" % (total_duration.total_seconds() * 30. / 60.,)
content = mark_safe('<pre>' + output + '</pre>')
return content, realm_minutes
def sent_messages_report(realm):
# type: (str) -> str
title = 'Recently sent messages for ' + realm
cols = [
'Date',
'Humans',
'Bots'
]
query = '''
select
series.day::date,
humans.cnt,
bots.cnt
from (
select generate_series(
(now()::date - interval '2 week'),
now()::date,
interval '1 day'
) as day
) as series
left join (
select
pub_date::date pub_date,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
where
r.string_id = %s
and
(not up.is_bot)
and
pub_date > now() - interval '2 week'
group by
pub_date::date
order by
pub_date::date
) humans on
series.day = humans.pub_date
left join (
select
pub_date::date pub_date,
count(*) cnt
from zerver_message m
join zerver_userprofile up on up.id = m.sender_id
join zerver_realm r on r.id = up.realm_id
where
r.string_id = %s
and
up.is_bot
and
pub_date > now() - interval '2 week'
group by
pub_date::date
order by
pub_date::date
) bots on
series.day = bots.pub_date
'''
cursor = connection.cursor()
cursor.execute(query, [realm, realm])
rows = cursor.fetchall()
cursor.close()
return make_table(title, cols, rows)
def ad_hoc_queries():
# type: () -> List[Dict[str, str]]
def get_page(query, cols, title):
# type: (str, List[str], str) -> Dict[str, str]
cursor = connection.cursor()
cursor.execute(query)
rows = cursor.fetchall()
rows = list(map(list, rows))
cursor.close()
def fix_rows(i, fixup_func):
# type: (int, Union[Callable[[Realm], mark_safe], Callable[[datetime], str]]) -> None
for row in rows:
row[i] = fixup_func(row[i])
for i, col in enumerate(cols):
if col == 'Realm':
fix_rows(i, realm_activity_link)
elif col in ['Last time', 'Last visit']:
fix_rows(i, format_date_for_activity_reports)
content = make_table(title, cols, rows)
return dict(
content=content,
title=title
)
pages = []
###
for mobile_type in ['Android', 'ZulipiOS']:
title = '%s usage' % (mobile_type,)
query = '''
select
realm.string_id,
up.id user_id,
client.name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
client.name like '%s'
group by string_id, up.id, client.name
having max(last_visit) > now() - interval '2 week'
order by string_id, up.id, client.name
''' % (mobile_type,)
cols = [
'Realm',
'User id',
'Name',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
###
title = 'Desktop users'
query = '''
select
realm.string_id,
client.name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
client.name like 'desktop%%'
group by string_id, client.name
having max(last_visit) > now() - interval '2 week'
order by string_id, client.name
'''
cols = [
'Realm',
'Client',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
###
title = 'Integrations by realm'
query = '''
select
realm.string_id,
case
when query like '%%external%%' then split_part(query, '/', 5)
else client.name
end client_name,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
(query in ('send_message_backend', '/api/v1/send_message')
and client.name not in ('Android', 'ZulipiOS')
and client.name not like 'test: Zulip%%'
)
or
query like '%%external%%'
group by string_id, client_name
having max(last_visit) > now() - interval '2 week'
order by string_id, client_name
'''
cols = [
'Realm',
'Client',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
###
title = 'Integrations by client'
query = '''
select
case
when query like '%%external%%' then split_part(query, '/', 5)
else client.name
end client_name,
realm.string_id,
sum(count) as hits,
max(last_visit) as last_time
from zerver_useractivity ua
join zerver_client client on client.id = ua.client_id
join zerver_userprofile up on up.id = ua.user_profile_id
join zerver_realm realm on realm.id = up.realm_id
where
(query in ('send_message_backend', '/api/v1/send_message')
and client.name not in ('Android', 'ZulipiOS')
and client.name not like 'test: Zulip%%'
)
or
query like '%%external%%'
group by client_name, string_id
having max(last_visit) > now() - interval '2 week'
order by client_name, string_id
'''
cols = [
'Client',
'Realm',
'Hits',
'Last time'
]
pages.append(get_page(query, cols, title))
return pages
@zulip_internal
@has_request_variables
def get_activity(request):
# type: (HttpRequest) -> HttpResponse
duration_content, realm_minutes = user_activity_intervals() # type: Tuple[mark_safe, Dict[str, float]]
counts_content = realm_summary_table(realm_minutes) # type: str
data = [
('Counts', counts_content),
('Durations', duration_content),
]
for page in ad_hoc_queries():
data.append((page['title'], page['content']))
title = 'Activity'
return render_to_response(
'analytics/activity.html',
dict(data=data, title=title, is_home=True),
request=request
)
def get_user_activity_records_for_realm(realm, is_bot):
# type: (str, bool) -> QuerySet
fields = [
'user_profile__full_name',
'user_profile__email',
'query',
'client__name',
'count',
'last_visit',
]
records = UserActivity.objects.filter(
user_profile__realm__string_id=realm,
user_profile__is_active=True,
user_profile__is_bot=is_bot
)
records = records.order_by("user_profile__email", "-last_visit")
records = records.select_related('user_profile', 'client').only(*fields)
return records
def get_user_activity_records_for_email(email):
# type: (str) -> List[QuerySet]
fields = [
'user_profile__full_name',
'query',
'client__name',
'count',
'last_visit'
]
records = UserActivity.objects.filter(
user_profile__email=email
)
records = records.order_by("-last_visit")
records = records.select_related('user_profile', 'client').only(*fields)
return records
def raw_user_activity_table(records):
# type: (List[QuerySet]) -> str
cols = [
'query',
'client',
'count',
'last_visit'
]
def row(record):
# type: (QuerySet) -> List[Any]
return [
record.query,
record.client.name,
record.count,
format_date_for_activity_reports(record.last_visit)
]
rows = list(map(row, records))
title = 'Raw Data'
return make_table(title, cols, rows)
def get_user_activity_summary(records):
# type: (List[QuerySet]) -> Dict[str, Dict[str, Any]]
    #: The `Any` above should be `Union[int, datetime]`, but the version of
    #: `Union` in use does not work when nested inside another function.
    #: We could use something like
    #: `Union[Dict[str, Dict[str, int]], Dict[str, Dict[str, datetime]]]`,
    #: but that long `Union` would then have to be carried through all the
    #: inner functions.
summary = {} # type: Dict[str, Dict[str, Any]]
def update(action, record):
# type: (str, QuerySet) -> None
if action not in summary:
summary[action] = dict(
count=record.count,
last_visit=record.last_visit
)
else:
summary[action]['count'] += record.count
summary[action]['last_visit'] = max(
summary[action]['last_visit'],
record.last_visit
)
if records:
summary['name'] = records[0].user_profile.full_name
for record in records:
client = record.client.name
query = record.query
update('use', record)
        if client == 'API':
            m = re.match('/api/.*/external/(.*)', query)
            if m:
                # Attribute the record to the integration name; the
                # unconditional update(client, record) at the end of the
                # loop does the counting, so don't also count it here.
                client = m.group(1)
if client.startswith('desktop'):
update('desktop', record)
if client == 'website':
update('website', record)
if ('send_message' in query) or re.search('/api/.*/external/.*', query):
update('send', record)
if query in ['/json/update_pointer', '/json/users/me/pointer', '/api/v1/update_pointer']:
update('pointer', record)
update(client, record)
return summary
def format_date_for_activity_reports(date):
# type: (Optional[datetime]) -> str
if date:
return date.astimezone(eastern_tz).strftime('%Y-%m-%d %H:%M')
else:
return ''
def user_activity_link(email):
# type: (str) -> mark_safe
url_name = 'analytics.views.get_user_activity'
url = urlresolvers.reverse(url_name, kwargs=dict(email=email))
email_link = '<a href="%s">%s</a>' % (url, email)
return mark_safe(email_link)
def realm_activity_link(realm_str):
# type: (str) -> mark_safe
url_name = 'analytics.views.get_realm_activity'
url = urlresolvers.reverse(url_name, kwargs=dict(realm_str=realm_str))
realm_link = '<a href="%s">%s</a>' % (url, realm_str)
return mark_safe(realm_link)
def realm_client_table(user_summaries):
# type: (Dict[str, Dict[str, Dict[str, Any]]]) -> str
exclude_keys = [
'internal',
'name',
'use',
'send',
'pointer',
'website',
'desktop',
]
rows = []
for email, user_summary in user_summaries.items():
email_link = user_activity_link(email)
name = user_summary['name']
for k, v in user_summary.items():
if k in exclude_keys:
continue
client = k
count = v['count']
last_visit = v['last_visit']
row = [
format_date_for_activity_reports(last_visit),
client,
name,
email_link,
count,
]
rows.append(row)
rows = sorted(rows, key=lambda r: r[0], reverse=True)
cols = [
'Last visit',
'Client',
'Name',
'Email',
'Count',
]
title = 'Clients'
return make_table(title, cols, rows)
def user_activity_summary_table(user_summary):
# type: (Dict[str, Dict[str, Any]]) -> str
rows = []
for k, v in user_summary.items():
if k == 'name':
continue
client = k
count = v['count']
last_visit = v['last_visit']
row = [
format_date_for_activity_reports(last_visit),
client,
count,
]
rows.append(row)
rows = sorted(rows, key=lambda r: r[0], reverse=True)
cols = [
'last_visit',
'client',
'count',
]
title = 'User Activity'
return make_table(title, cols, rows)
def realm_user_summary_table(all_records, admin_emails):
# type: (List[QuerySet], Set[Text]) -> Tuple[Dict[str, Dict[str, Any]], str]
user_records = {}
def by_email(record):
# type: (QuerySet) -> str
return record.user_profile.email
for email, records in itertools.groupby(all_records, by_email):
user_records[email] = get_user_activity_summary(list(records))
def get_last_visit(user_summary, k):
# type: (Dict[str, Dict[str, datetime]], str) -> Optional[datetime]
if k in user_summary:
return user_summary[k]['last_visit']
else:
return None
def get_count(user_summary, k):
# type: (Dict[str, Dict[str, str]], str) -> str
if k in user_summary:
return user_summary[k]['count']
else:
return ''
def is_recent(val):
        # type: (datetime) -> bool
age = datetime.now(val.tzinfo) - val
return age.total_seconds() < 5 * 60
rows = []
for email, user_summary in user_records.items():
email_link = user_activity_link(email)
sent_count = get_count(user_summary, 'send')
cells = [user_summary['name'], email_link, sent_count]
row_class = ''
for field in ['use', 'send', 'pointer', 'desktop', 'ZulipiOS', 'Android']:
visit = get_last_visit(user_summary, field)
if field == 'use':
if visit and is_recent(visit):
row_class += ' recently_active'
if email in admin_emails:
row_class += ' admin'
val = format_date_for_activity_reports(visit)
cells.append(val)
row = dict(cells=cells, row_class=row_class)
rows.append(row)
def by_used_time(row):
# type: (Dict[str, Sequence[str]]) -> str
return row['cells'][3]
rows = sorted(rows, key=by_used_time, reverse=True)
cols = [
'Name',
'Email',
'Total sent',
'Heard from',
'Message sent',
'Pointer motion',
'Desktop',
'ZulipiOS',
'Android',
]
title = 'Summary'
content = make_table(title, cols, rows, has_row_class=True)
return user_records, content
@zulip_internal
def get_realm_activity(request, realm_str):
# type: (HttpRequest, str) -> HttpResponse
data = [] # type: List[Tuple[str, str]]
all_user_records = {} # type: Dict[str, Any]
try:
admins = Realm.objects.get(string_id=realm_str).get_admin_users()
except Realm.DoesNotExist:
return HttpResponseNotFound("Realm %s does not exist" % (realm_str,))
admin_emails = {admin.email for admin in admins}
for is_bot, page_title in [(False, 'Humans'), (True, 'Bots')]:
all_records = list(get_user_activity_records_for_realm(realm_str, is_bot))
user_records, content = realm_user_summary_table(all_records, admin_emails)
all_user_records.update(user_records)
data += [(page_title, content)]
page_title = 'Clients'
content = realm_client_table(all_user_records)
data += [(page_title, content)]
page_title = 'History'
content = sent_messages_report(realm_str)
data += [(page_title, content)]
realm_link = 'https://stats1.zulip.net:444/render/?from=-7days'
realm_link += '&target=stats.gauges.staging.users.active.%s.0_16hr' % (realm_str,)
title = realm_str
return render_to_response(
'analytics/activity.html',
dict(data=data, realm_link=realm_link, title=title),
request=request
)
@zulip_internal
def get_user_activity(request, email):
# type: (HttpRequest, str) -> HttpResponse
records = get_user_activity_records_for_email(email)
data = [] # type: List[Tuple[str, str]]
user_summary = get_user_activity_summary(records)
content = user_activity_summary_table(user_summary)
data += [('Summary', content)]
content = raw_user_activity_table(records)
data += [('Info', content)]
title = email
return render_to_response(
'analytics/activity.html',
dict(data=data, title=title),
request=request
)
|
|
from bingads.service_client import _CAMPAIGN_OBJECT_FACTORY_V13
from bingads.v13.internal.bulk.string_table import _StringTable
from bingads.v13.internal.bulk.entities.single_record_bulk_entity import _SingleRecordBulkEntity
from bingads.v13.internal.bulk.mappings import _SimpleBulkMapping
from bingads.v13.internal.extensions import *
# Define the ad types used for the isinstance checks below
ProductAd = type(_CAMPAIGN_OBJECT_FACTORY_V13.create('ProductAd'))
TextAd = type(_CAMPAIGN_OBJECT_FACTORY_V13.create('TextAd'))
AppInstallAd = type(_CAMPAIGN_OBJECT_FACTORY_V13.create('AppInstallAd'))
ExpandedTextAd = type(_CAMPAIGN_OBJECT_FACTORY_V13.create('ExpandedTextAd'))
DynamicSearchAd = type(_CAMPAIGN_OBJECT_FACTORY_V13.create('DynamicSearchAd'))
ResponsiveSearchAd = type(_CAMPAIGN_OBJECT_FACTORY_V13.create('ResponsiveSearchAd'))
ResponsiveAd = type(_CAMPAIGN_OBJECT_FACTORY_V13.create('ResponsiveAd'))
class _BulkAd(_SingleRecordBulkEntity):
""" This abstract base class provides properties that are shared by all bulk ad classes.
*See also:*
* :class:`.BulkProductAd`
* :class:`.BulkTextAd`
* :class:`.BulkAppInstallAd`
* :class:`.BulkExpandedTextAd`
* :class:`.BulkDynamicSearchAd`
* :class:`.BulkResponsiveAd`
* :class:`.BulkResponsiveSearchAd`
"""
def __init__(self,
ad_group_id=None,
campaign_name=None,
ad_group_name=None,
ad=None):
super(_BulkAd, self).__init__()
self._ad_group_id = ad_group_id
self._campaign_name = campaign_name
self._ad_group_name = ad_group_name
self._ad = ad
self._performance_data = None
@property
def ad_group_id(self):
""" The identifier of the ad group that contains the ad.
Corresponds to the 'Parent Id' field in the bulk file.
:rtype: int
"""
return self._ad_group_id
@ad_group_id.setter
def ad_group_id(self, ad_group_id):
self._ad_group_id = ad_group_id
@property
def campaign_name(self):
""" The name of the campaign that contains the ad.
Corresponds to the 'Campaign' field in the bulk file.
:rtype: str
"""
return self._campaign_name
@campaign_name.setter
def campaign_name(self, campaign_name):
self._campaign_name = campaign_name
@property
def ad_group_name(self):
""" The name of the ad group that contains the ad.
Corresponds to the 'Ad Group' field in the bulk file.
:rtype: str
"""
return self._ad_group_name
@ad_group_name.setter
def ad_group_name(self, ad_group_name):
self._ad_group_name = ad_group_name
@property
def ad(self):
""" The type of ad.
"""
return self._ad
@ad.setter
def ad(self, ad):
self._ad = ad
_MAPPINGS = [
_SimpleBulkMapping(
header=_StringTable.Status,
field_to_csv=lambda c: bulk_str(c.ad.Status),
csv_to_field=lambda c, v: setattr(c.ad, 'Status', v if v else None)
),
_SimpleBulkMapping(
header=_StringTable.Id,
field_to_csv=lambda c: bulk_str(c.ad.Id),
csv_to_field=lambda c, v: setattr(c.ad, 'Id', int(v) if v else None)
),
_SimpleBulkMapping(
header=_StringTable.ParentId,
field_to_csv=lambda c: bulk_str(c.ad_group_id),
csv_to_field=lambda c, v: setattr(c, '_ad_group_id', int(v) if v else None)
),
_SimpleBulkMapping(
header=_StringTable.Campaign,
field_to_csv=lambda c: c.campaign_name,
csv_to_field=lambda c, v: setattr(c, '_campaign_name', v)
),
_SimpleBulkMapping(
header=_StringTable.AdGroup,
field_to_csv=lambda c: c.ad_group_name,
csv_to_field=lambda c, v: setattr(c, '_ad_group_name', v)
),
_SimpleBulkMapping(
header=_StringTable.EditorialStatus,
field_to_csv=lambda c: c.ad.EditorialStatus,
csv_to_field=lambda c, v: setattr(c.ad, 'EditorialStatus', v if v else None)
),
_SimpleBulkMapping(
header=_StringTable.DevicePreference,
field_to_csv=lambda c: bulk_device_preference_str(c.ad.DevicePreference),
csv_to_field=lambda c, v: setattr(c.ad, 'DevicePreference', parse_device_preference(v))
),
_SimpleBulkMapping(
header=_StringTable.AdFormatPreference,
field_to_csv=lambda c: bulk_str(c.ad.AdFormatPreference),
csv_to_field=lambda c, v: setattr(c.ad, 'AdFormatPreference', v if v else None)
),
_SimpleBulkMapping(
header=_StringTable.FinalUrl,
field_to_csv=lambda c: field_to_csv_Urls(c.ad.FinalUrls, c.ad.Id),
csv_to_field=lambda c, v: csv_to_field_Urls(c.ad.FinalUrls, v)
),
_SimpleBulkMapping(
header=_StringTable.FinalMobileUrl,
field_to_csv=lambda c: field_to_csv_Urls(c.ad.FinalMobileUrls, c.ad.Id),
csv_to_field=lambda c, v: csv_to_field_Urls(c.ad.FinalMobileUrls, v)
),
_SimpleBulkMapping(
header=_StringTable.TrackingTemplate,
field_to_csv=lambda c: bulk_str(c.ad.TrackingUrlTemplate),
csv_to_field=lambda c, v: setattr(c.ad, 'TrackingUrlTemplate', v if v else None)
),
_SimpleBulkMapping(
header=_StringTable.CustomParameter,
field_to_csv=lambda c: field_to_csv_UrlCustomParameters(c.ad),
csv_to_field=lambda c, v: csv_to_field_UrlCustomParameters(c.ad, v)
),
_SimpleBulkMapping(
header=_StringTable.FinalUrlSuffix,
field_to_csv=lambda c: bulk_optional_str(c.ad.FinalUrlSuffix, c.ad.Id),
csv_to_field=lambda c, v: setattr(c.ad, 'FinalUrlSuffix', v if v else None)
)
]
def process_mappings_to_row_values(self, row_values, exclude_readonly_data):
self.convert_to_values(row_values, _BulkAd._MAPPINGS)
def process_mappings_from_row_values(self, row_values):
row_values.convert_to_entity(self, _BulkAd._MAPPINGS)
def read_additional_data(self, stream_reader):
super(_BulkAd, self).read_additional_data(stream_reader)
class BulkProductAd(_BulkAd):
""" Represents a product ad.
This class exposes the :attr:`product_ad` property that can be read and written as fields of the Product Ad record in a bulk file.
For more information, see Product Ad at https://go.microsoft.com/fwlink/?linkid=846127.
*See also:*
* :class:`.BulkServiceManager`
* :class:`.BulkOperation`
* :class:`.BulkFileReader`
* :class:`.BulkFileWriter`
"""
def __init__(self,
ad_group_id=None,
campaign_name=None,
ad_group_name=None,
ad=None):
super(BulkProductAd, self).__init__(
ad_group_id,
campaign_name,
ad_group_name,
ad
)
self.product_ad = ad
@property
def product_ad(self):
""" The product ad.
See Product Ad at: https://go.microsoft.com/fwlink/?linkid=846127.
"""
return self._ad
@product_ad.setter
def product_ad(self, product_ad):
if product_ad is not None and not isinstance(product_ad, ProductAd):
raise ValueError('Not an instance of ProductAd')
self._ad = product_ad
_MAPPINGS = [
_SimpleBulkMapping(
header=_StringTable.PromotionalText,
field_to_csv=lambda c: bulk_optional_str(c.product_ad.PromotionalText, c.product_ad.Id),
csv_to_field=lambda c, v: setattr(c.product_ad, 'PromotionalText', v if v else '')
),
]
def process_mappings_from_row_values(self, row_values):
self.product_ad = _CAMPAIGN_OBJECT_FACTORY_V13.create('ProductAd')
self.product_ad.Type = 'Product'
super(BulkProductAd, self).process_mappings_from_row_values(row_values)
row_values.convert_to_entity(self, BulkProductAd._MAPPINGS)
def process_mappings_to_row_values(self, row_values, exclude_readonly_data):
self._validate_property_not_null(self.product_ad, 'product_ad')
super(BulkProductAd, self).process_mappings_to_row_values(row_values, exclude_readonly_data)
self.convert_to_values(row_values, BulkProductAd._MAPPINGS)
class BulkTextAd(_BulkAd):
""" Represents a Text Ad.
This class exposes the :attr:`text_ad` property that can be read and written as fields of the Text Ad record in a bulk file.
For more information, see Text Ad at https://go.microsoft.com/fwlink/?linkid=846127.
*See also:*
* :class:`.BulkServiceManager`
* :class:`.BulkOperation`
* :class:`.BulkFileReader`
* :class:`.BulkFileWriter`
"""
def __init__(self,
ad_group_id=None,
campaign_name=None,
ad_group_name=None,
ad=None):
super(BulkTextAd, self).__init__(
ad_group_id,
campaign_name,
ad_group_name,
ad,
)
self.text_ad = ad
@property
def text_ad(self):
""" The text ad.
see Text Ad at https://go.microsoft.com/fwlink/?linkid=846127.
"""
return self._ad
@text_ad.setter
def text_ad(self, text_ad):
if text_ad is not None and not isinstance(text_ad, TextAd):
raise ValueError('Not an instance of TextAd')
self._ad = text_ad
_MAPPINGS = [
_SimpleBulkMapping(
header=_StringTable.Title,
field_to_csv=lambda c: c.text_ad.Title,
csv_to_field=lambda c, v: setattr(c.text_ad, 'Title', v)
),
_SimpleBulkMapping(
header=_StringTable.Text,
field_to_csv=lambda c: c.text_ad.Text,
csv_to_field=lambda c, v: setattr(c.text_ad, 'Text', v)
),
_SimpleBulkMapping(
header=_StringTable.DisplayUrl,
field_to_csv=lambda c: bulk_optional_str(c.text_ad.DisplayUrl, c.text_ad.Id),
csv_to_field=lambda c, v: setattr(c.text_ad, 'DisplayUrl', v if v else '')
),
_SimpleBulkMapping(
header=_StringTable.DestinationUrl,
field_to_csv=lambda c: bulk_optional_str(c.text_ad.DestinationUrl, c.text_ad.Id),
csv_to_field=lambda c, v: setattr(c.text_ad, 'DestinationUrl', v if v else '')
),
]
def process_mappings_from_row_values(self, row_values):
self.text_ad = _CAMPAIGN_OBJECT_FACTORY_V13.create('TextAd')
self.text_ad.Type = 'Text'
super(BulkTextAd, self).process_mappings_from_row_values(row_values)
row_values.convert_to_entity(self, BulkTextAd._MAPPINGS)
def process_mappings_to_row_values(self, row_values, exclude_readonly_data):
self._validate_property_not_null(self.text_ad, 'text_ad')
super(BulkTextAd, self).process_mappings_to_row_values(row_values, exclude_readonly_data)
self.convert_to_values(row_values, BulkTextAd._MAPPINGS)
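# A minimal usage sketch (not part of the SDK source): build a BulkTextAd and
# hand it to a bulk file writer. The BulkFileWriter import path and the file
# name below are assumptions based on common Bing Ads SDK usage, not verbatim
# API documentation.
#
#     from bingads.v13.bulk import BulkFileWriter
#
#     text_ad = _CAMPAIGN_OBJECT_FACTORY_V13.create('TextAd')
#     text_ad.Title = 'Contoso Quick Setup'
#     text_ad.Text = 'Find new customers and increase sales!'
#     bulk_text_ad = BulkTextAd(ad_group_id=123456789, ad=text_ad)
#
#     writer = BulkFileWriter('upload.csv')  # placeholder path
#     writer.write_entity(bulk_text_ad)
#     writer.close()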
class BulkAppInstallAd(_BulkAd):
""" Represents an App Install Ad.
This class exposes the :attr:`app_install_ad` property that can be read and written as fields of the App Install Ad record in a bulk file.
For more information, see App Install Ad at https://go.microsoft.com/fwlink/?linkid=846127.
*See also:*
* :class:`.BulkServiceManager`
* :class:`.BulkOperation`
* :class:`.BulkFileReader`
* :class:`.BulkFileWriter`
"""
def __init__(self,
ad_group_id=None,
campaign_name=None,
ad_group_name=None,
ad=None):
super(BulkAppInstallAd, self).__init__(
ad_group_id,
campaign_name,
ad_group_name,
ad,
)
self.app_install_ad = ad
@property
def app_install_ad(self):
""" The App Install Ad.
see App Install Ad at https://go.microsoft.com/fwlink/?linkid=846127.
"""
return self._ad
@app_install_ad.setter
def app_install_ad(self, app_install_ad):
if app_install_ad is not None and not isinstance(app_install_ad, AppInstallAd):
raise ValueError('Not an instance of AppInstallAd')
self._ad = app_install_ad
_MAPPINGS = [
_SimpleBulkMapping(
header=_StringTable.AppPlatform,
field_to_csv=lambda c: c.app_install_ad.AppPlatform,
csv_to_field=lambda c, v: setattr(c.app_install_ad, 'AppPlatform', v)
),
_SimpleBulkMapping(
header=_StringTable.AppStoreId,
field_to_csv=lambda c: c.app_install_ad.AppStoreId,
csv_to_field=lambda c, v: setattr(c.app_install_ad, 'AppStoreId', v)
),
_SimpleBulkMapping(
header=_StringTable.Title,
field_to_csv=lambda c: c.app_install_ad.Title,
csv_to_field=lambda c, v: setattr(c.app_install_ad, 'Title', v)
),
_SimpleBulkMapping(
header=_StringTable.Text,
field_to_csv=lambda c: c.app_install_ad.Text,
csv_to_field=lambda c, v: setattr(c.app_install_ad, 'Text', v)
),
]
def process_mappings_from_row_values(self, row_values):
self.app_install_ad = _CAMPAIGN_OBJECT_FACTORY_V13.create('AppInstallAd')
self.app_install_ad.Type = 'AppInstall'
super(BulkAppInstallAd, self).process_mappings_from_row_values(row_values)
row_values.convert_to_entity(self, BulkAppInstallAd._MAPPINGS)
def process_mappings_to_row_values(self, row_values, exclude_readonly_data):
self._validate_property_not_null(self.app_install_ad, 'app_install_ad')
super(BulkAppInstallAd, self).process_mappings_to_row_values(row_values, exclude_readonly_data)
self.convert_to_values(row_values, BulkAppInstallAd._MAPPINGS)
class BulkExpandedTextAd(_BulkAd):
""" Represents an Expanded Text Ad.
This class exposes the :attr:`expanded_text_ad` property that can be read and written as fields of the Expanded Text Ad record in a bulk file.
For more information, see Expanded Text Ad at https://go.microsoft.com/fwlink/?linkid=846127.
*See also:*
* :class:`.BulkServiceManager`
* :class:`.BulkOperation`
* :class:`.BulkFileReader`
* :class:`.BulkFileWriter`
"""
def __init__(self,
ad_group_id=None,
campaign_name=None,
ad_group_name=None,
ad=None):
super(BulkExpandedTextAd, self).__init__(
ad_group_id,
campaign_name,
ad_group_name,
ad,
)
self.expanded_text_ad = ad
@property
def expanded_text_ad(self):
""" The Expanded Text Ad.
see Expanded Text Ad at https://go.microsoft.com/fwlink/?linkid=846127.
"""
return self._ad
@expanded_text_ad.setter
def expanded_text_ad(self, expanded_text_ad):
if expanded_text_ad is not None and not isinstance(expanded_text_ad, ExpandedTextAd):
raise ValueError('Not an instance of ExpandedTextAd')
self._ad = expanded_text_ad
_MAPPINGS = [
_SimpleBulkMapping(
header=_StringTable.Text,
field_to_csv=lambda c: c.expanded_text_ad.Text,
csv_to_field=lambda c, v: setattr(c.expanded_text_ad, 'Text', v)
),
_SimpleBulkMapping(
header=_StringTable.TextPart2,
field_to_csv=lambda c: bulk_optional_str(c.expanded_text_ad.TextPart2, c.expanded_text_ad.Id),
csv_to_field=lambda c, v: setattr(c.expanded_text_ad, 'TextPart2', v if v else '')
),
_SimpleBulkMapping(
header=_StringTable.TitlePart1,
field_to_csv=lambda c: c.expanded_text_ad.TitlePart1,
csv_to_field=lambda c, v: setattr(c.expanded_text_ad, 'TitlePart1', v)
),
_SimpleBulkMapping(
header=_StringTable.TitlePart2,
field_to_csv=lambda c: c.expanded_text_ad.TitlePart2,
csv_to_field=lambda c, v: setattr(c.expanded_text_ad, 'TitlePart2', v)
),
_SimpleBulkMapping(
header=_StringTable.TitlePart3,
field_to_csv=lambda c: bulk_optional_str(c.expanded_text_ad.TitlePart3, c.expanded_text_ad.Id),
csv_to_field=lambda c, v: setattr(c.expanded_text_ad, 'TitlePart3', v if v else '')
),
_SimpleBulkMapping(
header=_StringTable.Path1,
field_to_csv=lambda c: bulk_optional_str(c.expanded_text_ad.Path1, c.expanded_text_ad.Id),
csv_to_field=lambda c, v: setattr(c.expanded_text_ad, 'Path1', v)
),
_SimpleBulkMapping(
header=_StringTable.Path2,
field_to_csv=lambda c: bulk_optional_str(c.expanded_text_ad.Path2, c.expanded_text_ad.Id),
csv_to_field=lambda c, v: setattr(c.expanded_text_ad, 'Path2', v)
),
_SimpleBulkMapping(
header=_StringTable.Domain,
field_to_csv=lambda c: bulk_optional_str(c.expanded_text_ad.Domain, c.expanded_text_ad.Id),
csv_to_field=lambda c, v: setattr(c.expanded_text_ad, 'Domain', v)
),
]
def process_mappings_from_row_values(self, row_values):
self.expanded_text_ad = _CAMPAIGN_OBJECT_FACTORY_V13.create('ExpandedTextAd')
self.expanded_text_ad.Type = 'ExpandedText'
super(BulkExpandedTextAd, self).process_mappings_from_row_values(row_values)
row_values.convert_to_entity(self, BulkExpandedTextAd._MAPPINGS)
def process_mappings_to_row_values(self, row_values, exclude_readonly_data):
self._validate_property_not_null(self.expanded_text_ad, 'expanded_text_ad')
super(BulkExpandedTextAd, self).process_mappings_to_row_values(row_values, exclude_readonly_data)
self.convert_to_values(row_values, BulkExpandedTextAd._MAPPINGS)
class BulkDynamicSearchAd(_BulkAd):
""" Represents a Dynamic Search Ad.
This class exposes the :attr:`dynamic_search_ad` property that can be read and written as fields of the Dynamic Search Ad record in a bulk file.
For more information, see Dynamic Search Ad at https://go.microsoft.com/fwlink/?linkid=836840.
*See also:*
* :class:`.BulkServiceManager`
* :class:`.BulkOperation`
* :class:`.BulkFileReader`
* :class:`.BulkFileWriter`
"""
def __init__(self,
ad_group_id=None,
campaign_name=None,
ad_group_name=None,
ad=None):
super(BulkDynamicSearchAd, self).__init__(
ad_group_id,
campaign_name,
ad_group_name,
ad,
)
self.dynamic_search_ad = ad
@property
def dynamic_search_ad(self):
""" The dynamic search ad.
see Dynamic Search Ad at https://go.microsoft.com/fwlink/?linkid=836840.
"""
return self._ad
@dynamic_search_ad.setter
def dynamic_search_ad(self, dynamic_search_ad):
if dynamic_search_ad is not None and not isinstance(dynamic_search_ad, DynamicSearchAd):
raise ValueError('Not an instance of DynamicSearchAd')
self._ad = dynamic_search_ad
_MAPPINGS = [
_SimpleBulkMapping(
header=_StringTable.Text,
field_to_csv=lambda c: c.dynamic_search_ad.Text,
csv_to_field=lambda c, v: setattr(c.dynamic_search_ad, 'Text', v)
),
_SimpleBulkMapping(
header=_StringTable.Path1,
field_to_csv=lambda c: c.dynamic_search_ad.Path1,
csv_to_field=lambda c, v: setattr(c.dynamic_search_ad, 'Path1', v)
),
_SimpleBulkMapping(
header=_StringTable.Path2,
field_to_csv=lambda c: c.dynamic_search_ad.Path2,
csv_to_field=lambda c, v: setattr(c.dynamic_search_ad, 'Path2', v)
),
_SimpleBulkMapping(
header=_StringTable.TextPart2,
field_to_csv=lambda c: c.dynamic_search_ad.TextPart2,
csv_to_field=lambda c, v: setattr(c.dynamic_search_ad, 'TextPart2', v)
),
]
def process_mappings_from_row_values(self, row_values):
self.dynamic_search_ad = _CAMPAIGN_OBJECT_FACTORY_V13.create('DynamicSearchAd')
self.dynamic_search_ad.Type = 'DynamicSearch'
super(BulkDynamicSearchAd, self).process_mappings_from_row_values(row_values)
row_values.convert_to_entity(self, BulkDynamicSearchAd._MAPPINGS)
def process_mappings_to_row_values(self, row_values, exclude_readonly_data):
self._validate_property_not_null(self.dynamic_search_ad, 'dynamic_search_ad')
super(BulkDynamicSearchAd, self).process_mappings_to_row_values(row_values, exclude_readonly_data)
self.convert_to_values(row_values, BulkDynamicSearchAd._MAPPINGS)
class BulkResponsiveAd(_BulkAd):
""" Represents a Responsive Ad.
This class exposes the :attr:`responsive_ad` property that can be read and written as fields of the Responsive Ad record in a bulk file.
For more information, see Responsive Ad at https://go.microsoft.com/fwlink/?linkid=836840.
*See also:*
* :class:`.BulkServiceManager`
* :class:`.BulkOperation`
* :class:`.BulkFileReader`
* :class:`.BulkFileWriter`
"""
def __init__(self,
ad_group_id=None,
campaign_name=None,
ad_group_name=None,
ad=None):
super(BulkResponsiveAd, self).__init__(
ad_group_id,
campaign_name,
ad_group_name,
ad,
)
self.responsive_ad = ad
@property
def responsive_ad(self):
""" The responsive ad.
see Responsive Ad at https://go.microsoft.com/fwlink/?linkid=836840.
"""
return self._ad
@responsive_ad.setter
def responsive_ad(self, responsive_ad):
if responsive_ad is not None and not isinstance(responsive_ad, ResponsiveAd):
raise ValueError('Not an instance of ResponsiveAd')
self._ad = responsive_ad
_MAPPINGS = [
_SimpleBulkMapping(
header=_StringTable.BusinessName,
field_to_csv=lambda c: c.responsive_ad.BusinessName,
csv_to_field=lambda c, v: setattr(c.responsive_ad, 'BusinessName', v)
),
_SimpleBulkMapping(
header=_StringTable.CallToAction,
field_to_csv=lambda c: c.responsive_ad.CallToAction,
csv_to_field=lambda c, v: setattr(c.responsive_ad, 'CallToAction', v if v else None)
),
_SimpleBulkMapping(
header=_StringTable.Headline,
field_to_csv=lambda c: c.responsive_ad.Headline,
csv_to_field=lambda c, v: setattr(c.responsive_ad, 'Headline', v)
),
_SimpleBulkMapping(
header=_StringTable.LongHeadline,
field_to_csv=lambda c: c.responsive_ad.LongHeadlineString,
csv_to_field=lambda c, v: setattr(c.responsive_ad, 'LongHeadlineString', v)
),
_SimpleBulkMapping(
header=_StringTable.LandscapeImageMediaId,
field_to_csv=lambda c: c.responsive_ad.LandscapeImageMediaId,
csv_to_field=lambda c, v: setattr(c.responsive_ad, 'LandscapeImageMediaId', int(v) if v else None)
),
_SimpleBulkMapping(
header=_StringTable.LandscapeLogoMediaId,
field_to_csv=lambda c: c.responsive_ad.LandscapeLogoMediaId,
csv_to_field=lambda c, v: setattr(c.responsive_ad, 'LandscapeLogoMediaId', int(v) if v else None)
),
_SimpleBulkMapping(
header=_StringTable.SquareImageMediaId,
field_to_csv=lambda c: c.responsive_ad.SquareImageMediaId,
csv_to_field=lambda c, v: setattr(c.responsive_ad, 'SquareImageMediaId', int(v) if v else None)
),
_SimpleBulkMapping(
header=_StringTable.SquareLogoMediaId,
field_to_csv=lambda c: c.responsive_ad.SquareLogoMediaId,
csv_to_field=lambda c, v: setattr(c.responsive_ad, 'SquareLogoMediaId', int(v) if v else None)
),
_SimpleBulkMapping(
header=_StringTable.Text,
field_to_csv=lambda c: c.responsive_ad.Text,
csv_to_field=lambda c, v: setattr(c.responsive_ad, 'Text', v)
),
_SimpleBulkMapping(
header=_StringTable.Images,
field_to_csv=lambda c: field_to_csv_ImageAssetLinks(c.responsive_ad.Images),
csv_to_field=lambda c, v: csv_to_field_ImageAssetLinks(c.responsive_ad.Images, v)
),
]
def process_mappings_from_row_values(self, row_values):
self.responsive_ad = _CAMPAIGN_OBJECT_FACTORY_V13.create('ResponsiveAd')
self.responsive_ad.Type = 'Responsive'
super(BulkResponsiveAd, self).process_mappings_from_row_values(row_values)
row_values.convert_to_entity(self, BulkResponsiveAd._MAPPINGS)
def process_mappings_to_row_values(self, row_values, exclude_readonly_data):
self._validate_property_not_null(self.responsive_ad, 'responsive_ad')
super(BulkResponsiveAd, self).process_mappings_to_row_values(row_values, exclude_readonly_data)
self.convert_to_values(row_values, BulkResponsiveAd._MAPPINGS)
class BulkResponsiveSearchAd(_BulkAd):
""" Represents a Responsive Search Ad.
This class exposes the :attr:`responsive_search_ad` property that can be read and written as fields of the Responsive Search Ad record in a bulk file.
For more information, see Responsive Search Ad at https://go.microsoft.com/fwlink/?linkid=836840.
*See also:*
* :class:`.BulkServiceManager`
* :class:`.BulkOperation`
* :class:`.BulkFileReader`
* :class:`.BulkFileWriter`
"""
def __init__(self,
ad_group_id=None,
campaign_name=None,
ad_group_name=None,
ad=None):
super(BulkResponsiveSearchAd, self).__init__(
ad_group_id,
campaign_name,
ad_group_name,
ad,
)
self.responsive_search_ad = ad
@property
def responsive_search_ad(self):
""" The responsive search ad.
see Responsive Search Ad at https://go.microsoft.com/fwlink/?linkid=836840.
"""
return self._ad
@responsive_search_ad.setter
def responsive_search_ad(self, rsa):
if rsa is not None and not isinstance(rsa, ResponsiveSearchAd):
raise ValueError('Not an instance of ResponsiveSearchAd')
self._ad = rsa
_MAPPINGS = [
_SimpleBulkMapping(
header=_StringTable.Path1,
field_to_csv=lambda c: bulk_optional_str(c.responsive_search_ad.Path1, c.responsive_search_ad.Id),
csv_to_field=lambda c, v: setattr(c.responsive_search_ad, 'Path1', v)
),
_SimpleBulkMapping(
header=_StringTable.Path2,
field_to_csv=lambda c: bulk_optional_str(c.responsive_search_ad.Path2, c.responsive_search_ad.Id),
csv_to_field=lambda c, v: setattr(c.responsive_search_ad, 'Path2', v)
),
_SimpleBulkMapping(
header=_StringTable.Domain,
field_to_csv=lambda c: bulk_optional_str(c.responsive_search_ad.Domain, c.responsive_search_ad.Id),
csv_to_field=lambda c, v: setattr(c.responsive_search_ad, 'Domain', v)
),
_SimpleBulkMapping(
header=_StringTable.Headline,
field_to_csv=lambda c: field_to_csv_Rsa_TextAssetLinks(c.responsive_search_ad.Headlines),
csv_to_field=lambda c, v: csv_to_field_Rsa_TextAssetLinks(c.responsive_search_ad.Headlines, v)
),
_SimpleBulkMapping(
header=_StringTable.Description,
field_to_csv=lambda c: field_to_csv_Rsa_TextAssetLinks(c.responsive_search_ad.Descriptions),
csv_to_field=lambda c, v: csv_to_field_Rsa_TextAssetLinks(c.responsive_search_ad.Descriptions, v)
)
]
def process_mappings_from_row_values(self, row_values):
self.responsive_search_ad = _CAMPAIGN_OBJECT_FACTORY_V13.create('ResponsiveSearchAd')
self.responsive_search_ad.Type = 'ResponsiveSearch'
super(BulkResponsiveSearchAd, self).process_mappings_from_row_values(row_values)
row_values.convert_to_entity(self, BulkResponsiveSearchAd._MAPPINGS)
def process_mappings_to_row_values(self, row_values, exclude_readonly_data):
self._validate_property_not_null(self.responsive_search_ad, 'responsive_search_ad')
super(BulkResponsiveSearchAd, self).process_mappings_to_row_values(row_values, exclude_readonly_data)
self.convert_to_values(row_values, BulkResponsiveSearchAd._MAPPINGS)
|
|
# Copyright 2013-2015 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, logging, traceback, time
from cassandra import ConsistencyLevel, OperationTimedOut, ReadTimeout, WriteTimeout, ReadFailure, WriteFailure,\
FunctionFailure
from cassandra.cluster import Cluster
from cassandra.concurrent import execute_concurrent_with_args
from cassandra.query import SimpleStatement
from tests.integration import use_singledc, PROTOCOL_VERSION, get_cluster, setup_keyspace, remove_cluster, get_node
from mock import Mock
try:
import unittest2 as unittest
except ImportError:
import unittest
log = logging.getLogger(__name__)
def setup_module():
"""
We need some custom setup for this module. All unit tests in this module
require protocol >=4. We won't bother going through the setup required unless that is the
protocol version we are using.
"""
# If we aren't on protocol v4 or greater, don't waste time setting anything up; all tests will be skipped
if PROTOCOL_VERSION >= 4:
use_singledc(start=False)
ccm_cluster = get_cluster()
ccm_cluster.stop()
config_options = {'tombstone_failure_threshold': 2000, 'tombstone_warn_threshold': 1000}
ccm_cluster.set_configuration_options(config_options)
ccm_cluster.start(wait_for_binary_proto=True, wait_other_notice=True)
setup_keyspace()
def teardown_module():
"""
The rest of the tests don't need custom tombstone settings,
so remove the cluster to avoid interfering with other tests.
"""
if PROTOCOL_VERSION >= 4:
remove_cluster()
class ClientExceptionTests(unittest.TestCase):
def setUp(self):
"""
Test is skipped if run with native protocol version <4
"""
if PROTOCOL_VERSION < 4:
raise unittest.SkipTest(
"Native protocol 4.0+ is required for custom payloads, currently using %r"
% (PROTOCOL_VERSION,))
self.cluster = Cluster(protocol_version=PROTOCOL_VERSION)
self.session = self.cluster.connect()
self.nodes_currently_failing = []
self.node1, self.node2, self.node3 = get_cluster().nodes.values()
def tearDown(self):
self.cluster.shutdown()
failing_nodes = []
# Restart the nodes so they are fully functional again
self.setFailingNodes(failing_nodes, "testksfail")
def execute_helper(self, session, query):
tries = 0
while tries < 100:
try:
return session.execute(query)
except OperationTimedOut:
ex_type, ex, tb = sys.exc_info()
log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb)))
del tb
tries += 1
raise RuntimeError("Failed to execute query after 100 attempts: {0}".format(query))
def execute_concurrent_args_helper(self, session, query, params):
tries = 0
while tries < 100:
try:
return execute_concurrent_with_args(session, query, params, concurrency=50)
except (ReadTimeout, WriteTimeout, OperationTimedOut, ReadFailure, WriteFailure):
ex_type, ex, tb = sys.exc_info()
log.warn("{0}: {1} Backtrace: {2}".format(ex_type.__name__, ex, traceback.extract_tb(tb)))
del tb
tries += 1
raise RuntimeError("Failed to execute query after 100 attempts: {0}".format(query))
def setFailingNodes(self, failing_nodes, keyspace):
"""
This method will take in a set of failing nodes, and toggle all of the nodes in the provided list to fail
writes.
@param failing_nodes A definitive list of nodes that should fail writes
@param keyspace The keyspace to enable failures on
"""
# Ensure all of the nodes on the list have failures enabled
for node in failing_nodes:
if node not in self.nodes_currently_failing:
node.stop(wait_other_notice=True, gently=False)
node.start(jvm_args=[" -Dcassandra.test.fail_writes_ks=" + keyspace], wait_for_binary_proto=True,
wait_other_notice=True)
self.nodes_currently_failing.append(node)
# Ensure any nodes that are currently failing but not on the list are restored
for node in self.nodes_currently_failing:
if node not in failing_nodes:
node.stop(wait_other_notice=True, gently=False)
node.start(wait_for_binary_proto=True, wait_other_notice=True)
self.nodes_currently_failing.remove(node)
def _perform_cql_statement(self, text, consistency_level, expected_exception):
"""
Simple helper method to perform CQL statements and check for an expected exception
@param text CQL statement to execute
@param consistency_level Consistency level at which it is to be executed
@param expected_exception Exception expected to be thrown, or None
"""
statement = SimpleStatement(text)
statement.consistency_level = consistency_level
if expected_exception is None:
self.execute_helper(self.session, statement)
else:
with self.assertRaises(expected_exception):
self.execute_helper(self.session, statement)
def test_write_failures_from_coordinator(self):
"""
Test to validate that write failures from the coordinator are surfaced appropriately.
test_write_failures_from_coordinator enables write failures on the various nodes using a custom JVM flag,
cassandra.test.fail_writes_ks, which causes writes to fail on those specific nodes. Depending on the replication
factor of the keyspace and the consistency level, we expect the coordinator to surface a WriteFailure, or not.
@since 2.6.0
@jira_ticket PYTHON-238
@expected_result Appropriate write failures from the coordinator
@test_category queries:basic
"""
# Setup temporary keyspace.
self._perform_cql_statement(
"""
CREATE KEYSPACE testksfail
WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '3'}
""", consistency_level=ConsistencyLevel.ALL, expected_exception=None)
# create table
self._perform_cql_statement(
"""
CREATE TABLE testksfail.test (
k int PRIMARY KEY,
v int )
""", consistency_level=ConsistencyLevel.ALL, expected_exception=None)
# Disable one node
failing_nodes = [self.node1]
self.setFailingNodes(failing_nodes, "testksfail")
# With one node disabled we would expect a write failure with ConsistencyLevel of all
self._perform_cql_statement(
"""
INSERT INTO testksfail.test (k, v) VALUES (1, 0 )
""", consistency_level=ConsistencyLevel.ALL, expected_exception=WriteFailure)
# We have two nodes left so a write with consistency level of QUORUM should complete as expected
self._perform_cql_statement(
"""
INSERT INTO testksfail.test (k, v) VALUES (1, 0 )
""", consistency_level=ConsistencyLevel.QUORUM, expected_exception=None)
failing_nodes = []
# Restart the nodes so they are fully functional again
self.setFailingNodes(failing_nodes, "testksfail")
# Drop temporary keyspace
self._perform_cql_statement(
"""
DROP KEYSPACE testksfail
""", consistency_level=ConsistencyLevel.ANY, expected_exception=None)
def test_tombstone_overflow_read_failure(self):
"""
Test to validate that a ReadFailure is returned from the node when a specified threshold of tombstones is
reached.
test_tombstone_overflow_read_failure first sets the tombstone failure threshold down to a level that allows it
to be more easily encountered. We then create some wide rows and ensure they are deleted appropriately. This
produces the correct number of tombstones. Upon making a simple query we expect to get a read failure back
from the coordinator.
@since 2.6.0
@jira_ticket PYTHON-238
@expected_result Appropriate read failures from the coordinator
@test_category queries:basic
"""
# Setup table for "wide row"
self._perform_cql_statement(
"""
CREATE TABLE test3rf.test2 (
k int,
v0 int,
v1 int, PRIMARY KEY (k,v0))
""", consistency_level=ConsistencyLevel.ALL, expected_exception=None)
statement = self.session.prepare("INSERT INTO test3rf.test2 (k, v0,v1) VALUES (1,?,1)")
parameters = [(x,) for x in range(3000)]
self.execute_concurrent_args_helper(self.session, statement, parameters)
statement = self.session.prepare("DELETE v1 FROM test3rf.test2 WHERE k = 1 AND v0 =?")
parameters = [(x,) for x in range(2001)]
self.execute_concurrent_args_helper(self.session, statement, parameters)
self._perform_cql_statement(
"""
SELECT * FROM test3rf.test2 WHERE k = 1
""", consistency_level=ConsistencyLevel.ALL, expected_exception=ReadFailure)
self._perform_cql_statement(
"""
DROP TABLE test3rf.test2;
""", consistency_level=ConsistencyLevel.ALL, expected_exception=None)
def test_user_function_failure(self):
"""
Test to validate that exceptions thrown in a user-defined function are correctly surfaced by the driver.
test_user_function_failure first creates a table to use for testing. It then creates a function that throws an
exception when invoked, invokes the function, and expects a FunctionFailure. Finally it performs
cleanup operations.
@since 2.6.0
@jira_ticket PYTHON-238
@expected_result Function failures when UDF throws exception
@test_category queries:basic
"""
# create UDF that throws an exception
self._perform_cql_statement(
"""
CREATE FUNCTION test3rf.test_failure(d double)
RETURNS NULL ON NULL INPUT
RETURNS double
LANGUAGE java AS 'throw new RuntimeException("failure");';
""", consistency_level=ConsistencyLevel.ALL, expected_exception=None)
# Create test table
self._perform_cql_statement(
"""
CREATE TABLE test3rf.d (k int PRIMARY KEY , d double);
""", consistency_level=ConsistencyLevel.ALL, expected_exception=None)
# Insert some values
self._perform_cql_statement(
"""
INSERT INTO test3rf.d (k,d) VALUES (0, 5.12);
""", consistency_level=ConsistencyLevel.ALL, expected_exception=None)
# Run the function expect a function failure exception
self._perform_cql_statement(
"""
SELECT test_failure(d) FROM test3rf.d WHERE k = 0;
""", consistency_level=ConsistencyLevel.ALL, expected_exception=FunctionFailure)
self._perform_cql_statement(
"""
DROP FUNCTION test3rf.test_failure;
""", consistency_level=ConsistencyLevel.ALL, expected_exception=None)
self._perform_cql_statement(
"""
DROP TABLE test3rf.d;
""", consistency_level=ConsistencyLevel.ALL, expected_exception=None)
class TimeoutTimerTest(unittest.TestCase):
def setUp(self):
"""
Setup sessions and pause node1
"""
self.node1 = get_node(1)
self.cluster = Cluster(protocol_version=PROTOCOL_VERSION)
self.session = self.cluster.connect()
ddl = '''
CREATE TABLE test3rf.timeout (
k int PRIMARY KEY,
v int )'''
self.session.execute(ddl)
self.node1.pause()
def tearDown(self):
"""
Shutdown cluster and resume node1
"""
self.node1.resume()
self.session.execute("DROP TABLE test3rf.timeout")
self.cluster.shutdown()
def test_async_timeouts(self):
"""
Test to validate that timeouts are honored.
Exercise the underlying timeouts by attempting a query that will time out. Ensure that the default timeout is
honored, and that user-specified timeouts are honored as well.
@since 2.7.0
@jira_ticket PYTHON-108
@expected_result timeouts should be honored
@test_category
"""
# Because node1 is paused these statements will all time out
ss = SimpleStatement('SELECT * FROM test3rf.test', consistency_level=ConsistencyLevel.ALL)
# Test with default timeout (should be 10)
start_time = time.time()
future = self.session.execute_async(ss)
with self.assertRaises(OperationTimedOut):
future.result()
end_time = time.time()
total_time = end_time-start_time
expected_time = self.session.default_timeout
# check timeout and ensure it's within a reasonable range
self.assertAlmostEqual(expected_time, total_time, delta=.05)
# Test with user defined timeout (Should be 1)
start_time = time.time()
future = self.session.execute_async(ss, timeout=1)
mock_callback = Mock(return_value=None)
mock_errorback = Mock(return_value=None)
future.add_callback(mock_callback)
future.add_errback(mock_errorback)
with self.assertRaises(OperationTimedOut):
future.result()
end_time = time.time()
total_time = end_time-start_time
expected_time = 1
# check timeout and ensure it's within a reasonable range
self.assertAlmostEqual(expected_time, total_time, delta=.05)
self.assertTrue(mock_errorback.called)
self.assertFalse(mock_callback.called)
|
|
# toontown.cogdominium.CogdoMazeGameObjects
from panda3d.direct import WaitInterval
from panda3d.core import BitMask32, CollideMask, CollisionNode, CollisionSphere, CollisionTube, NodePath, Point3, Point4, Vec3, Vec4
from direct.interval.IntervalGlobal import LerpScaleInterval, LerpColorScaleInterval, LerpPosInterval, LerpFunc
from direct.interval.IntervalGlobal import Func, Sequence, Parallel
from direct.showbase.DirectObject import DirectObject
from direct.task.Task import Task
from toontown.toonbase import ToontownGlobals
import CogdoMazeGameGlobals as Globals
from CogdoGameExit import CogdoGameExit
import CogdoUtil
import math
import random
class CogdoMazeSplattable:
def __init__(self, object, name, collisionRadius):
self.object = object
self.splat = CogdoUtil.loadMazeModel('splash')
self.splat.setBillboardPointEye()
self.splat.setBin('fixed', 40)
self.splat.setDepthTest(False)
self.splat.setDepthWrite(False)
self.splatTrack = None
self._splatSfxIval = base.cogdoGameAudioMgr.createSfxIval('splat')
self.initGagCollision(name, collisionRadius)
return
def destroy(self):
self.disableGagCollision()
if self._splatSfxIval.isPlaying():
self._splatSfxIval.finish()
del self._splatSfxIval
def initGagCollision(self, name, radius):
self.gagCollisionName = name
collision = CollisionTube(0, 0, 0, 0, 0, 4, radius)
collision.setTangible(1)
self.gagCollNode = CollisionNode(self.gagCollisionName)
self.gagCollNode.setIntoCollideMask(ToontownGlobals.PieBitmask)
self.gagCollNode.addSolid(collision)
self.gagCollNodePath = self.object.attachNewNode(self.gagCollNode)
def disableGagCollision(self):
self.gagCollNodePath.removeNode()
def doSplat(self):
if self.splatTrack and self.splatTrack.isPlaying():
self.splatTrack.finish()
self.splat.reparentTo(render)
self.splat.setPos(self.object, 0, 0, 3.0)
self.splat.setY(self.splat.getY() - 1.0)
self._splatSfxIval.node = self.splat
self.splatTrack = Parallel(self._splatSfxIval, Sequence(Func(self.splat.showThrough), LerpScaleInterval(self.splat, duration=0.5, scale=6, startScale=1, blendType='easeOut'), Func(self.splat.hide)))
self.splatTrack.start()
class CogdoMazeDrop(NodePath, DirectObject):
def __init__(self, game, id, x, y):
NodePath.__init__(self, 'dropNode%s' % id)
self.game = game
self.id = id
self.reparentTo(hidden)
self.setPos(x, y, 0)
shadow = loader.loadModel('phase_3/models/props/square_drop_shadow')
shadow.setZ(0.2)
shadow.setBin('ground', 10)
shadow.setColor(1, 1, 1, 1)
shadow.reparentTo(self)
self.shadow = shadow
drop = CogdoUtil.loadMazeModel('cabinetSmFalling')
roll = random.randint(-15, 15)
drop.setHpr(0, 0, roll)
drop.setZ(Globals.DropHeight)
self.collTube = CollisionTube(0, 0, 0, 0, 0, 4, Globals.DropCollisionRadius)
self.collTube.setTangible(0)
name = Globals.DropCollisionName
self.collNode = CollisionNode(name)
self.collNode.addSolid(self.collTube)
self.collNodePath = drop.attachNewNode(self.collNode)
self.collNodePath.hide()
self.collNodePath.setTag('isFalling', 'True')
drop.reparentTo(self)
self.drop = drop
self._dropSfx = base.cogdoGameAudioMgr.createSfxIval('drop', volume=0.6)
def disableCollisionDamage(self):
self.collTube.setTangible(1)
self.collTube.setRadius(Globals.DroppedCollisionRadius)
self.collNode.setIntoCollideMask(ToontownGlobals.WallBitmask)
self.collNodePath.setTag('isFalling', 'False')
def getDropIval(self):
shadow = self.shadow
drop = self.drop
id = self.id
hangTime = Globals.ShadowTime
dropTime = Globals.DropTime
dropHeight = Globals.DropHeight
targetShadowScale = 0.5
targetShadowAlpha = 0.4
shadowScaleIval = LerpScaleInterval(shadow, dropTime, targetShadowScale, startScale=0)
shadowAlphaIval = LerpColorScaleInterval(shadow, hangTime, Point4(1, 1, 1, targetShadowAlpha), startColorScale=Point4(1, 1, 1, 0))
shadowIval = Parallel(shadowScaleIval, shadowAlphaIval)
startPos = Point3(0, 0, dropHeight)
drop.setPos(startPos)
dropIval = LerpPosInterval(drop, dropTime, Point3(0, 0, 0), startPos=startPos, blendType='easeIn')
dropSoundIval = self._dropSfx
dropSoundIval.node = self
self.drop.setTransparency(1)
def _setRandScale(t):
self.drop.setScale(self, 1 - random.random() / 16, 1 - random.random() / 16, 1 - random.random() / 4)
scaleChange = 0.4 + random.random() / 4
dropShakeSeq = Sequence(
    LerpScaleInterval(self.drop, 0.25, Vec3(1.0 + scaleChange, 1.0 + scaleChange / 2, 1.0 - scaleChange), blendType='easeInOut'),
    LerpScaleInterval(self.drop, 0.25, Vec3(1.0, 1.0, 1.0), blendType='easeInOut'),
    Func(self.disableCollisionDamage),
    LerpScaleInterval(self.drop, 0.2, Vec3(1.0 + scaleChange / 8, 1.0 + scaleChange / 8, 1.0 - scaleChange / 8), blendType='easeInOut'),
    LerpScaleInterval(self.drop, 0.2, Vec3(1.0, 1.0, 1.0), blendType='easeInOut'),
    LerpScaleInterval(self.drop, 0.15, Vec3(1.0 + scaleChange / 16, 1.0 + scaleChange / 16, 1.0 - scaleChange / 16), blendType='easeInOut'),
    LerpScaleInterval(self.drop, 0.15, Vec3(1.0, 1.0, 1.0), blendType='easeInOut'),
    LerpScaleInterval(self.drop, 0.1, Vec3(1.0 + scaleChange / 16, 1.0 + scaleChange / 8, 1.0 - scaleChange / 16), blendType='easeInOut'),
    LerpColorScaleInterval(self.drop, Globals.DropFadeTime, Vec4(1.0, 1.0, 1.0, 0.0)))
ival = Sequence(
    Func(self.reparentTo, render),
    Parallel(Sequence(WaitInterval(hangTime), dropIval), shadowIval),
    Parallel(Func(self.game.dropHit, self, id), dropSoundIval, dropShakeSeq),
    Func(self.game.cleanupDrop, id),
    name='drop%s' % id)
self.ival = ival
return ival
def destroy(self):
self.ival.pause()
self.ival = None
self._dropSfx.pause()
self._dropSfx = None
self.collTube = None
self.collNode = None
self.collNodePath.removeNode()
self.collNodePath = None
self.removeNode()
return
class CogdoMazeExit(CogdoGameExit, DirectObject):
EnterEventName = 'CogdoMazeDoor_Enter'
def __init__(self):
CogdoGameExit.__init__(self)
self.revealed = False
self._players = []
self._initCollisions()
def _initCollisions(self):
collSphere = CollisionSphere(0, 0, 0, 3.0)
collSphere.setTangible(0)
self.collNode = CollisionNode(self.getName())
self.collNode.addSolid(collSphere)
self.collNP = self.attachNewNode(self.collNode)
def destroy(self):
self.ignoreAll()
CogdoGameExit.destroy(self)
def enable(self):
self.collNode.setFromCollideMask(ToontownGlobals.WallBitmask)
self.accept('enter' + self.getName(), self._handleEnterCollision)
def disable(self):
self.ignore('enter' + self.getName())
self.collNode.setFromCollideMask(BitMask32(0))
def _handleEnterCollision(self, collEntry):
messenger.send(CogdoMazeExit.EnterEventName, [self])
def onstage(self):
self.unstash()
self.enable()
def offstage(self):
self.stash()
self.disable()
def playerEntersDoor(self, player):
if player not in self._players:
self._players.append(player)
self.toonEnters(player.toon)
def getPlayerCount(self):
return len(self._players)
def hasPlayer(self, player):
return player in self._players
class CogdoMazeWaterCooler(NodePath, DirectObject):
UpdateTaskName = 'CogdoMazeWaterCooler_Update'
def __init__(self, serialNum, model):
NodePath.__init__(self, 'CogdoMazeWaterCooler-%i' % serialNum)
self.serialNum = serialNum
self._model = model
self._model.reparentTo(self)
self._model.setPosHpr(0, 0, 0, 0, 0, 0)
self._initCollisions()
self._initArrow()
self._update = None
self.__startUpdateTask()
return
def destroy(self):
self.ignoreAll()
self.__stopUpdateTask()
self.collNodePath.removeNode()
self.removeNode()
def _initCollisions(self):
offset = Globals.WaterCoolerTriggerOffset
self.collSphere = CollisionSphere(offset[0], offset[1], offset[2], Globals.WaterCoolerTriggerRadius)
self.collSphere.setTangible(0)
name = Globals.WaterCoolerCollisionName
self.collNode = CollisionNode(name)
self.collNode.addSolid(self.collSphere)
self.collNodePath = self.attachNewNode(self.collNode)
def _initArrow(self):
matchingGameGui = loader.loadModel('phase_3.5/models/gui/matching_game_gui')
arrow = matchingGameGui.find('**/minnieArrow')
arrow.setScale(Globals.CoolerArrowScale)
arrow.setColor(*Globals.CoolerArrowColor)
arrow.setPos(0, 0, Globals.CoolerArrowZ)
arrow.setHpr(0, 0, 90)
arrow.setBillboardAxis()
self._arrow = NodePath('Arrow')
arrow.reparentTo(self._arrow)
self._arrow.reparentTo(self)
self._arrowTime = 0
self.accept(Globals.WaterCoolerShowEventName, self.showArrow)
self.accept(Globals.WaterCoolerHideEventName, self.hideArrow)
matchingGameGui.removeNode()
def showArrow(self):
self._arrow.unstash()
def hideArrow(self):
self._arrow.stash()
def update(self, dt):
newZ = math.sin(globalClock.getFrameTime() * Globals.CoolerArrowSpeed) * Globals.CoolerArrowBounce
self._arrow.setZ(newZ)
def __startUpdateTask(self):
self.__stopUpdateTask()
self._update = taskMgr.add(self._updateTask, self.UpdateTaskName, 45)
def __stopUpdateTask(self):
if self._update is not None:
taskMgr.remove(self._update)
return
def _updateTask(self, task):
dt = globalClock.getDt()
self.update(dt)
return Task.cont
|
|
"""
neural network stuff, intended to be used with Lasagne.
Taken from
https://github.com/openai/improved-gan/blob/master/mnist_svhn_cifar10/nn.py
NOTE: Written by Tim Salimans. Does not fall under the BSD license of
the rest of the repository; licensing is unclear.
"""
import numpy as np
import theano as th
import theano.tensor as T
import lasagne
import lasagne.utils
# from lasagne.layers import dnn
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
# T.nnet.relu has some stability issues, this is better
def relu(x):
return T.maximum(x, 0)
def lrelu(x, a=0.2):
return T.maximum(x, a*x)
def centered_softplus(x):
return T.nnet.softplus(x) - np.cast[th.config.floatX](np.log(2.))
def log_sum_exp(x, axis=1):
m = T.max(x, axis=axis)
return m+T.log(T.sum(T.exp(x-m.dimshuffle(0,'x')), axis=axis))
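# Worked example of why the max is subtracted first (sketch only): a naive
# log(sum(exp(x))) overflows for large inputs, while the shifted form stays
# finite and exact up to rounding.
#
#     x = T.matrix()
#     f = th.function([x], log_sum_exp(x))
#     f(np.array([[1000., 1000.]], dtype=th.config.floatX))
#     # -> ~1000.6931 (= 1000 + log(2)); np.exp(1000.) alone overflows to inf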
def adam_updates(params, cost, lr=0.001, mom1=0.9, mom2=0.999):
updates = []
grads = T.grad(cost, params)
t = th.shared(np.cast[th.config.floatX](1.))
for p, g in zip(params, grads):
v = th.shared(np.cast[th.config.floatX](p.get_value() * 0.))
mg = th.shared(np.cast[th.config.floatX](p.get_value() * 0.))
v_t = mom1*v + (1. - mom1)*g
mg_t = mom2*mg + (1. - mom2)*T.square(g)
v_hat = v_t / (1. - mom1 ** t)
mg_hat = mg_t / (1. - mom2 ** t)
g_t = v_hat / T.sqrt(mg_hat + 1e-8)
p_t = p - lr * g_t
updates.append((v, v_t))
updates.append((mg, mg_t))
updates.append((p, p_t))
updates.append((t, t+1))
return updates
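# Usage sketch (the network/cost/input names below are placeholders, not
# defined in this module): wire the updates into a compiled training step.
#
#     params = lasagne.layers.get_all_params(network, trainable=True)
#     train_fn = th.function([x_batch, y_batch], cost,
#                            updates=adam_updates(params, cost, lr=3e-4))
#     # each call to train_fn(...) then performs one Adam step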
class WeightNormLayer(lasagne.layers.Layer):
def __init__(self, incoming, b=lasagne.init.Constant(0.), g=lasagne.init.Constant(1.),
W=lasagne.init.Normal(0.05), train_g=False, init_stdv=1., nonlinearity=relu, **kwargs):
super(WeightNormLayer, self).__init__(incoming, **kwargs)
self.nonlinearity = nonlinearity
self.init_stdv = init_stdv
k = self.input_shape[1]
if b is not None:
self.b = self.add_param(b, (k,), name="b", regularizable=False)
if g is not None:
self.g = self.add_param(g, (k,), name="g", regularizable=False, trainable=train_g)
if len(self.input_shape)==4:
self.axes_to_sum = (0,2,3)
self.dimshuffle_args = ['x',0,'x','x']
else:
self.axes_to_sum = 0
self.dimshuffle_args = ['x',0]
# scale weights in layer below
incoming.W_param = incoming.W
#incoming.W_param.set_value(W.sample(incoming.W_param.get_value().shape))
if incoming.W_param.ndim==4:
if isinstance(incoming, Deconv2DLayer):
W_axes_to_sum = (0,2,3)
W_dimshuffle_args = ['x',0,'x','x']
else:
W_axes_to_sum = (1,2,3)
W_dimshuffle_args = [0,'x','x','x']
else:
W_axes_to_sum = 0
W_dimshuffle_args = ['x',0]
if g is not None:
incoming.W = incoming.W_param * (self.g/T.sqrt(1e-6 + T.sum(T.square(incoming.W_param),axis=W_axes_to_sum))).dimshuffle(*W_dimshuffle_args)
else:
incoming.W = incoming.W_param / T.sqrt(1e-6 + T.sum(T.square(incoming.W_param),axis=W_axes_to_sum,keepdims=True))
def get_output_for(self, input, init=False, **kwargs):
if init:
m = T.mean(input, self.axes_to_sum)
input -= m.dimshuffle(*self.dimshuffle_args)
inv_stdv = self.init_stdv/T.sqrt(T.mean(T.square(input), self.axes_to_sum))
input *= inv_stdv.dimshuffle(*self.dimshuffle_args)
self.init_updates = [(self.b, -m*inv_stdv), (self.g, self.g*inv_stdv)]
elif hasattr(self,'b'):
input += self.b.dimshuffle(*self.dimshuffle_args)
return self.nonlinearity(input)
def weight_norm(layer, **kwargs):
nonlinearity = getattr(layer, 'nonlinearity', None)
if nonlinearity is not None:
layer.nonlinearity = lasagne.nonlinearities.identity
if hasattr(layer, 'b'):
del layer.params[layer.b]
layer.b = None
return WeightNormLayer(layer, nonlinearity=nonlinearity, **kwargs)
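# Usage sketch: weight_norm wraps an existing layer, reparameterizing its W
# and moving the bias and nonlinearity into the WeightNormLayer (layer sizes
# here are illustrative):
#
#     l = lasagne.layers.DenseLayer(l_in, num_units=256, nonlinearity=lrelu)
#     l = weight_norm(l, train_g=True)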
class Deconv2DLayer(lasagne.layers.Layer):
def __init__(self, incoming, target_shape, filter_size, stride=(2, 2),
W=lasagne.init.Normal(0.05), b=lasagne.init.Constant(0.), nonlinearity=relu, **kwargs):
super(Deconv2DLayer, self).__init__(incoming, **kwargs)
self.target_shape = target_shape
self.nonlinearity = (lasagne.nonlinearities.identity if nonlinearity is None else nonlinearity)
self.filter_size = lasagne.utils.as_tuple(filter_size, 2)
self.stride = lasagne.utils.as_tuple(stride, 2)
self.target_shape = target_shape
self.W_shape = (incoming.output_shape[1], target_shape[1], filter_size[0], filter_size[1])
self.W = self.add_param(W, self.W_shape, name="W")
if b is not None:
self.b = self.add_param(b, (target_shape[1],), name="b")
else:
self.b = None
def get_output_for(self, input, **kwargs):
op = T.nnet.abstract_conv.AbstractConv2d_gradInputs(imshp=self.target_shape, kshp=self.W_shape, subsample=self.stride, border_mode='half')
activation = op(self.W, input, self.target_shape[2:])
if self.b is not None:
activation += self.b.dimshuffle('x', 0, 'x', 'x')
return self.nonlinearity(activation)
def get_output_shape_for(self, input_shape):
return self.target_shape
# minibatch discrimination layer
class MinibatchLayer(lasagne.layers.Layer):
def __init__(self, incoming, num_kernels, dim_per_kernel=5, theta=lasagne.init.Normal(0.05),
log_weight_scale=lasagne.init.Constant(0.), b=lasagne.init.Constant(-1.), **kwargs):
super(MinibatchLayer, self).__init__(incoming, **kwargs)
self.num_kernels = num_kernels
num_inputs = int(np.prod(self.input_shape[1:]))
self.theta = self.add_param(theta, (num_inputs, num_kernels, dim_per_kernel), name="theta")
self.log_weight_scale = self.add_param(log_weight_scale, (num_kernels, dim_per_kernel), name="log_weight_scale")
self.W = self.theta * (T.exp(self.log_weight_scale)/T.sqrt(T.sum(T.square(self.theta),axis=0))).dimshuffle('x',0,1)
self.b = self.add_param(b, (num_kernels,), name="b")
def get_output_shape_for(self, input_shape):
return (input_shape[0], np.prod(input_shape[1:])+self.num_kernels)
def get_output_for(self, input, init=False, **kwargs):
if input.ndim > 2:
# if the input has more than two dimensions, flatten it into a
# batch of feature vectors.
input = input.flatten(2)
activation = T.tensordot(input, self.W, [[1], [0]])
abs_dif = (T.sum(abs(activation.dimshuffle(0,1,2,'x') - activation.dimshuffle('x',1,2,0)),axis=2)
+ 1e6 * T.eye(input.shape[0]).dimshuffle(0,'x',1))
if init:
mean_min_abs_dif = 0.5 * T.mean(T.min(abs_dif, axis=2),axis=0)
abs_dif /= mean_min_abs_dif.dimshuffle('x',0,'x')
self.init_updates = [(self.log_weight_scale, self.log_weight_scale-T.log(mean_min_abs_dif).dimshuffle(0,'x'))]
f = T.sum(T.exp(-abs_dif),axis=2)
if init:
mf = T.mean(f,axis=0)
f -= mf.dimshuffle('x',0)
self.init_updates.append((self.b, -mf))
else:
f += self.b.dimshuffle('x',0)
return T.concatenate([input, f], axis=1)
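# Usage sketch for minibatch discrimination in a GAN discriminator: append
# the layer late in the feature stack so the classifier on top also sees
# cross-example statistics (sizes are illustrative):
#
#     feats = lasagne.layers.DenseLayer(l_prev, num_units=250, nonlinearity=lrelu)
#     feats = MinibatchLayer(feats, num_kernels=100)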
class BatchNormLayer(lasagne.layers.Layer):
def __init__(self, incoming, b=lasagne.init.Constant(0.), g=lasagne.init.Constant(1.), nonlinearity=relu, **kwargs):
super(BatchNormLayer, self).__init__(incoming, **kwargs)
self.nonlinearity = nonlinearity
k = self.input_shape[1]
if b is not None:
self.b = self.add_param(b, (k,), name="b", regularizable=False)
if g is not None:
self.g = self.add_param(g, (k,), name="g", regularizable=False)
self.avg_batch_mean = self.add_param(lasagne.init.Constant(0.), (k,), name="avg_batch_mean", regularizable=False, trainable=False)
self.avg_batch_var = self.add_param(lasagne.init.Constant(1.), (k,), name="avg_batch_var", regularizable=False, trainable=False)
if len(self.input_shape)==4:
self.axes_to_sum = (0,2,3)
self.dimshuffle_args = ['x',0,'x','x']
else:
self.axes_to_sum = 0
self.dimshuffle_args = ['x',0]
def get_output_for(self, input, deterministic=False, **kwargs):
if deterministic:
norm_features = (input-self.avg_batch_mean.dimshuffle(*self.dimshuffle_args)) / T.sqrt(1e-6 + self.avg_batch_var).dimshuffle(*self.dimshuffle_args)
else:
batch_mean = T.mean(input,axis=self.axes_to_sum).flatten()
centered_input = input-batch_mean.dimshuffle(*self.dimshuffle_args)
batch_var = T.mean(T.square(centered_input),axis=self.axes_to_sum).flatten()
batch_stdv = T.sqrt(1e-6 + batch_var)
norm_features = centered_input / batch_stdv.dimshuffle(*self.dimshuffle_args)
# BN updates
new_m = 0.9*self.avg_batch_mean + 0.1*batch_mean
new_v = 0.9*self.avg_batch_var + T.cast((0.1*input.shape[0])/(input.shape[0]-1),th.config.floatX)*batch_var
self.bn_updates = [(self.avg_batch_mean, new_m), (self.avg_batch_var, new_v)]
if hasattr(self, 'g'):
activation = norm_features*self.g.dimshuffle(*self.dimshuffle_args)
else:
activation = norm_features
if hasattr(self, 'b'):
activation += self.b.dimshuffle(*self.dimshuffle_args)
return self.nonlinearity(activation)
def batch_norm(layer, b=lasagne.init.Constant(0.), g=lasagne.init.Constant(1.), **kwargs):
"""
adapted from https://gist.github.com/f0k/f1a6bd3c8585c400c190
"""
nonlinearity = getattr(layer, 'nonlinearity', None)
if nonlinearity is not None:
layer.nonlinearity = lasagne.nonlinearities.identity
else:
nonlinearity = lasagne.nonlinearities.identity
if hasattr(layer, 'b'):
del layer.params[layer.b]
layer.b = None
return BatchNormLayer(layer, b, g, nonlinearity=nonlinearity, **kwargs)
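# Usage sketch, mirroring weight_norm above: wrap a layer so its
# pre-activation is normalized before the original nonlinearity is applied:
#
#     l = lasagne.layers.Conv2DLayer(l_in, num_filters=64, filter_size=3,
#                                    pad='same', nonlinearity=relu)
#     l = batch_norm(l)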
class GaussianNoiseLayer(lasagne.layers.Layer):
def __init__(self, incoming, sigma=0.1, **kwargs):
super(GaussianNoiseLayer, self).__init__(incoming, **kwargs)
self._srng = RandomStreams(lasagne.random.get_rng().randint(1, 2147462579))
self.sigma = sigma
def get_output_for(self, input, deterministic=False, use_last_noise=False, **kwargs):
if deterministic or self.sigma == 0:
return input
else:
if not use_last_noise:
self.noise = self._srng.normal(input.shape, avg=0.0, std=self.sigma)
return input + self.noise
# /////////// older code used for MNIST ////////////
# weight normalization
def l2normalize(layer, train_scale=True):
W_param = layer.W
s = W_param.get_value().shape
if len(s)==4:
axes_to_sum = (1,2,3)
dimshuffle_args = [0,'x','x','x']
k = s[0]
else:
axes_to_sum = 0
dimshuffle_args = ['x',0]
k = s[1]
layer.W_scale = layer.add_param(lasagne.init.Constant(1.),
(k,), name="W_scale", trainable=train_scale, regularizable=False)
layer.W = W_param * (layer.W_scale/T.sqrt(1e-6 + T.sum(T.square(W_param),axis=axes_to_sum))).dimshuffle(*dimshuffle_args)
return layer
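# A NumPy sketch of the weight-normalization reparameterization applied to
# layer.W above, for the dense case (v of shape (num_inputs, num_units),
# scale of shape (num_units,)); shapes are assumptions for illustration.
def _l2_normalized_weights_numpy(v, scale, eps=1e-6):
    import numpy as np
    return v * (scale / np.sqrt(eps + np.sum(np.square(v), axis=0)))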
# fully connected layer with weight normalization
class DenseLayer(lasagne.layers.Layer):
def __init__(self, incoming, num_units, theta=lasagne.init.Normal(0.1), b=lasagne.init.Constant(0.),
weight_scale=lasagne.init.Constant(1.), train_scale=False, nonlinearity=relu, **kwargs):
super(DenseLayer, self).__init__(incoming, **kwargs)
self.nonlinearity = (lasagne.nonlinearities.identity if nonlinearity is None else nonlinearity)
self.num_units = num_units
num_inputs = int(np.prod(self.input_shape[1:]))
self.theta = self.add_param(theta, (num_inputs, num_units), name="theta")
self.weight_scale = self.add_param(weight_scale, (num_units,), name="weight_scale", trainable=train_scale)
self.W = self.theta * (self.weight_scale/T.sqrt(T.sum(T.square(self.theta),axis=0))).dimshuffle('x',0)
self.b = self.add_param(b, (num_units,), name="b")
def get_output_shape_for(self, input_shape):
return (input_shape[0], self.num_units)
def get_output_for(self, input, init=False, deterministic=False, **kwargs):
if input.ndim > 2:
# if the input has more than two dimensions, flatten it into a
# batch of feature vectors.
input = input.flatten(2)
activation = T.dot(input, self.W)
if init:
ma = T.mean(activation, axis=0)
activation -= ma.dimshuffle('x',0)
stdv = T.sqrt(T.mean(T.square(activation),axis=0))
activation /= stdv.dimshuffle('x',0)
self.init_updates = [(self.weight_scale, self.weight_scale/stdv), (self.b, -ma/stdv)]
else:
activation += self.b.dimshuffle('x', 0)
return self.nonlinearity(activation)
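# A NumPy sketch of the data-dependent initialization in get_output_for above
# (illustrative): after one forward pass on an init batch, the scale and bias
# are chosen so that pre-activations have zero mean and unit variance.
def _data_dependent_init_numpy(activation, weight_scale):
    import numpy as np
    ma = activation.mean(axis=0)
    stdv = np.sqrt(np.square(activation - ma).mean(axis=0))
    new_scale = weight_scale / stdv   # mirrors (weight_scale, weight_scale/stdv)
    new_bias = -ma / stdv             # mirrors (b, -ma/stdv)
    return (activation - ma) / stdv, new_scale, new_bias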
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The system for scheduling tasks and executing them in order.
Deals with dependencies, priorities, resources, etc.
The :py:class:`~luigi.worker.Worker` pulls tasks from the scheduler (usually over the REST interface) and executes them.
See :doc:`/central_scheduler` for more info.
"""
import collections
try:
import cPickle as pickle
except ImportError:
import pickle
import functools
import itertools
import logging
import os
import re
import time
from luigi import six
from luigi import configuration
from luigi import notifications
from luigi import parameter
from luigi import task_history as history
from luigi.task_status import DISABLED, DONE, FAILED, PENDING, RUNNING, SUSPENDED, UNKNOWN
from luigi.task import Config
logger = logging.getLogger(__name__)
class Scheduler(object):
"""
Abstract base class.
Note that the methods all take string arguments, not Task objects...
"""""
add_task = NotImplemented
get_work = NotImplemented
ping = NotImplemented
UPSTREAM_RUNNING = 'UPSTREAM_RUNNING'
UPSTREAM_MISSING_INPUT = 'UPSTREAM_MISSING_INPUT'
UPSTREAM_FAILED = 'UPSTREAM_FAILED'
UPSTREAM_DISABLED = 'UPSTREAM_DISABLED'
UPSTREAM_SEVERITY_ORDER = (
'',
UPSTREAM_RUNNING,
UPSTREAM_MISSING_INPUT,
UPSTREAM_FAILED,
UPSTREAM_DISABLED,
)
UPSTREAM_SEVERITY_KEY = UPSTREAM_SEVERITY_ORDER.index
STATUS_TO_UPSTREAM_MAP = {
FAILED: UPSTREAM_FAILED,
RUNNING: UPSTREAM_RUNNING,
PENDING: UPSTREAM_MISSING_INPUT,
DISABLED: UPSTREAM_DISABLED,
}
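# A small sketch of how the severity order above is used further down in this
# module: when upstream statuses are aggregated, the most severe one wins
# because it sorts latest in UPSTREAM_SEVERITY_ORDER.
def _worst_upstream_status(statuses):
    # e.g. _worst_upstream_status(['', UPSTREAM_RUNNING, UPSTREAM_FAILED])
    # returns UPSTREAM_FAILED
    return max(statuses, key=UPSTREAM_SEVERITY_KEY)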
TASK_FAMILY_RE = re.compile(r'([^(_]+)[(_]')
class scheduler(Config):
# TODO(erikbern): the config_path is needed for backwards compatibility. We
# should drop the compatibility at some point
retry_delay = parameter.FloatParameter(default=900.0)
remove_delay = parameter.FloatParameter(default=600.0)
worker_disconnect_delay = parameter.FloatParameter(default=60.0)
state_path = parameter.Parameter(default='/var/lib/luigi-server/state.pickle')
# Jobs are disabled if we see more than disable_failures failures in disable_window seconds.
# These disables last for disable_persist seconds.
disable_window = parameter.IntParameter(default=3600,
config_path=dict(section='scheduler', name='disable-window-seconds'))
disable_failures = parameter.IntParameter(default=999999999,
config_path=dict(section='scheduler', name='disable-num-failures'))
disable_hard_timeout = parameter.IntParameter(default=999999999,
config_path=dict(section='scheduler', name='disable-hard-timeout'))
disable_persist = parameter.IntParameter(default=86400,
config_path=dict(section='scheduler', name='disable-persist-seconds'))
max_shown_tasks = parameter.IntParameter(default=100000)
max_graph_nodes = parameter.IntParameter(default=100000)
record_task_history = parameter.BoolParameter(default=False)
prune_on_get_work = parameter.BoolParameter(default=False)
class Failures(object):
"""
This class tracks the number of failures in a given time window.
Failures added are marked with the current timestamp, and this class counts
the number of failures in a sliding time window ending at the present.
"""
def __init__(self, window):
"""
Initialize with the given window.
:param window: how long to track failures for, as a float (number of seconds).
"""
self.window = window
self.failures = collections.deque()
self.first_failure_time = None
def add_failure(self):
"""
Add a failure event with the current timestamp.
"""
failure_time = time.time()
if not self.first_failure_time:
self.first_failure_time = failure_time
self.failures.append(failure_time)
def num_failures(self):
"""
Return the number of failures in the window.
"""
min_time = time.time() - self.window
while self.failures and self.failures[0] < min_time:
self.failures.popleft()
return len(self.failures)
def clear(self):
"""
Clear the failure queue.
"""
self.failures.clear()
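# A minimal usage sketch of the sliding-window counter above; the window
# value is illustrative.
def _failures_example():
    f = Failures(window=3600)
    f.add_failure()
    f.add_failure()
    # Both failures are under an hour old at this point, so:
    return f.num_failures()  # == 2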
def _get_default(x, default):
if x is not None:
return x
else:
return default
class Task(object):
def __init__(self, task_id, status, deps, resources=None, priority=0, family='', module=None,
params=None, disable_failures=None, disable_window=None, disable_hard_timeout=None,
tracking_url=None, status_message=None):
self.id = task_id
self.stakeholders = set()  # worker ids that are somehow related to this task (i.e. don't prune while any of these workers are still active)
self.workers = set()  # worker ids that can perform the task - the task is 'BROKEN' if none of these workers are active
if deps is None:
self.deps = set()
else:
self.deps = set(deps)
self.status = status # PENDING, RUNNING, FAILED or DONE
self.time = time.time() # Timestamp when task was first added
self.updated = self.time
self.retry = None
self.remove = None
self.worker_running = None # the worker id that is currently running the task or None
self.time_running = None # Timestamp when picked up by worker
self.expl = None
self.priority = priority
self.resources = _get_default(resources, {})
self.family = family
self.module = module
self.params = _get_default(params, {})
self.disable_failures = disable_failures
self.disable_hard_timeout = disable_hard_timeout
self.failures = Failures(disable_window)
self.tracking_url = tracking_url
self.status_message = status_message
self.scheduler_disable_time = None
self.runnable = False
def __repr__(self):
return "Task(%r)" % vars(self)
def add_failure(self):
self.failures.add_failure()
def has_excessive_failures(self):
if self.failures.first_failure_time is not None:
if (time.time() >= self.failures.first_failure_time +
self.disable_hard_timeout):
return True
if self.failures.num_failures() >= self.disable_failures:
return True
return False
@property
def pretty_id(self):
param_str = ', '.join('{}={}'.format(key, value) for key, value in self.params.items())
return '{}({})'.format(self.family, param_str)
class Worker(object):
"""
Structure for tracking worker activity and keeping their references.
"""
def __init__(self, worker_id, last_active=None):
self.id = worker_id
self.reference = None # reference to the worker in the real world. (Currently a dict containing just the host)
self.last_active = last_active or time.time() # seconds since epoch
self.last_get_work = None
self.started = time.time() # seconds since epoch
self.tasks = set() # task objects
self.info = {}
self.disabled = False
def add_info(self, info):
self.info.update(info)
def update(self, worker_reference, get_work=False):
if worker_reference:
self.reference = worker_reference
self.last_active = time.time()
if get_work:
self.last_get_work = time.time()
def prune(self, config):
# Delete workers that haven't said anything for a while (probably killed)
if self.last_active + config.worker_disconnect_delay < time.time():
return True
def get_pending_tasks(self, state):
"""
Get PENDING (and RUNNING) tasks for this worker.
You have to pass in the state for optimization reasons.
"""
if len(self.tasks) < state.num_pending_tasks():
return six.moves.filter(lambda task: task.status in [PENDING, RUNNING],
self.tasks)
else:
return state.get_pending_tasks()
def is_trivial_worker(self, state):
"""
A worker is "trivial" if it is not an assistant and all of its pending
tasks are free of resource requirements.
We have to pass the state parameter for optimization reasons.
"""
if self.assistant:
return False
return all(not task.resources for task in self.get_pending_tasks(state))
@property
def assistant(self):
return self.info.get('assistant', False)
def __str__(self):
return self.id
class SimpleTaskState(object):
"""
Keep track of the current state and handle persistence.
The point of this class is to enable other ways to keep state, e.g. by using a database.
These will be implemented by creating an abstract base class that this and other classes
inherit from.
"""
def __init__(self, state_path):
self._state_path = state_path
self._tasks = {} # map from id to a Task object
self._status_tasks = collections.defaultdict(dict)
self._active_workers = {} # map from id to a Worker object
def get_state(self):
return self._tasks, self._active_workers
def set_state(self, state):
self._tasks, self._active_workers = state
def dump(self):
try:
with open(self._state_path, 'wb') as fobj:
pickle.dump(self.get_state(), fobj)
except IOError:
logger.warning("Failed saving scheduler state", exc_info=1)
else:
logger.info("Saved state in %s", self._state_path)
# Prone to crashes when old state is unpickled with updated code. TODO: some kind of version control?
def load(self):
if os.path.exists(self._state_path):
logger.info("Attempting to load state from %s", self._state_path)
try:
with open(self._state_path, 'rb') as fobj:
state = pickle.load(fobj)
except BaseException:
logger.exception("Error when loading state. Starting from empty state.")
return
self.set_state(state)
self._status_tasks = collections.defaultdict(dict)
for task in six.itervalues(self._tasks):
self._status_tasks[task.status][task.id] = task
else:
logger.info("No prior state file exists at %s. Starting with empty state", self._state_path)
def get_active_tasks(self, status=None):
if status:
for task in six.itervalues(self._status_tasks[status]):
yield task
else:
for task in six.itervalues(self._tasks):
yield task
def get_running_tasks(self):
return six.itervalues(self._status_tasks[RUNNING])
def get_pending_tasks(self):
return itertools.chain.from_iterable(six.itervalues(self._status_tasks[status])
for status in [PENDING, RUNNING])
def num_pending_tasks(self):
"""
Return how many tasks are PENDING + RUNNING. O(1).
"""
return len(self._status_tasks[PENDING]) + len(self._status_tasks[RUNNING])
def get_task(self, task_id, default=None, setdefault=None):
if setdefault:
task = self._tasks.setdefault(task_id, setdefault)
self._status_tasks[task.status][task.id] = task
return task
else:
return self._tasks.get(task_id, default)
def has_task(self, task_id):
return task_id in self._tasks
def re_enable(self, task, config=None):
task.scheduler_disable_time = None
task.failures.clear()
if config:
self.set_status(task, FAILED, config)
task.failures.clear()
def set_status(self, task, new_status, config=None):
if new_status == FAILED:
assert config is not None
if new_status == DISABLED and task.status == RUNNING:
return
if task.status == DISABLED:
if new_status == DONE:
self.re_enable(task)
# don't allow workers to override a scheduler disable
elif task.scheduler_disable_time is not None and new_status != DISABLED:
return
if new_status == FAILED and task.status != DISABLED:
task.add_failure()
if task.has_excessive_failures():
task.scheduler_disable_time = time.time()
new_status = DISABLED
notifications.send_error_email(
'Luigi Scheduler: DISABLED {task} due to excessive failures'.format(task=task.id),
'{task} failed {failures} times in the last {window} seconds, so it is being '
'disabled for {persist} seconds'.format(
failures=config.disable_failures,
task=task.id,
window=config.disable_window,
persist=config.disable_persist,
))
elif new_status == DISABLED:
task.scheduler_disable_time = None
if new_status != task.status:
self._status_tasks[task.status].pop(task.id)
self._status_tasks[new_status][task.id] = task
task.status = new_status
task.updated = time.time()
def fail_dead_worker_task(self, task, config, assistants):
# If a running worker disconnects, tag all of its jobs as FAILED and subject them to the same retry logic
if task.status == RUNNING and task.worker_running and task.worker_running not in task.stakeholders | assistants:
logger.info("Task %r is marked as running by disconnected worker %r -> marking as "
"FAILED with retry delay of %rs", task.id, task.worker_running,
config.retry_delay)
task.worker_running = None
self.set_status(task, FAILED, config)
task.retry = time.time() + config.retry_delay
def update_status(self, task, config):
# Mark tasks with no remaining active stakeholders for deletion
if not task.stakeholders:
if task.remove is None:
logger.debug("Task %r has stakeholders %r but none remain connected -> might remove "
"task in %s seconds", task.id, task.stakeholders, config.remove_delay)
task.remove = time.time() + config.remove_delay
# Re-enable task after the disable time expires
if task.status == DISABLED and task.scheduler_disable_time is not None:
if time.time() - task.scheduler_disable_time > config.disable_persist:
self.re_enable(task, config)
# Reset FAILED tasks to PENDING if max timeout is reached, and retry delay is >= 0
if task.status == FAILED and config.retry_delay >= 0 and task.retry < time.time():
self.set_status(task, PENDING, config)
def may_prune(self, task):
return task.remove and time.time() > task.remove
def inactivate_tasks(self, delete_tasks):
# The terminology is a bit confusing: we used to "delete" tasks when they became inactive,
# but with a pluggable state storage, you might very well want to keep some history of
# older tasks as well. That's why we call it "inactivate" (as in the verb)
for task in delete_tasks:
task_obj = self._tasks.pop(task)
self._status_tasks[task_obj.status].pop(task)
def get_active_workers(self, last_active_lt=None, last_get_work_gt=None):
for worker in six.itervalues(self._active_workers):
if last_active_lt is not None and worker.last_active >= last_active_lt:
continue
last_get_work = getattr(worker, 'last_get_work', None)
if last_get_work_gt is not None and (
last_get_work is None or last_get_work <= last_get_work_gt):
continue
yield worker
def get_assistants(self, last_active_lt=None):
return filter(lambda w: w.assistant, self.get_active_workers(last_active_lt))
def get_worker_ids(self):
return self._active_workers.keys() # only used for unit tests
def get_worker(self, worker_id):
return self._active_workers.setdefault(worker_id, Worker(worker_id))
def inactivate_workers(self, delete_workers):
# Mark workers as inactive
for worker in delete_workers:
self._active_workers.pop(worker)
self._remove_workers_from_tasks(delete_workers)
def _remove_workers_from_tasks(self, workers, remove_stakeholders=True):
for task in self.get_active_tasks():
if remove_stakeholders:
task.stakeholders.difference_update(workers)
task.workers.difference_update(workers)
def disable_workers(self, workers):
self._remove_workers_from_tasks(workers, remove_stakeholders=False)
for worker in workers:
self.get_worker(worker).disabled = True
def get_necessary_tasks(self):
necessary_tasks = set()
for task in self.get_active_tasks():
if task.status not in (DONE, DISABLED, UNKNOWN) or \
task.scheduler_disable_time is not None:
necessary_tasks.update(task.deps)
necessary_tasks.add(task.id)
return necessary_tasks
class CentralPlannerScheduler(Scheduler):
"""
Async scheduler that can handle multiple workers, etc.
Can be run locally or on a server (using RemoteScheduler + server.Server).
"""
def __init__(self, config=None, resources=None, task_history_impl=None, **kwargs):
"""
Keyword Arguments:
:param config: an object of class "scheduler" or None (in which case the global instance will be used)
:param resources: a dict of str->int constraints
:param task_history_impl: ignore config and use this object as the task history
"""
self._config = config or scheduler(**kwargs)
self._state = SimpleTaskState(self._config.state_path)
if task_history_impl:
self._task_history = task_history_impl
elif self._config.record_task_history:
from luigi import db_task_history # Needs sqlalchemy, thus imported here
self._task_history = db_task_history.DbTaskHistory()
else:
self._task_history = history.NopHistory()
self._resources = resources or configuration.get_config().getintdict('resources') # TODO: Can we make this a Parameter?
self._make_task = functools.partial(
Task, disable_failures=self._config.disable_failures,
disable_hard_timeout=self._config.disable_hard_timeout,
disable_window=self._config.disable_window)
self._worker_requests = {}
def load(self):
self._state.load()
def dump(self):
self._state.dump()
def prune(self):
logger.info("Starting pruning of task graph")
self._prune_workers()
self._prune_tasks()
logger.info("Done pruning task graph")
def _prune_workers(self):
remove_workers = []
for worker in self._state.get_active_workers():
if worker.prune(self._config):
logger.debug("Worker %s timed out (no contact for >=%ss)", worker, self._config.worker_disconnect_delay)
remove_workers.append(worker.id)
self._state.inactivate_workers(remove_workers)
def _prune_tasks(self):
assistant_ids = set(w.id for w in self._state.get_assistants())
remove_tasks = []
if assistant_ids:
necessary_tasks = self._state.get_necessary_tasks()
else:
necessary_tasks = ()
for task in self._state.get_active_tasks():
self._state.fail_dead_worker_task(task, self._config, assistant_ids)
self._state.update_status(task, self._config)
if self._state.may_prune(task) and task.id not in necessary_tasks:
logger.info("Removing task %r", task.id)
remove_tasks.append(task.id)
self._state.inactivate_tasks(remove_tasks)
def update(self, worker_id, worker_reference=None, get_work=False):
"""
Keep track of when the worker was last active.
"""
worker = self._state.get_worker(worker_id)
worker.update(worker_reference, get_work=get_work)
return not getattr(worker, 'disabled', False)
def _update_priority(self, task, prio, worker):
"""
Update priority of the given task.
Priority can only be increased.
If the task doesn't exist, a placeholder task is created to preserve priority when the task is later scheduled.
"""
task.priority = prio = max(prio, task.priority)
for dep in task.deps or []:
t = self._state.get_task(dep)
if t is not None and prio > t.priority:
self._update_priority(t, prio, worker)
def add_task(self, task_id=None, status=PENDING, runnable=True,
deps=None, new_deps=None, expl=None, resources=None,
priority=0, family='', module=None, params=None,
assistant=False, tracking_url=None, **kwargs):
"""
* add task identified by task_id if it doesn't exist
* if deps is not None, update dependency list
* update status of task
* add additional workers/stakeholders
* update priority when needed
"""
worker_id = kwargs['worker']
worker_enabled = self.update(worker_id)
if worker_enabled:
_default_task = self._make_task(
task_id=task_id, status=PENDING, deps=deps, resources=resources,
priority=priority, family=family, module=module, params=params,
)
else:
_default_task = None
task = self._state.get_task(task_id, setdefault=_default_task)
if task is None or (task.status != RUNNING and not worker_enabled):
return
# for setting priority, we'll sometimes create tasks with unset family and params
if not task.family:
task.family = family
if not getattr(task, 'module', None):
task.module = module
if not task.params:
task.params = _get_default(params, {})
if tracking_url is not None or task.status != RUNNING:
task.tracking_url = tracking_url
if task.remove is not None:
task.remove = None # unmark task for removal so it isn't removed after being added
if expl is not None:
task.expl = expl
if not (task.status == RUNNING and status == PENDING) or new_deps:
# don't allow re-scheduling of task while it is running, it must either fail or succeed first
if status == PENDING or status != task.status:
# Update the DB only if there was an actual change, to prevent noise.
# We also check for status == PENDING b/c that's the default value
# (so checking for status != task.status would lie)
self._update_task_history(task, status)
self._state.set_status(task, PENDING if status == SUSPENDED else status, self._config)
if status == FAILED:
task.retry = self._retry_time(task, self._config)
if deps is not None:
task.deps = set(deps)
if new_deps is not None:
task.deps.update(new_deps)
if resources is not None:
task.resources = resources
if worker_enabled and not assistant:
task.stakeholders.add(worker_id)
# Task dependencies might not exist yet. Let's create dummy tasks for them for now.
# Otherwise the task dependencies might end up being pruned if scheduling takes a long time
for dep in task.deps or []:
t = self._state.get_task(dep, setdefault=self._make_task(task_id=dep, status=UNKNOWN, deps=None, priority=priority))
t.stakeholders.add(worker_id)
self._update_priority(task, priority, worker_id)
if runnable and status != FAILED and worker_enabled:
task.workers.add(worker_id)
self._state.get_worker(worker_id).tasks.add(task)
task.runnable = runnable
def add_worker(self, worker, info, **kwargs):
self._state.get_worker(worker).add_info(info)
def disable_worker(self, worker):
self._state.disable_workers({worker})
def update_resources(self, **resources):
if self._resources is None:
self._resources = {}
self._resources.update(resources)
def _has_resources(self, needed_resources, used_resources):
if needed_resources is None:
return True
available_resources = self._resources or {}
for resource, amount in six.iteritems(needed_resources):
if amount + used_resources[resource] > available_resources.get(resource, 1):
return False
return True
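# An illustrative, standalone restatement of the check in _has_resources
# above: a task fits iff, for each resource it needs, need + currently-used
# stays within capacity; undeclared resources default to a capacity of 1.
def _fits(needed, used, capacity):
    # _fits({'gpu': 1}, {'gpu': 1}, {'gpu': 2}) -> True   (1 + 1 <= 2)
    # _fits({'io': 1},  {'io': 1},  {})         -> False  (1 + 1 >  1 default)
    return all(amount + used.get(r, 0) <= capacity.get(r, 1)
               for r, amount in needed.items())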
def _used_resources(self):
used_resources = collections.defaultdict(int)
if self._resources is not None:
for task in self._state.get_active_tasks(status=RUNNING):
if task.resources:
for resource, amount in six.iteritems(task.resources):
used_resources[resource] += amount
return used_resources
def _rank(self, task):
"""
Return the sort key used to rank this task for scheduling:
higher priority first; among equal priorities, older tasks first.
"""
return task.priority, -task.time
def _schedulable(self, task):
if task.status != PENDING:
return False
for dep in task.deps:
dep_task = self._state.get_task(dep, default=None)
if dep_task is None or dep_task.status != DONE:
return False
return True
def _retry_time(self, task, config):
return time.time() + config.retry_delay
def get_work(self, host=None, assistant=False, current_tasks=None, **kwargs):
# TODO: remove any expired nodes
# Algo: iterate over all nodes, find the highest-priority node with no
# dependencies and available resources.
# Resource checking looks both at currently available resources and at which resources would
# be available if all running tasks died and we rescheduled all workers greedily. We do both
# checks in order to prevent a worker with many low-priority tasks from starving other
# workers with higher priority tasks that share the same resources.
# TODO: remove tasks that can't be done, figure out if the worker has absolutely
# nothing it can wait for
if self._config.prune_on_get_work:
self.prune()
worker_id = kwargs['worker']
# Return remaining tasks that have no FAILED descendants
self.update(worker_id, {'host': host}, get_work=True)
if assistant:
self.add_worker(worker_id, [('assistant', assistant)])
best_task = None
if current_tasks is not None:
ct_set = set(current_tasks)
for task in sorted(self._state.get_running_tasks(), key=self._rank):
if task.worker_running == worker_id and task.id not in ct_set:
best_task = task
locally_pending_tasks = 0
running_tasks = []
upstream_table = {}
greedy_resources = collections.defaultdict(int)
n_unique_pending = 0
worker = self._state.get_worker(worker_id)
if worker.is_trivial_worker(self._state):
relevant_tasks = worker.get_pending_tasks(self._state)
used_resources = collections.defaultdict(int)
greedy_workers = dict() # If there's no resources, then they can grab any task
else:
relevant_tasks = self._state.get_pending_tasks()
used_resources = self._used_resources()
activity_limit = time.time() - self._config.worker_disconnect_delay
active_workers = self._state.get_active_workers(last_get_work_gt=activity_limit)
greedy_workers = dict((worker.id, worker.info.get('workers', 1))
for worker in active_workers)
tasks = list(relevant_tasks)
tasks.sort(key=self._rank, reverse=True)
for task in tasks:
in_workers = (assistant and getattr(task, 'runnable', bool(task.workers))) or worker_id in task.workers
if task.status == RUNNING and in_workers:
# Return a list of currently running tasks to the client,
# makes it easier to troubleshoot
other_worker = self._state.get_worker(task.worker_running)
more_info = {'task_id': task.id, 'worker': str(other_worker)}
if other_worker is not None:
more_info.update(other_worker.info)
running_tasks.append(more_info)
if task.status == PENDING and in_workers:
upstream_status = self._upstream_status(task.id, upstream_table)
if upstream_status != UPSTREAM_DISABLED:
locally_pending_tasks += 1
if len(task.workers) == 1 and not assistant:
n_unique_pending += 1
if best_task:
continue
if task.status == RUNNING and (task.worker_running in greedy_workers):
greedy_workers[task.worker_running] -= 1
for resource, amount in six.iteritems((task.resources or {})):
greedy_resources[resource] += amount
if self._schedulable(task) and self._has_resources(task.resources, greedy_resources):
if in_workers and self._has_resources(task.resources, used_resources):
best_task = task
else:
workers = itertools.chain(task.workers, [worker_id]) if assistant else task.workers
for task_worker in workers:
if greedy_workers.get(task_worker, 0) > 0:
# use up a worker
greedy_workers[task_worker] -= 1
# keep track of the resources used in greedy scheduling
for resource, amount in six.iteritems((task.resources or {})):
greedy_resources[resource] += amount
break
reply = {'n_pending_tasks': locally_pending_tasks,
'running_tasks': running_tasks,
'task_id': None,
'n_unique_pending': n_unique_pending}
if best_task:
self._state.set_status(best_task, RUNNING, self._config)
best_task.worker_running = worker_id
best_task.time_running = time.time()
self._update_task_history(best_task, RUNNING, host=host)
reply['task_id'] = best_task.id
reply['task_family'] = best_task.family
reply['task_module'] = getattr(best_task, 'module', None)
reply['task_params'] = best_task.params
return reply
def ping(self, **kwargs):
worker_id = kwargs['worker']
self.update(worker_id)
def _upstream_status(self, task_id, upstream_status_table):
if task_id in upstream_status_table:
return upstream_status_table[task_id]
elif self._state.has_task(task_id):
task_stack = [task_id]
while task_stack:
dep_id = task_stack.pop()
dep = self._state.get_task(dep_id)
if dep:
if dep.status == DONE:
continue
if dep_id not in upstream_status_table:
if dep.status == PENDING and dep.deps:
task_stack += [dep_id] + list(dep.deps)
upstream_status_table[dep_id] = '' # will be updated postorder
else:
dep_status = STATUS_TO_UPSTREAM_MAP.get(dep.status, '')
upstream_status_table[dep_id] = dep_status
elif upstream_status_table[dep_id] == '' and dep.deps:
# This is the postorder update step when we set the
# status based on the previously calculated child elements
status = max((upstream_status_table.get(a_task_id, '')
for a_task_id in dep.deps),
key=UPSTREAM_SEVERITY_KEY)
upstream_status_table[dep_id] = status
return upstream_status_table[dep_id]
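# For intuition, a recursive (non-memoized) sketch of what _upstream_status
# computes: a task's upstream status is the most severe status found in its
# pending dependency subtree. The method above does the same thing
# iteratively with a memo table so large graphs don't blow the stack.
def _upstream_status_sketch(state, task_id):
    task = state.get_task(task_id)
    if task is None or task.status == DONE:
        return ''
    if task.status == PENDING and task.deps:
        return max((_upstream_status_sketch(state, dep) for dep in task.deps),
                   key=UPSTREAM_SEVERITY_KEY)
    return STATUS_TO_UPSTREAM_MAP.get(task.status, '')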
def _serialize_task(self, task_id, include_deps=True, deps=None):
task = self._state.get_task(task_id)
ret = {
'display_name': task.pretty_id,
'status': task.status,
'workers': list(task.workers),
'worker_running': task.worker_running,
'time_running': getattr(task, "time_running", None),
'start_time': task.time,
'last_updated': getattr(task, "updated", task.time),
'params': task.params,
'name': task.family,
'priority': task.priority,
'resources': task.resources,
'tracking_url': getattr(task, "tracking_url", None),
'status_message': getattr(task, "status_message", None)
}
if task.status == DISABLED:
ret['re_enable_able'] = task.scheduler_disable_time is not None
if include_deps:
ret['deps'] = list(task.deps if deps is None else deps)
return ret
def graph(self, **kwargs):
self.prune()
serialized = {}
seen = set()
for task in self._state.get_active_tasks():
serialized.update(self._traverse_graph(task.id, seen))
return serialized
def _filter_done(self, task_ids):
for task_id in task_ids:
task = self._state.get_task(task_id)
if task is None or task.status != DONE:
yield task_id
def _traverse_graph(self, root_task_id, seen=None, dep_func=None, include_done=True):
""" Returns the dependency graph rooted at task_id
This does a breadth-first traversal to find the nodes closest to the
root before hitting the scheduler.max_graph_nodes limit.
:param root_task_id: the id of the graph's root
:return: A map of task id to serialized node
"""
if seen is None:
seen = set()
elif root_task_id in seen:
return {}
if dep_func is None:
def dep_func(t):
return t.deps
seen.add(root_task_id)
serialized = {}
queue = collections.deque([root_task_id])
while queue:
task_id = queue.popleft()
task = self._state.get_task(task_id)
if task is None or not task.family:
logger.debug('Missing task for id [%s]', task_id)
# NOTE: If a dependency is missing from self._state there is no way to deduce the
# task family and parameters.
family_match = TASK_FAMILY_RE.match(task_id)
family = family_match.group(1) if family_match else UNKNOWN
params = {'task_id': task_id}
serialized[task_id] = {
'deps': [],
'status': UNKNOWN,
'workers': [],
'start_time': UNKNOWN,
'params': params,
'name': family,
'display_name': task_id,
'priority': 0,
}
else:
deps = dep_func(task)
if not include_done:
deps = list(self._filter_done(deps))
serialized[task_id] = self._serialize_task(task_id, deps=deps)
for dep in sorted(deps):
if dep not in seen:
seen.add(dep)
queue.append(dep)
if task_id != root_task_id:
del serialized[task_id]['display_name']
if len(serialized) >= self._config.max_graph_nodes:
break
return serialized
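# A framework-free sketch of the bounded traversal above: because the queue
# is FIFO, nodes are serialized breadth-first, so when max_graph_nodes is
# reached the surviving nodes are the ones closest to the root.
def _bfs_with_cap(root, children, max_nodes):
    import collections
    seen, out, queue = {root}, [], collections.deque([root])
    while queue and len(out) < max_nodes:
        node = queue.popleft()
        out.append(node)
        for child in children(node):
            if child not in seen:
                seen.add(child)
                queue.append(child)
    return out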
def dep_graph(self, task_id, include_done=True, **kwargs):
self.prune()
if not self._state.has_task(task_id):
return {}
return self._traverse_graph(task_id, include_done=include_done)
def inverse_dep_graph(self, task_id, include_done=True, **kwargs):
self.prune()
if not self._state.has_task(task_id):
return {}
inverse_graph = collections.defaultdict(set)
for task in self._state.get_active_tasks():
for dep in task.deps:
inverse_graph[dep].add(task.id)
return self._traverse_graph(
task_id, dep_func=lambda t: inverse_graph[t.id], include_done=include_done)
def task_list(self, status, upstream_status, limit=True, search=None, **kwargs):
"""
Query for a subset of tasks by status.
"""
self.prune()
result = {}
upstream_status_table = {} # used to memoize upstream status
if search is None:
def filter_func(_):
return True
else:
terms = search.split()
def filter_func(t):
return all(term in t.pretty_id for term in terms)
for task in filter(filter_func, self._state.get_active_tasks(status)):
if (task.status != PENDING or not upstream_status or
upstream_status == self._upstream_status(task.id, upstream_status_table)):
serialized = self._serialize_task(task.id, False)
result[task.id] = serialized
if limit and len(result) > self._config.max_shown_tasks:
return {'num_tasks': len(result)}
return result
def _first_task_display_name(self, worker):
task_id = worker.info.get('first_task', '')
if self._state.has_task(task_id):
return self._state.get_task(task_id).pretty_id
else:
return task_id
def worker_list(self, include_running=True, **kwargs):
self.prune()
workers = [
dict(
name=worker.id,
last_active=worker.last_active,
started=getattr(worker, 'started', None),
first_task_display_name=self._first_task_display_name(worker),
**worker.info
) for worker in self._state.get_active_workers()]
workers.sort(key=lambda worker: worker['started'], reverse=True)
if include_running:
running = collections.defaultdict(dict)
num_pending = collections.defaultdict(int)
num_uniques = collections.defaultdict(int)
for task in self._state.get_pending_tasks():
if task.status == RUNNING and task.worker_running:
running[task.worker_running][task.id] = self._serialize_task(task.id, False)
elif task.status == PENDING:
for worker in task.workers:
num_pending[worker] += 1
if len(task.workers) == 1:
num_uniques[list(task.workers)[0]] += 1
for worker in workers:
tasks = running[worker['name']]
worker['num_running'] = len(tasks)
worker['num_pending'] = num_pending[worker['name']]
worker['num_uniques'] = num_uniques[worker['name']]
worker['running'] = tasks
return workers
def resource_list(self):
"""
Resource usage info and the tasks consuming each resource.
"""
self.prune()
resources = [
dict(
name=resource,
num_total=r_dict['total'],
num_used=r_dict['used']
) for resource, r_dict in six.iteritems(self.resources())]
if self._resources is not None:
consumers = collections.defaultdict(dict)
for task in self._state.get_running_tasks():
if task.status == RUNNING and task.resources:
for resource, amount in six.iteritems(task.resources):
consumers[resource][task.id] = self._serialize_task(task.id, False)
for resource in resources:
tasks = consumers[resource['name']]
resource['num_consumer'] = len(tasks)
resource['running'] = tasks
return resources
def resources(self):
"""Return the total amount and the amount currently in use for each resource."""
used_resources = self._used_resources()
ret = collections.defaultdict(dict)
for resource, total in six.iteritems(self._resources):
ret[resource]['total'] = total
if resource in used_resources:
ret[resource]['used'] = used_resources[resource]
else:
ret[resource]['used'] = 0
return ret
def task_search(self, task_str, **kwargs):
"""
Query for the subset of tasks whose task_id contains the given substring.
:param task_str: substring to look for in task ids.
:return: a map of status -> {task_id: serialized task}.
"""
self.prune()
result = collections.defaultdict(dict)
for task in self._state.get_active_tasks():
if task.id.find(task_str) != -1:
serialized = self._serialize_task(task.id, False)
result[task.status][task.id] = serialized
return result
def re_enable_task(self, task_id):
serialized = {}
task = self._state.get_task(task_id)
if task and task.status == DISABLED and task.scheduler_disable_time:
self._state.re_enable(task, self._config)
serialized = self._serialize_task(task_id)
return serialized
def fetch_error(self, task_id, **kwargs):
if self._state.has_task(task_id):
task = self._state.get_task(task_id)
return {"taskId": task_id, "error": task.expl, 'displayName': task.pretty_id}
else:
return {"taskId": task_id, "error": ""}
def set_task_status_message(self, task_id, status_message):
if self._state.has_task(task_id):
task = self._state.get_task(task_id)
task.status_message = status_message
def get_task_status_message(self, task_id):
if self._state.has_task(task_id):
task = self._state.get_task(task_id)
return {"taskId": task_id, "statusMessage": task.status_message}
else:
return {"taskId": task_id, "statusMessage": ""}
def _update_task_history(self, task, status, host=None):
try:
if status == DONE or status == FAILED:
successful = (status == DONE)
self._task_history.task_finished(task, successful)
elif status == PENDING:
self._task_history.task_scheduled(task)
elif status == RUNNING:
self._task_history.task_started(task, host)
except BaseException:
logger.warning("Error saving Task history", exc_info=True)
@property
def task_history(self):
# Used by server.py to expose the calls
return self._task_history
|
|
# coding=utf-8
# Copyright 2021 TF.Text Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Microbenchmarks for tokenizers on IMDB dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import six
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.ragged import ragged_functional_ops
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow_text.python import ops as text_ops
from tensorflow_text.python.benchmarks import benchmark_utils
from tensorflow_text.python.ops.bert_tokenizer import BasicTokenizer
FLAGS = flags.FLAGS
flags.DEFINE_integer("run_iters", 1000, "Number of iterations to run")
flags.DEFINE_integer("burn_iters", 10, "Number of warmup runs")
flags.DEFINE_integer("batch_size", 32, "The size of a batch")
flags.DEFINE_boolean("run_eagerly", True, "Run in eager mode")
flags.DEFINE_boolean(
"use_tf_function", True,
"Wraps the op in a tf.function. Only works when eager mode is enabled")
flags.DEFINE_boolean(
"ragged_vs_dense", False,
"Run the tokenizers using ragged inputs and its dense counterpart")
flags.DEFINE_boolean("xprof_tracing", False, "Enables xprof tracing")
flags.DEFINE_boolean("with_offsets", False,
"Runs the tokenize_with_offsets op instead of tokenize")
# These are needed when generating the parameterized benchmarks and cannot use
# absl FLAGS
_BERT_VOCAB_PATH = "third_party/tensorflow_text/python/benchmarks/test_data/uncased_L-12_H-768_A-12/vocab.txt"
_HUB_MODULE_HANDLE = "third_party/tensorflow_text/python/ops/test_data/segmenter_hub_module"
_SENTENCEPIECE_MODEL_FILE = "third_party/tensorflow_text/python/ops/test_data/test_oss_model.model"
class TokenizationBenchmark(
six.with_metaclass(benchmark.ParameterizedBenchmark,
benchmark_utils.OpsBaseBenchmark)):
"""Benchmarks for tokenizers."""
def __init__(self):
if not FLAGS.run_eagerly:
ops.disable_eager_execution()
self.use_tf_function = FLAGS.use_tf_function
self.load_input_data(FLAGS.batch_size)
# Tokenizers to benchmark which do not require a special/extra input can be
# added here as parameters to "_benchmark_parameters".
# This method assumes the tokenizers given implement the Tokenizer class and
# will run benchmarks for the "tokenize" and "tokenize_with_offsets" methods.
# The parameters for each tokenizer are:
# - The tokenizer name
# - The tokenizer class to instantiate
# - The kwargs used in instantiating and initialization of the tokenizer
_benchmark_parameters = [
("whitespace_tokenizer", text_ops.WhitespaceTokenizer),
("unicode_script_tokenizer", text_ops.UnicodeScriptTokenizer),
("unicode_char_tokenizer", text_ops.UnicodeCharTokenizer),
("bert_tokenizer", text_ops.BertTokenizer, {
"vocab_lookup_table": _BERT_VOCAB_PATH,
"token_out_type": dtypes.int32,
"lower_case": False
}),
("hub_module_tokenizer", text_ops.HubModuleTokenizer, {
"hub_module_handle": _HUB_MODULE_HANDLE
}),
("basic_tokenizer", BasicTokenizer),
]
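# For example, a hypothetical tokenizer taking a `lower_case` kwarg would be
# registered as (the tokenizer name below is made up for illustration):
#     ("my_tokenizer", text_ops.MyTokenizer, {"lower_case": True})
# The kwargs entry may be omitted entirely, as the first three rows show.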
def benchmark(self, tokenizer, kwargs=None):
tokenizer = tokenizer(**(kwargs or {}))
op = tokenizer.tokenize_with_offsets if FLAGS.with_offsets else tokenizer.tokenize
if FLAGS.ragged_vs_dense:
self.run_and_report_ragged_vs_dense(
op,
FLAGS.run_iters,
FLAGS.burn_iters,
xprof_enabled=FLAGS.xprof_tracing)
return
self.run_and_report(
op,
FLAGS.run_iters,
FLAGS.burn_iters,
xprof_enabled=FLAGS.xprof_tracing)
class CustomInputTokenizationBenchmark(benchmark_utils.OpsBaseBenchmark):
"""Benchmarks for tokenizers that require extra preprocessing or inputs."""
def __init__(self):
if not FLAGS.run_eagerly:
ops.disable_eager_execution()
self.use_tf_function = FLAGS.use_tf_function
self.load_input_data(FLAGS.batch_size)
def _create_table(self, vocab, num_oov=100):
init = lookup_ops.TextFileIdTableInitializer(vocab)
return lookup_ops.StaticVocabularyTableV1(init, num_oov)
def _run(self, tokenizer, kwargs=None):
op = tokenizer.tokenize_with_offsets if FLAGS.with_offsets else tokenizer.tokenize
if FLAGS.ragged_vs_dense:
self.run_and_report_ragged_vs_dense(
op,
FLAGS.run_iters,
FLAGS.burn_iters,
xprof_enabled=FLAGS.xprof_tracing,
**(kwargs or {}))
self.run_and_report(
op,
FLAGS.run_iters,
FLAGS.burn_iters,
xprof_enabled=FLAGS.xprof_tracing,
**(kwargs or {}))
def benchmark_wordpiece_tokenizer(self):
self.input_data = text_ops.WhitespaceTokenizer().tokenize(self.input_data)
tokenizer = text_ops.WordpieceTokenizer(
vocab_lookup_table=self._create_table(_BERT_VOCAB_PATH),
unknown_token=None,
token_out_type=dtypes.int64)
self._run(tokenizer)
def benchmark_sentencepiece_tokenizer(self):
model = gfile.GFile(_SENTENCEPIECE_MODEL_FILE, "rb").read()
tokenizer = text_ops.SentencepieceTokenizer(model)
self._run(tokenizer)
# TODO(irinabejan): Add benchmark for detokenization
def _get_char_level_splits(self):
"""Get splits that match inputs char level."""
char_tokenizer = text_ops.UnicodeCharTokenizer()
char_splits = array_ops.zeros_like(char_tokenizer.tokenize(self.input_data))
return char_splits
def benchmark_split_merge_tokenizer(self):
if FLAGS.ragged_vs_dense:
return
random_seed.set_seed(5)
char_splits = self._get_char_level_splits()
if not context.executing_eagerly():
# Evaluate the splits, as their shape cannot be inferred in graph mode
# and is needed for the mapping below
with session.Session() as sess:
sess.run(self.iterator.initializer)
char_splits = sess.run(char_splits)
def randomize_splits(inputs):
return random_ops.random_uniform(
inputs.shape, maxval=2, dtype=dtypes.int32)
labels = ragged_functional_ops.map_flat_values(randomize_splits,
char_splits)
if not context.executing_eagerly():
# Evaluate labels computation to exclude these steps from op benchmarking
with session.Session() as sess:
labels = sess.run(labels)
tokenizer = text_ops.SplitMergeTokenizer()
self._run(tokenizer, {"labels": labels})
def benchmark_split_merge_from_logits_tokenizer(self):
if FLAGS.ragged_vs_dense:
return
random_seed.set_seed(5)
char_splits = self._get_char_level_splits().to_tensor()
if not context.executing_eagerly():
with session.Session() as sess:
sess.run(self.iterator.initializer)
char_splits = sess.run(char_splits)
logits = random_ops.random_uniform(
char_splits.shape + (2,), minval=-6, maxval=6, dtype=dtypes.float32)
if not context.executing_eagerly():
# Evaluate logits computation to exclude these steps from op benchmarking
with session.Session() as sess:
logits = sess.run(logits)
tokenizer = text_ops.SplitMergeFromLogitsTokenizer()
self._run(tokenizer, {"logits": logits})
class RegexSplitOpsBenchmark(benchmark_utils.OpsBaseBenchmark):
"""Benchmarks for regex split ops."""
def __init__(self):
if not FLAGS.run_eagerly:
ops.disable_eager_execution()
self.use_tf_function = FLAGS.use_tf_function
self.load_input_data(FLAGS.batch_size)
def benchmark_regex_split_ops(self):
op = text_ops.regex_split_with_offsets if FLAGS.with_offsets else text_ops.regex_split
kwargs = {"delim_regex_pattern": r"[\p{S}|\p{P}]+|\s"}
self.run_and_report(
op,
FLAGS.run_iters,
FLAGS.burn_iters,
xprof_enabled=FLAGS.xprof_tracing,
**(kwargs or {}))
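# Typical invocation, assuming this file is run as a script (the file name
# and flag values below are illustrative only):
#     python tokenizers_benchmark.py --run_iters=100 --batch_size=16 \
#         --ragged_vs_dense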
if __name__ == "__main__":
app.run(test.main)
|
|
import numpy as np
import tensorflow as tf
import scipy.misc as sm
FLAGS = tf.app.flags.FLAGS
def gen_pred_img(ffg, fbg, lfg):
border = 2
shape = ffg.shape # [h, w, c]
image = np.ones([shape[0]+2*border, shape[1]*3+4*border, shape[2]]) * 255
image[border:-border,border:shape[1]+border] = ffg
image[border:-border,shape[1]+border*2:2*shape[1]+border*2] = fbg
image[border:-border,2*shape[1]+3*border:-border] = lfg
return image
def gen_pred_vid(vid):
shape = vid.shape
vid_img = np.zeros((shape[1], shape[0]*shape[2], shape[3]))
for i in range(shape[0]):
vid_img[:,i*shape[2]:(i+1)*shape[2]] = vid[i]
return vid_img
def decode_frames(frame_list, h, w, l):
clip = []
for i in range(l):
frame = frame_list[i]
image = tf.cast(tf.image.decode_jpeg(frame), tf.float32)
image.set_shape((h, w, 3))
clip.append(image)
return tf.stack(clip)
def generate_mask(img_mask_list, h, w, l):
img_masks, loss_masks = [], []
for i in range(l):
# generate image mask
img_mask = img_mask_list[i]
img_mask = tf.cast(tf.image.decode_png(img_mask), tf.float32)
img_mask = tf.reshape(img_mask, (h, w))
img_masks.append(img_mask)
# generate loss mask
s_total = h * w
s_mask = tf.reduce_sum(img_mask)
def f1(): return img_mask*((s_total-s_mask)/s_mask-1)+1
def f2(): return tf.zeros_like(img_mask)
def f3(): return tf.ones_like(img_mask)
loss_mask = tf.case([(tf.equal(s_mask, 0), f2), \
(tf.less(s_mask, s_total/2), f1)],
default=f3)
loss_masks.append(loss_mask)
return tf.stack(img_masks), tf.stack(loss_masks)
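# A NumPy restatement (illustrative) of the loss-mask weighting above: an
# empty mask yields all-zero weights; a foreground covering less than half
# the frame is upweighted so that foreground and background contribute
# equally to the loss; otherwise the weights are uniform.
def _loss_mask_numpy(img_mask):
    import numpy as np
    s_total = float(img_mask.size)
    s_mask = float(img_mask.sum())
    if s_mask == 0:
        return np.zeros_like(img_mask)
    if s_mask < s_total / 2:
        return img_mask * ((s_total - s_mask) / s_mask - 1) + 1
    return np.ones_like(img_mask)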
def read_my_file_format(filename_queue, is_training):
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
context_features = {
"height": tf.FixedLenFeature([], dtype=tf.int64),
"width": tf.FixedLenFeature([], dtype=tf.int64),
"sequence_length": tf.FixedLenFeature([], dtype=tf.int64),
"text": tf.FixedLenFeature([], dtype=tf.string),
"label": tf.FixedLenFeature([], dtype=tf.int64)
}
sequence_features = {
"frames": tf.FixedLenSequenceFeature([], dtype=tf.string),
"masks": tf.FixedLenSequenceFeature([], dtype=tf.string)
}
context_parsed, sequence_parsed = tf.parse_single_sequence_example(
serialized=serialized_example,
context_features=context_features,
sequence_features=sequence_features
)
# start queue runners so the pipeline won't get stuck
tf.train.start_queue_runners(sess=tf.get_default_session())
height = FLAGS.height
width = FLAGS.width
sequence_length = 32
clip = decode_frames(sequence_parsed['frames'], height, width, sequence_length)
img_mask, loss_mask = generate_mask(sequence_parsed['masks'], \
height, width, sequence_length)
if is_training:
# randomly sample a clip of FLAGS.seq_length frames
idx = tf.squeeze(tf.random_uniform([1], 0, sequence_length-FLAGS.seq_length+1, dtype=tf.int32))
else:
# sample the middle clip
idx = 8
clip = clip[idx:idx+FLAGS.seq_length] / 255.0 * 2 - 1
img_mask = img_mask[idx:idx+FLAGS.seq_length]
loss_mask = loss_mask[idx:idx+FLAGS.seq_length]
if is_training:
# randomly temporally flip data
reverse = tf.squeeze(tf.random_uniform([1], 0, 2, dtype=tf.int32))
clip = tf.cond(tf.equal(reverse,0), lambda: clip, lambda: clip[::-1])
img_mask = tf.cond(tf.equal(reverse,0), lambda: img_mask, lambda: img_mask[::-1])
loss_mask = tf.cond(tf.equal(reverse,0), lambda: loss_mask, lambda: loss_mask[::-1])
clip.set_shape([FLAGS.seq_length, height, width, 3])
img_mask.set_shape([FLAGS.seq_length, height, width])
loss_mask.set_shape([FLAGS.seq_length, height, width])
# randomly horizontally flip data
flip = tf.squeeze(tf.random_uniform([1], 0, 2, dtype=tf.int32))
img_list, img_mask_list, loss_mask_list = tf.unstack(clip), tf.unstack(img_mask), tf.unstack(loss_mask)
flip_clip, flip_img_mask, flip_loss_mask = [], [], []
for i in range(FLAGS.seq_length):
flip_clip.append(tf.cond(tf.equal(flip, 0), lambda: img_list[i], lambda: tf.image.flip_left_right(img_list[i])))
flip_img_mask.append(tf.cond(tf.equal(flip, 0), lambda: img_mask_list[i], \
lambda: tf.squeeze(tf.image.flip_left_right(tf.expand_dims(img_mask_list[i],-1)),-1)))
flip_loss_mask.append(tf.cond(tf.equal(flip, 0), lambda: loss_mask_list[i], \
lambda: tf.squeeze(tf.image.flip_left_right(tf.expand_dims(loss_mask_list[i],-1)),-1)))
clip = tf.stack(flip_clip)
img_mask = tf.stack(flip_img_mask)
loss_mask = tf.stack(flip_loss_mask)
clip.set_shape([FLAGS.seq_length, height, width, 3])
img_mask.set_shape([FLAGS.seq_length, height, width])
loss_mask.set_shape([FLAGS.seq_length, height, width])
return clip, img_mask, loss_mask
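# An illustrative restatement of the temporal crop above: the start index is
# drawn uniformly from [0, sequence_length - seq_length], so the sampled clip
# always fits inside the decoded sequence. Default values are illustrative.
def _random_clip_bounds(sequence_length=32, seq_length=16):
    import random
    idx = random.randint(0, sequence_length - seq_length)
    return idx, idx + seq_length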
def input_pipeline(filenames, batch_size, read_threads=4, num_epochs=None, is_training=True):
filename_queue = tf.train.string_input_producer(
filenames, num_epochs=FLAGS.num_epochs, shuffle=is_training)
# initialize local variables when num_epochs is not None, otherwise an uninitialized-variable error is raised
tf.get_default_session().run(tf.local_variables_initializer())
example_list = [read_my_file_format(filename_queue, is_training) \
for _ in range(read_threads)]
min_after_dequeue = 300 if is_training else 10
capacity = min_after_dequeue + 3 * batch_size
clip_batch, img_mask_batch, loss_mask_batch = tf.train.shuffle_batch_join(
example_list, batch_size=batch_size, capacity=capacity,
min_after_dequeue=min_after_dequeue)
return clip_batch, img_mask_batch, loss_mask_batch
def read_my_file_format_dis(filename_queue, is_training):
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
context_features = {
"height": tf.FixedLenFeature([], dtype=tf.int64),
"width": tf.FixedLenFeature([], dtype=tf.int64),
"sequence_length": tf.FixedLenFeature([], dtype=tf.int64),
"text": tf.FixedLenFeature([], dtype=tf.string),
"label": tf.FixedLenFeature([], dtype=tf.int64)
}
sequence_features = {
"frames": tf.FixedLenSequenceFeature([], dtype=tf.string),
"masks": tf.FixedLenSequenceFeature([], dtype=tf.string)
}
context_parsed, sequence_parsed = tf.parse_single_sequence_example(
serialized=serialized_example,
context_features=context_features,
sequence_features=sequence_features
)
height = 128  # context_parsed['height'].eval()
width = 128  # context_parsed['width'].eval()
sequence_length = 32  # context_parsed['sequence_length'].eval()
clip = decode_frames(sequence_parsed['frames'], height, width, sequence_length)
# generate one hot vector
label = context_parsed['label']
label = tf.one_hot(label-1, FLAGS.num_class)
text = context_parsed['text']
# randomly sample a clip of FLAGS.seq_length frames
if is_training:
idx = tf.squeeze(tf.random_uniform([1], 0, sequence_length-FLAGS.seq_length+1, dtype=tf.int32))
else:
idx = 8
clip = clip[idx:idx+FLAGS.seq_length] / 255.0 * 2 - 1
if is_training:
# randomly reverse data
reverse = tf.squeeze(tf.random_uniform([1], 0, 2, dtype=tf.int32))
clip = tf.cond(tf.equal(reverse,0), lambda: clip, lambda: clip[::-1])
# randomly horizontally flip data
flip = tf.squeeze(tf.random_uniform([1], 0, 2, dtype=tf.int32))
clip = tf.cond(tf.equal(flip,0), lambda: clip, lambda: \
tf.map_fn(lambda img: tf.image.flip_left_right(img), clip))
clip.set_shape([FLAGS.seq_length, height, width, 3])
return clip, label, text
def input_pipeline_dis(filenames, batch_size, read_threads=4, num_epochs=None, is_training=True):
filename_queue = tf.train.string_input_producer(
filenames, num_epochs=FLAGS.num_epochs, shuffle=is_training)
# initialize local variables when num_epochs is not None, otherwise an uninitialized-variable error is raised
tf.get_default_session().run(tf.local_variables_initializer())
example_list = [read_my_file_format_dis(filename_queue, is_training) \
for _ in range(read_threads)]
min_after_dequeue = 300 if is_training else 10
capacity = min_after_dequeue + 3 * batch_size
clip_batch, label_batch, text_batch = tf.train.shuffle_batch_join(
example_list, batch_size=batch_size, capacity=capacity,
min_after_dequeue=min_after_dequeue)
return clip_batch, label_batch, text_batch
def average_gradients(tower_grads):
average_grads = []
for grad_and_vars in zip(*tower_grads):
grads = []
for g, v in grad_and_vars:
expanded_g = tf.expand_dims(g, 0)
grads.append(expanded_g)
grad = tf.concat(grads, 0)
grad = tf.reduce_mean(grad, 0)
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
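# A framework-free sketch of average_gradients above: tower_grads is a list
# (one entry per GPU tower) of (gradient, variable) pairs, and gradients for
# the same variable are averaged across towers. Values are illustrative.
def _average_gradients_example():
    tower_grads = [[(2.0, 'w'), (4.0, 'b')],   # tower 0
                   [(4.0, 'w'), (0.0, 'b')]]   # tower 1
    return [(sum(g for g, _ in gv) / len(gv), gv[0][1])
            for gv in zip(*tower_grads)]        # [(3.0, 'w'), (2.0, 'b')]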
def average_gradients_dis(tower_grads, encoder_gradient_ratio):
average_grads = []
for grad_and_vars in zip(*tower_grads):
grads = []
for g, v in grad_and_vars:
if 'c3d' in v.name or 'mapping' in v.name:
g = g * encoder_gradient_ratio
expanded_g = tf.expand_dims(g, 0)
grads.append(expanded_g)
if len(grads) == 0:
continue
grad = tf.concat(grads, 0)
grad = tf.reduce_mean(grad, 0)
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
|
|
#!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core methods for checking EcmaScript files for common style guide violations.
"""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)',
'jacobr@google.com (Jacob Richman)')
import re
import gflags as flags
from closure_linter import checkerbase
from closure_linter import ecmametadatapass
from closure_linter import error_check
from closure_linter import errorrules
from closure_linter import errors
from closure_linter import indentation
from closure_linter import javascripttokenizer
from closure_linter import javascripttokens
from closure_linter import statetracker
from closure_linter import tokenutil
from closure_linter.common import error
from closure_linter.common import position
FLAGS = flags.FLAGS
flags.DEFINE_list('custom_jsdoc_tags', '', 'Extra jsdoc tags to allow')
# TODO(user): When flipping this to True, remove logic from unit tests
# that overrides this flag.
flags.DEFINE_boolean('dot_on_next_line', False, 'Require dots to be '
'placed on the next line for wrapped expressions')
# TODO(robbyw): Check for extra parens on return statements
# TODO(robbyw): Check for 0px in strings
# TODO(robbyw): Ensure inline jsDoc is in {}
# TODO(robbyw): Check for valid JS types in parameter docs
# Shorthand
Context = ecmametadatapass.EcmaContext
Error = error.Error
Modes = javascripttokenizer.JavaScriptModes
Position = position.Position
Rule = error_check.Rule
Type = javascripttokens.JavaScriptTokenType
class EcmaScriptLintRules(checkerbase.LintRulesBase):
"""EmcaScript lint style checking rules.
Can be used to find common style errors in JavaScript, ActionScript and other
Ecma like scripting languages. Style checkers for Ecma scripting languages
should inherit from this style checker.
Please do not add any state to EcmaScriptLintRules or to any subclasses.
All state should be added to the StateTracker subclass used for a particular
language.
"""
# Initialized in the constructor, after the flags have been parsed.
max_line_length = -1
# Static constants.
MISSING_PARAMETER_SPACE = re.compile(r',\S')
EXTRA_SPACE = re.compile(r'(\(\s|\s\))')
ENDS_WITH_SPACE = re.compile(r'\s$')
ILLEGAL_TAB = re.compile(r'\t')
# Regex used to split up complex types to check for invalid use of ? and |.
TYPE_SPLIT = re.compile(r'[,<>()]')
# Regex for form of author lines after the @author tag.
AUTHOR_SPEC = re.compile(r'(\s*)[^\s]+@[^(\s]+(\s*)\(.+\)')
# Acceptable tokens to remove for line too long testing.
LONG_LINE_IGNORE = frozenset(
['*', '//', '@see'] +
['@%s' % tag for tag in statetracker.DocFlag.HAS_TYPE])
JSDOC_FLAGS_DESCRIPTION_NOT_REQUIRED = frozenset([
'@fileoverview', '@param', '@return', '@returns'])
def __init__(self):
"""Initialize this lint rule object."""
checkerbase.LintRulesBase.__init__(self)
if EcmaScriptLintRules.max_line_length == -1:
EcmaScriptLintRules.max_line_length = errorrules.GetMaxLineLength()
def Initialize(self, checker, limited_doc_checks, is_html):
"""Initialize this lint rule object before parsing a new file."""
checkerbase.LintRulesBase.Initialize(self, checker, limited_doc_checks,
is_html)
self._indentation = indentation.IndentationRules()
def HandleMissingParameterDoc(self, token, param_name):
"""Handle errors associated with a parameter missing a @param tag."""
raise TypeError('Abstract method HandleMissingParameterDoc not implemented')
def _CheckLineLength(self, last_token, state):
"""Checks whether the line is too long.
Args:
last_token: The last token in the line.
state: parser_state object that indicates the current state in the page
"""
# Start from the last token so that we have the flag object attached to
# any DOC_FLAG tokens.
line_number = last_token.line_number
token = last_token
# Build a representation of the string where spaces indicate potential
# line-break locations.
line = []
while token and token.line_number == line_number:
if state.IsTypeToken(token):
line.insert(0, 'x' * len(token.string))
elif token.type in (Type.IDENTIFIER, Type.OPERATOR):
# Dots are acceptable places to wrap (may be tokenized as identifiers).
line.insert(0, token.string.replace('.', ' '))
else:
line.insert(0, token.string)
token = token.previous
line = ''.join(line)
line = line.rstrip('\n\r\f')
try:
length = len(unicode(line, 'utf-8'))
except (LookupError, UnicodeDecodeError):
# Unknown encoding. The line length may be wrong, as was originally the
# case for utf-8 (see bug 1735846). For now just accept the default
# length, but as we find problems we can either add test for other
# possible encodings or return without an error to protect against
# false positives at the cost of more false negatives.
length = len(line)
if length > EcmaScriptLintRules.max_line_length:
# If the line matches one of the exceptions, then it's ok.
for long_line_regexp in self.GetLongLineExceptions():
if long_line_regexp.match(last_token.line):
return
# If the line consists of only one "word", or multiple words but all
# except one are ignorable, then it's ok.
parts = set(line.split())
# We allow two "words" (type and name) when the line contains @param
max_parts = 1
if '@param' in parts:
max_parts = 2
# Custom tags like @requires may have url like descriptions, so ignore
# the tag, similar to how we handle @see.
custom_tags = set(['@%s' % f for f in FLAGS.custom_jsdoc_tags])
if (len(parts.difference(self.LONG_LINE_IGNORE | custom_tags))
> max_parts):
self._HandleError(
errors.LINE_TOO_LONG,
'Line too long (%d characters).' % len(line), last_token)
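# As a sketch of the ignore logic above: a hypothetical line such as
# ' * @see http://some/very/long/url' passes the check, because after
# removing the ignorable '*' and '@see' only one "word" (the URL) remains,
# which does not exceed max_parts.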
def _CheckJsDocType(self, token, js_type):
"""Checks the given type for style errors.
Args:
token: The DOC_FLAG token for the flag whose type to check.
js_type: The flag's typeannotation.TypeAnnotation instance.
"""
if not js_type: return
if js_type.type_group and len(js_type.sub_types) == 2:
identifiers = [t.identifier for t in js_type.sub_types]
if 'null' in identifiers:
# Don't warn if the identifier is a template type (e.g. {TYPE|null}).
if not identifiers[0].isupper() and not identifiers[1].isupper():
self._HandleError(
errors.JSDOC_PREFER_QUESTION_TO_PIPE_NULL,
'Prefer "?Type" to "Type|null": "%s"' % js_type, token)
# TODO(user): We should report an error for wrong usage of '?' and '|'
# e.g. {?number|string|null} etc.
for sub_type in js_type.IterTypes():
self._CheckJsDocType(token, sub_type)
def _CheckForMissingSpaceBeforeToken(self, token):
"""Checks for a missing space at the beginning of a token.
Reports a MISSING_SPACE error if the token does not begin with a space,
the previous token doesn't end with a space, and the previous token is on
the same line as the token.
Args:
token: The token being checked
"""
# TODO(user): Check if too many spaces?
if (len(token.string) == len(token.string.lstrip()) and
token.previous and token.line_number == token.previous.line_number and
len(token.previous.string) - len(token.previous.string.rstrip()) == 0):
self._HandleError(
errors.MISSING_SPACE,
'Missing space before "%s"' % token.string,
token,
position=Position.AtBeginning())
def _CheckOperator(self, token):
"""Checks an operator for spacing and line style.
Args:
token: The operator token.
"""
last_code = token.metadata.last_code
if not self._ExpectSpaceBeforeOperator(token):
if (token.previous and token.previous.type == Type.WHITESPACE and
last_code and last_code.type in (Type.NORMAL, Type.IDENTIFIER) and
last_code.line_number == token.line_number):
self._HandleError(
errors.EXTRA_SPACE, 'Extra space before "%s"' % token.string,
token.previous, position=Position.All(token.previous.string))
elif (token.previous and
not token.previous.IsComment() and
not tokenutil.IsDot(token) and
token.previous.type in Type.EXPRESSION_ENDER_TYPES):
self._HandleError(errors.MISSING_SPACE,
'Missing space before "%s"' % token.string, token,
position=Position.AtBeginning())
# Check wrapping of operators.
next_code = tokenutil.GetNextCodeToken(token)
is_dot = tokenutil.IsDot(token)
wrapped_before = last_code and last_code.line_number != token.line_number
wrapped_after = next_code and next_code.line_number != token.line_number
if FLAGS.dot_on_next_line and is_dot and wrapped_after:
self._HandleError(
errors.LINE_ENDS_WITH_DOT,
'"." must go on the following line',
token)
if (not is_dot and wrapped_before and
not token.metadata.IsUnaryOperator()):
self._HandleError(
errors.LINE_STARTS_WITH_OPERATOR,
'Binary operator must go on previous line "%s"' % token.string,
token)
def _IsLabel(self, token):
# A ':' token is considered part of a label if it occurs in a case
# statement, a plain label, or an object literal, i.e. is not part of a
# ternary.
return (token.string == ':' and
token.metadata.context.type in (Context.LITERAL_ELEMENT,
Context.CASE_BLOCK,
Context.STATEMENT))
def _ExpectSpaceBeforeOperator(self, token):
"""Returns whether a space should appear before the given operator token.
Args:
token: The operator token.
Returns:
Whether there should be a space before the token.
"""
if token.string == ',' or token.metadata.IsUnaryPostOperator():
return False
if tokenutil.IsDot(token):
return False
# Colons should appear in labels, object literals, the case of a switch
# statement, and ternary operator. Only want a space in the case of the
# ternary operator.
if self._IsLabel(token):
return False
if token.metadata.IsUnaryOperator() and token.IsFirstInLine():
return False
return True
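# Illustrative outcomes for some hypothetical snippets:
#   '=' in 'a = b'   -> True (binary operators take a leading space)
#   ',' in 'a, b'    -> False (no space before a comma)
#   '.' in 'foo.bar' -> False (no space around a dot)
#   ':' in '{a: 1}'  -> False (an object-literal colon is a label)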
def CheckToken(self, token, state):
"""Checks a token, given the current parser_state, for warnings and errors.
Args:
token: The current token under consideration
state: parser_state object that indicates the current state in the page
"""
# Store some convenience variables
first_in_line = token.IsFirstInLine()
last_in_line = token.IsLastInLine()
last_non_space_token = state.GetLastNonSpaceToken()
token_type = token.type
# Process the line change.
if not self._is_html and error_check.ShouldCheck(Rule.INDENTATION):
# TODO(robbyw): Support checking indentation in HTML files.
indentation_errors = self._indentation.CheckToken(token, state)
for indentation_error in indentation_errors:
self._HandleError(*indentation_error)
if last_in_line:
self._CheckLineLength(token, state)
if token_type == Type.PARAMETERS:
# Find missing spaces in parameter lists.
if self.MISSING_PARAMETER_SPACE.search(token.string):
fix_data = ', '.join([s.strip() for s in token.string.split(',')])
self._HandleError(errors.MISSING_SPACE, 'Missing space after ","',
token, position=None, fix_data=fix_data.strip())
# Find extra spaces at the beginning of parameter lists. Make sure
# we aren't at the beginning of a continuing multi-line list.
if not first_in_line:
space_count = len(token.string) - len(token.string.lstrip())
if space_count:
self._HandleError(errors.EXTRA_SPACE, 'Extra space after "("',
token, position=Position(0, space_count))
elif (token_type == Type.START_BLOCK and
token.metadata.context.type == Context.BLOCK):
self._CheckForMissingSpaceBeforeToken(token)
elif token_type == Type.END_BLOCK:
last_code = token.metadata.last_code
if state.InFunction() and state.IsFunctionClose():
if state.InTopLevelFunction():
# A semicolon should not be included at the end of a function
# declaration.
if not state.InAssignedFunction():
if not last_in_line and token.next.type == Type.SEMICOLON:
self._HandleError(
errors.ILLEGAL_SEMICOLON_AFTER_FUNCTION,
'Illegal semicolon after function declaration',
token.next, position=Position.All(token.next.string))
# A semicolon should be included at the end of a function expression
# that is not immediately called or used by a dot operator.
if (state.InAssignedFunction() and token.next
and token.next.type != Type.SEMICOLON):
next_token = tokenutil.GetNextCodeToken(token)
is_immediately_used = (next_token.type == Type.START_PAREN or
tokenutil.IsDot(next_token))
if not is_immediately_used:
self._HandleError(
errors.MISSING_SEMICOLON_AFTER_FUNCTION,
'Missing semicolon after function assigned to a variable',
token, position=Position.AtEnd(token.string))
if state.InInterfaceMethod() and last_code.type != Type.START_BLOCK:
self._HandleError(errors.INTERFACE_METHOD_CANNOT_HAVE_CODE,
'Interface methods cannot contain code', last_code)
elif (state.IsBlockClose() and
token.next and token.next.type == Type.SEMICOLON):
if (last_code.metadata.context.parent.type != Context.OBJECT_LITERAL
and last_code.metadata.context.type != Context.OBJECT_LITERAL):
self._HandleError(
errors.REDUNDANT_SEMICOLON,
'No semicolon is required to end a code block',
token.next, position=Position.All(token.next.string))
elif token_type == Type.SEMICOLON:
if token.previous and token.previous.type == Type.WHITESPACE:
self._HandleError(
errors.EXTRA_SPACE, 'Extra space before ";"',
token.previous, position=Position.All(token.previous.string))
if token.next and token.next.line_number == token.line_number:
if token.metadata.context.type != Context.FOR_GROUP_BLOCK:
# TODO(robbyw): Error about no multi-statement lines.
pass
elif token.next.type not in (
Type.WHITESPACE, Type.SEMICOLON, Type.END_PAREN):
self._HandleError(
errors.MISSING_SPACE,
'Missing space after ";" in for statement',
token.next,
position=Position.AtBeginning())
last_code = token.metadata.last_code
if last_code and last_code.type == Type.SEMICOLON:
# Allow a single double semi colon in for loops for cases like:
# for (;;) { }.
# NOTE(user): This is not a perfect check, and will not throw an error
# for cases like: for (var i = 0;; i < n; i++) {}, but then your code
# probably won't work either.
for_token = tokenutil.CustomSearch(
last_code,
lambda token: token.type == Type.KEYWORD and token.string == 'for',
end_func=lambda token: token.type == Type.SEMICOLON,
distance=None,
reverse=True)
if not for_token:
self._HandleError(errors.REDUNDANT_SEMICOLON, 'Redundant semicolon',
token, position=Position.All(token.string))
elif token_type == Type.START_PAREN:
# Ensure that there is a space between a keyword and an opening
# parenthesis, unless the keyword is invoked like a member function.
if (token.previous and token.previous.type == Type.KEYWORD and
(not token.previous.metadata or
not token.previous.metadata.last_code or
not token.previous.metadata.last_code.string or
token.previous.metadata.last_code.string[-1:] != '.')):
self._HandleError(errors.MISSING_SPACE, 'Missing space before "("',
token, position=Position.AtBeginning())
elif token.previous and token.previous.type == Type.WHITESPACE:
before_space = token.previous.previous
# Ensure that there is no extra space before a function invocation,
# even if the function being invoked happens to be a keyword.
if (before_space and before_space.line_number == token.line_number and
(before_space.type == Type.IDENTIFIER or
(before_space.type == Type.KEYWORD and before_space.metadata and
before_space.metadata.last_code and
before_space.metadata.last_code.string and
before_space.metadata.last_code.string[-1:] == '.'))):
self._HandleError(
errors.EXTRA_SPACE, 'Extra space before "("',
token.previous, position=Position.All(token.previous.string))
elif token_type == Type.START_BRACKET:
self._HandleStartBracket(token, last_non_space_token)
elif token_type in (Type.END_PAREN, Type.END_BRACKET):
# Ensure there is no space before closing parentheses, except when
# it's in a for statement with an omitted section, or when it's at the
# beginning of a line.
if (token.previous and token.previous.type == Type.WHITESPACE and
not token.previous.IsFirstInLine() and
not (last_non_space_token and last_non_space_token.line_number ==
token.line_number and
last_non_space_token.type == Type.SEMICOLON)):
self._HandleError(
errors.EXTRA_SPACE, 'Extra space before "%s"' %
token.string, token.previous,
position=Position.All(token.previous.string))
elif token_type == Type.WHITESPACE:
if self.ILLEGAL_TAB.search(token.string):
if token.IsFirstInLine():
if token.next:
self._HandleError(
errors.ILLEGAL_TAB,
'Illegal tab in whitespace before "%s"' % token.next.string,
token, position=Position.All(token.string))
else:
self._HandleError(
errors.ILLEGAL_TAB,
'Illegal tab in whitespace',
token, position=Position.All(token.string))
else:
self._HandleError(
errors.ILLEGAL_TAB,
'Illegal tab in whitespace after "%s"' % token.previous.string,
token, position=Position.All(token.string))
# Check whitespace length if it's not the first token of the line and
# if it's not immediately before a comment.
if last_in_line:
# Check for extra whitespace at the end of a line.
self._HandleError(errors.EXTRA_SPACE, 'Extra space at end of line',
token, position=Position.All(token.string))
elif not first_in_line and not token.next.IsComment():
if token.length > 1:
self._HandleError(
errors.EXTRA_SPACE, 'Extra space after "%s"' %
token.previous.string, token,
position=Position(1, len(token.string) - 1))
elif token_type == Type.OPERATOR:
self._CheckOperator(token)
elif token_type == Type.DOC_FLAG:
flag = token.attached_object
if flag.flag_type == 'bug':
# TODO(robbyw): Check for exactly 1 space on the left.
string = token.next.string.lstrip()
string = string.split(' ', 1)[0]
if not string.isdigit():
self._HandleError(errors.NO_BUG_NUMBER_AFTER_BUG_TAG,
'@bug should be followed by a bug number', token)
elif flag.flag_type == 'suppress':
if flag.type is None:
# A syntactically invalid suppress tag will get tokenized as a normal
# flag, indicating an error.
self._HandleError(
errors.INCORRECT_SUPPRESS_SYNTAX,
'Invalid suppress syntax: should be @suppress {errortype}. '
'Spaces matter.', token)
else:
for suppress_type in flag.jstype.IterIdentifiers():
if suppress_type not in state.GetDocFlag().SUPPRESS_TYPES:
self._HandleError(
errors.INVALID_SUPPRESS_TYPE,
'Invalid suppression type: %s' % suppress_type, token)
elif (error_check.ShouldCheck(Rule.WELL_FORMED_AUTHOR) and
flag.flag_type == 'author'):
# TODO(user): In non strict mode check the author tag for as much as
# it exists, though the full form checked below isn't required.
string = token.next.string
result = self.AUTHOR_SPEC.match(string)
if not result:
self._HandleError(errors.INVALID_AUTHOR_TAG_DESCRIPTION,
'Author tag line should be of the form: '
'@author foo@somewhere.com (Your Name)',
token.next)
else:
# Check spacing between email address and name. Do this before
# checking earlier spacing so positions are easier to calculate for
# autofixing.
num_spaces = len(result.group(2))
if num_spaces < 1:
self._HandleError(errors.MISSING_SPACE,
'Missing space after email address',
token.next, position=Position(result.start(2), 0))
elif num_spaces > 1:
self._HandleError(
errors.EXTRA_SPACE, 'Extra space after email address',
token.next,
position=Position(result.start(2) + 1, num_spaces - 1))
# Check for extra spaces before email address. There can't be too few:
# without at least one space the @author tag wouldn't have matched.
num_spaces = len(result.group(1))
if num_spaces > 1:
self._HandleError(errors.EXTRA_SPACE,
'Extra space before email address',
token.next, position=Position(1, num_spaces - 1))
elif (flag.flag_type in state.GetDocFlag().HAS_DESCRIPTION and
not self._limited_doc_checks):
if flag.flag_type == 'param':
if flag.name is None:
self._HandleError(errors.MISSING_JSDOC_PARAM_NAME,
'Missing name in @param tag', token)
if not flag.description or flag.description is None:
flag_name = token.type
if 'name' in token.values:
flag_name = '@' + token.values['name']
if flag_name not in self.JSDOC_FLAGS_DESCRIPTION_NOT_REQUIRED:
self._HandleError(
errors.MISSING_JSDOC_TAG_DESCRIPTION,
'Missing description in %s tag' % flag_name, token)
else:
self._CheckForMissingSpaceBeforeToken(flag.description_start_token)
if flag.HasType():
if flag.type_start_token is not None:
self._CheckForMissingSpaceBeforeToken(
token.attached_object.type_start_token)
if flag.jstype and not flag.jstype.IsEmpty():
self._CheckJsDocType(token, flag.jstype)
if error_check.ShouldCheck(Rule.BRACES_AROUND_TYPE) and (
flag.type_start_token.type != Type.DOC_START_BRACE or
flag.type_end_token.type != Type.DOC_END_BRACE):
self._HandleError(
errors.MISSING_BRACES_AROUND_TYPE,
'Type must always be surrounded by curly braces.', token)
if token_type in (Type.DOC_FLAG, Type.DOC_INLINE_FLAG):
if (token.values['name'] not in state.GetDocFlag().LEGAL_DOC and
token.values['name'] not in FLAGS.custom_jsdoc_tags):
self._HandleError(
errors.INVALID_JSDOC_TAG,
'Invalid JsDoc tag: %s' % token.values['name'], token)
if (error_check.ShouldCheck(Rule.NO_BRACES_AROUND_INHERIT_DOC) and
token.values['name'] == 'inheritDoc' and
token_type == Type.DOC_INLINE_FLAG):
self._HandleError(errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC,
'Unnecessary braces around @inheritDoc',
token)
elif token_type == Type.SIMPLE_LVALUE:
identifier = token.values['identifier']
if ((not state.InFunction() or state.InConstructor()) and
state.InTopLevel() and not state.InObjectLiteralDescendant()):
jsdoc = state.GetDocComment()
if not state.HasDocComment(identifier):
# Only test for documentation on identifiers with .s in them to
# avoid checking things like simple variables. We don't require
# documenting assignments to .prototype itself (bug 1880803).
if (not state.InConstructor() and
identifier.find('.') != -1 and not
identifier.endswith('.prototype') and not
self._limited_doc_checks):
comment = state.GetLastComment()
if not (comment and comment.lower().count('jsdoc inherited')):
self._HandleError(
errors.MISSING_MEMBER_DOCUMENTATION,
"No docs found for member '%s'" % identifier,
token)
elif jsdoc and (not state.InConstructor() or
identifier.startswith('this.')):
# We are at the top level and the function/member is documented.
if identifier.endswith('_') and not identifier.endswith('__'):
# Can have a private class which inherits documentation from a
# public superclass.
#
# @inheritDoc is deprecated in favor of using @override, and they
# are permitted to co-exist with each other.
if (jsdoc.HasFlag('override') and not jsdoc.HasFlag('constructor')
and ('accessControls' not in jsdoc.suppressions)):
self._HandleError(
errors.INVALID_OVERRIDE_PRIVATE,
'%s should not override a private member.' % identifier,
jsdoc.GetFlag('override').flag_token)
if (jsdoc.HasFlag('inheritDoc') and not jsdoc.HasFlag('constructor')
and ('accessControls' not in jsdoc.suppressions)):
self._HandleError(
errors.INVALID_INHERIT_DOC_PRIVATE,
'%s should not inherit from a private member.' % identifier,
jsdoc.GetFlag('inheritDoc').flag_token)
if (not jsdoc.HasFlag('private') and
('underscore' not in jsdoc.suppressions) and not
((jsdoc.HasFlag('inheritDoc') or jsdoc.HasFlag('override')) and
('accessControls' in jsdoc.suppressions))):
self._HandleError(
errors.MISSING_PRIVATE,
'Member "%s" must have @private JsDoc.' %
identifier, token)
if jsdoc.HasFlag('private') and 'underscore' in jsdoc.suppressions:
self._HandleError(
errors.UNNECESSARY_SUPPRESS,
'@suppress {underscore} is not necessary with @private',
jsdoc.suppressions['underscore'])
elif (jsdoc.HasFlag('private') and
not self.InExplicitlyTypedLanguage()):
# It is convention to hide public fields in some ECMA
# implementations from documentation using the @private tag.
self._HandleError(
errors.EXTRA_PRIVATE,
'Member "%s" must not have @private JsDoc' %
identifier, token)
# These flags are only legal on localizable message definitions;
# such variables always begin with the prefix MSG_.
for f in ('desc', 'hidden', 'meaning'):
if (jsdoc.HasFlag(f)
and not identifier.startswith('MSG_')
and identifier.find('.MSG_') == -1):
self._HandleError(
errors.INVALID_USE_OF_DESC_TAG,
'Member "%s" should not have @%s JsDoc' % (identifier, f),
token)
# Check for illegally assigning live objects as prototype property values.
index = identifier.find('.prototype.')
# Ignore anything with additional .s after the prototype.
# Ignore members that start with s_
if index != -1 and (identifier.find('.', index + 11) == -1 and
not identifier[index + 11:].startswith('s_')):
equal_operator = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
next_code = tokenutil.SearchExcept(equal_operator, Type.NON_CODE_TYPES)
if next_code and (
next_code.type in (Type.START_BRACKET, Type.START_BLOCK) or
next_code.IsOperator('new')):
self._HandleError(
errors.ILLEGAL_PROTOTYPE_MEMBER_VALUE,
'Member %s cannot have a non-primitive value' % identifier,
token)
elif token_type == Type.END_PARAMETERS:
# Find extra space at the end of parameter lists. We check the token
# prior to the current one when it is a closing paren.
if (token.previous and token.previous.type == Type.PARAMETERS
and self.ENDS_WITH_SPACE.search(token.previous.string)):
self._HandleError(errors.EXTRA_SPACE, 'Extra space before ")"',
token.previous)
jsdoc = state.GetDocComment()
if state.GetFunction().is_interface:
if token.previous and token.previous.type == Type.PARAMETERS:
self._HandleError(
errors.INTERFACE_CONSTRUCTOR_CANNOT_HAVE_PARAMS,
'Interface constructor cannot have parameters',
token.previous)
elif (state.InTopLevel() and jsdoc and not jsdoc.HasFlag('see')
and not jsdoc.InheritsDocumentation()
and not state.InObjectLiteralDescendant() and not
jsdoc.IsInvalidated()):
distance, edit = jsdoc.CompareParameters(state.GetParams())
if distance:
params_iter = iter(state.GetParams())
docs_iter = iter(jsdoc.ordered_params)
for op in edit:
if op == 'I':
# Insertion.
# Parsing doc comments is the same for all languages, but not all
# languages care about parameters that lack doc comments.
# Languages that don't allow variables to be typed, such as
# JavaScript, care; languages that do allow typed variables, such as
# ActionScript or Java, don't.
if not self._limited_doc_checks:
self.HandleMissingParameterDoc(token, params_iter.next())
elif op == 'D':
# Deletion
self._HandleError(errors.EXTRA_PARAMETER_DOCUMENTATION,
'Found docs for non-existing parameter: "%s"' %
docs_iter.next(), token)
elif op == 'S':
# Substitution
if not self._limited_doc_checks:
self._HandleError(
errors.WRONG_PARAMETER_DOCUMENTATION,
'Parameter mismatch: got "%s", expected "%s"' %
(params_iter.next(), docs_iter.next()), token)
else:
# Equality - just advance the iterators
params_iter.next()
docs_iter.next()
elif token_type == Type.STRING_TEXT:
# If this is the first token after the start of the string, but it's at
# the end of a line, we know we have a multi-line string.
if token.previous.type in (
Type.SINGLE_QUOTE_STRING_START,
Type.DOUBLE_QUOTE_STRING_START) and last_in_line:
self._HandleError(errors.MULTI_LINE_STRING,
'Multi-line strings are not allowed', token)
# This check is orthogonal to the ones above, and repeats some types, so
# it is a plain if and not an elif.
if token.type in Type.COMMENT_TYPES:
if self.ILLEGAL_TAB.search(token.string):
self._HandleError(errors.ILLEGAL_TAB,
'Illegal tab in comment "%s"' % token.string, token)
trimmed = token.string.rstrip()
if last_in_line and token.string != trimmed:
# Check for extra whitespace at the end of a line.
self._HandleError(
errors.EXTRA_SPACE, 'Extra space at end of line', token,
position=Position(len(trimmed), len(token.string) - len(trimmed)))
# This check is also orthogonal since it is based on metadata.
if token.metadata.is_implied_semicolon:
self._HandleError(errors.MISSING_SEMICOLON,
'Missing semicolon at end of line', token)
def _HandleStartBracket(self, token, last_non_space_token):
"""Handles a token that is an open bracket.
Args:
token: The token to handle.
last_non_space_token: The last token that was not a space.
"""
if (not token.IsFirstInLine() and token.previous.type == Type.WHITESPACE and
last_non_space_token and
last_non_space_token.type in Type.EXPRESSION_ENDER_TYPES):
self._HandleError(
errors.EXTRA_SPACE, 'Extra space before "["',
token.previous, position=Position.All(token.previous.string))
# If the [ token is the first token in a line we shouldn't complain
# about a missing space before [. This is because some Ecma script
# languages allow syntax like:
# [Annotation]
# class MyClass {...}
# So we don't want to blindly warn about missing spaces before [.
# In the future, when rules for computing exactly how many spaces
# lines should be indented are added, then we can return errors for
# [ tokens that are improperly indented.
# For example:
# var someVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryLongVariableName =
# [a,b,c];
# should trigger a proper indentation warning message as [ is not indented
# by four spaces.
elif (not token.IsFirstInLine() and token.previous and
token.previous.type not in (
[Type.WHITESPACE, Type.START_PAREN, Type.START_BRACKET] +
Type.EXPRESSION_ENDER_TYPES)):
self._HandleError(errors.MISSING_SPACE, 'Missing space before "["',
token, position=Position.AtBeginning())
def Finalize(self, state):
"""Perform all checks that need to occur after all lines are processed.
Args:
state: State of the parser after parsing all tokens
"""
last_non_space_token = state.GetLastNonSpaceToken()
# Check last line for ending with newline.
if state.GetLastLine() and not (
state.GetLastLine().isspace() or
state.GetLastLine().rstrip('\n\r\f') != state.GetLastLine()):
self._HandleError(
errors.FILE_MISSING_NEWLINE,
'File does not end with new line. (%s)' % state.GetLastLine(),
last_non_space_token)
try:
self._indentation.Finalize()
except Exception, e:
self._HandleError(
errors.FILE_DOES_NOT_PARSE,
str(e),
last_non_space_token)
def GetLongLineExceptions(self):
"""Gets a list of regexps for lines which can be longer than the limit.
Returns:
A list of regexps, used as matches (rather than searches).
"""
return []
def InExplicitlyTypedLanguage(self):
"""Returns whether this ecma implementation is explicitly typed."""
return False
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""UnwrappedLine primitive for formatting.
An unwrapped line is the containing data structure produced by the parser. It
collects all nodes (stored in FormatToken objects) that could appear on a
single line if there were no line length restrictions. It's then used by the
parser to perform the wrapping required to comply with the style guide.
"""
from lib2to3 import pytree
from yapf.yapflib import format_token
from yapf.yapflib import pytree_utils
from yapf.yapflib import split_penalty
from yapf.yapflib import style
class UnwrappedLine(object):
"""Represents a single unwrapped line in the output.
Attributes:
depth: indentation depth of this line. This is just a numeric value used to
distinguish lines that are more deeply nested than others. It is not the
actual amount of spaces, which is style-dependent.
"""
def __init__(self, depth, tokens=None):
"""Constructor.
Creates a new unwrapped line with the given depth and an initial list of
tokens. Constructs the doubly-linked list of format tokens using their
built-in next_token and previous_token attributes.
Arguments:
depth: indentation depth of this line
tokens: initial list of tokens
"""
self.depth = depth
self._tokens = tokens or []
self.disable = False
if self._tokens:
# Set up a doubly linked list.
for index, tok in enumerate(self._tokens[1:]):
# Note, 'index' is the index to the previous token.
tok.previous_token = self._tokens[index]
self._tokens[index].next_token = tok
def CalculateFormattingInformation(self):
"""Calculate the split penalty and total length for the tokens."""
# Say that the first token in the line should have a space before it. This
# means only that if this unwrapped line is joined with a predecessor line,
# then there will be a space between them.
self.first.spaces_required_before = 1
self.first.total_length = len(self.first.value)
prev_token = self.first
prev_length = self.first.total_length
for token in self._tokens[1:]:
if (token.spaces_required_before == 0 and
_SpaceRequiredBetween(prev_token, token)):
token.spaces_required_before = 1
# The split penalty has to be computed before {must|can}_break_before,
# because these may use it for their decision.
token.split_penalty += _SplitPenalty(prev_token, token)
token.must_break_before = _MustBreakBefore(prev_token, token)
token.can_break_before = (token.must_break_before or
_CanBreakBefore(prev_token, token))
token.total_length = (
prev_length + len(token.value) + token.spaces_required_before
)
prev_length = token.total_length
prev_token = token
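# As a sketch of the bookkeeping above: for the three tokens of a
# hypothetical line 'x = 1', with one required space before '=' and one
# before '1', total_length grows 1 -> 3 -> 5, i.e. the length of the
# joined text 'x = 1'.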
############################################################################
# Token Access and Manipulation Methods #
############################################################################
def AppendToken(self, token):
"""Append a new FormatToken to the tokens contained in this line."""
if self._tokens:
token.previous_token = self.last
self.last.next_token = token
self._tokens.append(token)
def AppendNode(self, node):
"""Convenience method to append a pytree node directly.
Wraps the node with a FormatToken.
Arguments:
node: the node to append
"""
assert isinstance(node, pytree.Leaf)
self.AppendToken(format_token.FormatToken(node))
@property
def first(self):
"""Returns the first non-whitespace token."""
return self._tokens[0]
@property
def last(self):
"""Returns the last non-whitespace token."""
return self._tokens[-1]
############################################################################
# Token -> String Methods #
############################################################################
def AsCode(self, indent_per_depth=2):
"""Return a "code" representation of this line.
The code representation shows how the line would be printed out as code.
TODO(eliben): for now this is rudimentary for debugging - once we add
formatting capabilities, this method will have other uses (not all tokens
have spaces around them, for example).
Arguments:
indent_per_depth: how many spaces to indent per depth level.
Returns:
A string representing the line as code.
"""
indent = ' ' * indent_per_depth * self.depth
tokens_str = ' '.join(tok.value for tok in self._tokens)
return indent + tokens_str
def __str__(self):
return self.AsCode()
def __repr__(self):
tokens_repr = ','.join(['{0}({1!r})'.format(tok.name, tok.value)
for tok in self._tokens])
return 'UnwrappedLine(depth={0}, tokens=[{1}])'.format(self.depth,
tokens_repr)
############################################################################
# Properties #
############################################################################
@property
def tokens(self):
"""Access the tokens contained within this line.
The caller must not modify the tokens list returned by this method.
Returns:
List of tokens in this line.
"""
return self._tokens
@property
def lineno(self):
"""Return the line number of this unwrapped line.
Returns:
The line number of the first token in this unwrapped line.
"""
return self.first.lineno
@property
def is_comment(self):
return self.first.is_comment
def _IsIdNumberStringToken(tok):
return tok.is_keyword or tok.is_name or tok.is_number or tok.is_string
def _IsUnaryOperator(tok):
return format_token.Subtype.UNARY_OPERATOR in tok.subtypes
def _SpaceRequiredBetween(left, right):
"""Return True if a space is required between the left and right token."""
if left.is_continuation or right.is_continuation:
# The continuation node's value has all of the spaces it needs.
return False
if right.name in pytree_utils.NONSEMANTIC_TOKENS:
# No space before a non-semantic token.
return False
if _IsIdNumberStringToken(left) and _IsIdNumberStringToken(right):
# Spaces between keyword, string, number, and identifier tokens.
return True
if left.value == ',' and right.value == ':':
# We do want a space between a comma and colon.
return True
if right.value in ':,':
# Otherwise, we never want a space before a colon or comma.
return False
if left.value == ',' and right.value in ']})':
# Add a space between ending ',' and closing bracket if requested.
return style.Get('SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET')
if left.value == ',':
# We want a space after a comma.
return True
if left.value == 'from' and right.value == '.':
# Space before the '.' in an import statement.
return True
if left.value == '.' and right.value == 'import':
# Space after the '.' in an import statement.
return True
if ((right.is_keyword or right.is_name) and
(left.is_keyword or left.is_name)):
# Don't merge two keywords/identifiers.
return True
if left.is_string and right.value not in '[)]}.':
# A string followed by something other than a subscript, closing bracket,
# or dot should have a space after it.
return True
if left.is_binary_op and _IsUnaryOperator(right):
# Space between the binary operator and the unary operator.
return True
if _IsUnaryOperator(left) and _IsUnaryOperator(right):
# No space between two unary operators.
return False
if left.is_binary_op or right.is_binary_op:
# Enforce spaces around binary operators.
return True
if (_IsUnaryOperator(left) and left.value != 'not' and
(right.is_name or right.is_number or right.value == '(')):
# The previous token was a unary op. No space is desired between it and
# the current token.
return False
if (format_token.Subtype.SUBSCRIPT_COLON in left.subtypes or
format_token.Subtype.SUBSCRIPT_COLON in right.subtypes):
# A subscript shouldn't have spaces separating its colons.
return False
if (format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN in left.subtypes or
format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN in right.subtypes):
# A named argument or default parameter shouldn't have spaces around it.
return False
if (format_token.Subtype.VARARGS_STAR in left.subtypes or
format_token.Subtype.KWARGS_STAR_STAR in left.subtypes):
# Don't add a space after a vararg's star or a keyword's star-star.
return False
if left.value == '@':
# Decorators shouldn't be separated from the 'at' sign.
return False
if left.value == '.' or right.value == '.':
# Don't place spaces between dots.
return False
if ((left.value == '(' and right.value == ')') or
(left.value == '[' and right.value == ']') or
(left.value == '{' and right.value == '}')):
# Empty objects shouldn't be separated by spaces.
return False
if (left.value in pytree_utils.OPENING_BRACKETS and
right.value in pytree_utils.OPENING_BRACKETS):
# Nested objects' opening brackets shouldn't be separated.
return False
if (left.value in pytree_utils.CLOSING_BRACKETS and
right.value in pytree_utils.CLOSING_BRACKETS):
# Nested objects' closing brackets shouldn't be separated.
return False
if left.value in pytree_utils.CLOSING_BRACKETS and right.value in '([':
# A call, set, dictionary, or subscript that has a call or subscript after
# it shouldn't have a space between them.
return False
if (left.value in pytree_utils.OPENING_BRACKETS and
_IsIdNumberStringToken(right)):
# Don't separate the opening bracket from the first item.
return False
if left.is_name and right.value in '([':
# Don't separate a call or array access from the name.
return False
if right.value in pytree_utils.CLOSING_BRACKETS:
# Don't separate the closing bracket from the last item.
# FIXME(morbo): This might be too permissive.
return False
if left.value == 'print' and right.value == '(':
# Special support for the 'print' function.
return False
if left.value in pytree_utils.OPENING_BRACKETS and _IsUnaryOperator(right):
# Don't separate a unary operator from the opening bracket.
return False
if (left.value in pytree_utils.OPENING_BRACKETS and
(format_token.Subtype.VARARGS_STAR in right.subtypes or
format_token.Subtype.KWARGS_STAR_STAR in right.subtypes)):
# Don't separate a '*' or '**' from the opening bracket.
return False
if right.value == ';':
# Avoid spaces before a semicolon. (Why is there a semicolon?!)
return False
return True
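# Illustrative outcomes of the rules above for hypothetical token pairs:
#   'foo' followed by '(' -> False (a name is glued to its call parens)
#   'a' followed by '+'   -> True (operators are padded with spaces)
#   '=' in 'f(a=1)'       -> False (DEFAULT_OR_NAMED_ASSIGN subtype)
#   ',' followed by ')'   -> governed by the
#                            SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET
#                            style option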
def _MustBreakBefore(prev_token, cur_token):
"""Return True if a line break is required before the current token."""
if prev_token.is_comment:
# Must break if the previous token was a comment.
return True
if (_IsSurroundedByBrackets(cur_token) and cur_token.is_string and
prev_token.is_string):
# We want consecutive strings to be on separate lines. This is a
# reasonable assumption, because otherwise they should have written them
# all on the same line, or with a '+'.
return True
return pytree_utils.GetNodeAnnotation(cur_token.node,
pytree_utils.Annotation.MUST_SPLIT,
default=False)
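# For example, two adjacent string literals inside a bracketed context, as
# in the hypothetical ['a' 'b'], are forced onto separate lines by the
# consecutive-string rule above.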
def _CanBreakBefore(prev_token, cur_token):
"""Return True if a line break may occur before the current token."""
if cur_token.split_penalty >= split_penalty.UNBREAKABLE:
return False
if prev_token.value == '@':
# Don't break right after the beginning of a decorator.
return False
if cur_token.value == ':':
# Don't break before the start of a block of code.
return False
if cur_token.value == ',':
# Don't break before a comma.
return False
if prev_token.is_name and cur_token.value == '(':
# Don't break in the middle of a function definition or call.
return False
if prev_token.is_name and cur_token.value == '[':
# Don't break in the middle of an array dereference.
return False
if prev_token.is_name and cur_token.value == '.':
# Don't break before the '.' in a dotted name.
return False
if cur_token.is_comment and prev_token.lineno == cur_token.lineno:
# Don't break a comment at the end of the line.
return False
# TODO(morbo): There may be more to add here.
return True
def _IsSurroundedByBrackets(tok):
"""Return True if the token is surrounded by brackets."""
paren_count = 0
brace_count = 0
sq_bracket_count = 0
previous_token = tok.previous_token
while previous_token:
if previous_token.value == ')':
paren_count -= 1
elif previous_token.value == '}':
brace_count -= 1
elif previous_token.value == ']':
sq_bracket_count -= 1
if previous_token.value == '(':
if paren_count == 0:
return True
paren_count += 1
elif previous_token.value == '{':
if brace_count == 0:
return True
brace_count += 1
elif previous_token.value == '[':
if sq_bracket_count == 0:
return True
sq_bracket_count += 1
previous_token = previous_token.previous_token
return False
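# Walking backwards from 'b' in the hypothetical "['a', 'b']" passes ','
# and "'a'" without effect and reaches '[' while sq_bracket_count is still
# 0, so the token counts as bracketed; a top-level token exhausts the loop
# and returns False.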
_LOGICAL_OPERATORS = frozenset({'and', 'or'})
_TERM_OPERATORS = frozenset({'*', '/', '%', '//'})
def _SplitPenalty(prev_token, cur_token):
"""Return the penalty for breaking the line before the current token."""
if prev_token.value == 'not':
return split_penalty.UNBREAKABLE
if cur_token.node_split_penalty > 0:
return cur_token.node_split_penalty
if style.Get('SPLIT_BEFORE_LOGICAL_OPERATOR'):
# Prefer to split before 'and' and 'or'.
if prev_token.value in _LOGICAL_OPERATORS:
return style.Get('SPLIT_PENALTY_LOGICAL_OPERATOR')
if cur_token.value in _LOGICAL_OPERATORS:
return 0
else:
# Prefer to split after 'and' and 'or'.
if prev_token.value in _LOGICAL_OPERATORS:
return 0
if cur_token.value in _LOGICAL_OPERATORS:
return style.Get('SPLIT_PENALTY_LOGICAL_OPERATOR')
if (format_token.Subtype.COMP_FOR in cur_token.subtypes or
format_token.Subtype.COMP_IF in cur_token.subtypes):
# We don't mind breaking before the 'for' or 'if' of a list comprehension.
return 0
if format_token.Subtype.UNARY_OPERATOR in prev_token.subtypes:
# Try not to break after a unary operator.
return style.Get('SPLIT_PENALTY_AFTER_UNARY_OPERATOR')
if prev_token.value == ',':
# Breaking after a comma is fine, if need be.
return 0
if prev_token.is_binary_op:
# We would rather not split after a binary operator.
return 20
if (format_token.Subtype.VARARGS_STAR in prev_token.subtypes or
format_token.Subtype.KWARGS_STAR_STAR in prev_token.subtypes):
# Don't split after a varargs * or kwargs **.
return split_penalty.UNBREAKABLE
if prev_token.value in pytree_utils.OPENING_BRACKETS:
# Slightly discourage splitting right after an opening bracket.
return style.Get('SPLIT_PENALTY_AFTER_OPENING_BRACKET')
if cur_token.value == ':':
# Don't split before a colon.
return split_penalty.UNBREAKABLE
if cur_token.value == '=':
# Don't split before an assignment.
return split_penalty.UNBREAKABLE
if (format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN in prev_token.subtypes or
format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN in cur_token.subtypes):
# Don't break before or after a default or named assignment.
return split_penalty.UNBREAKABLE
if cur_token.value == '==':
# We would rather not split before an equality operator.
return split_penalty.STRONGLY_CONNECTED
if prev_token.value in _TERM_OPERATORS or cur_token.value in _TERM_OPERATORS:
return 50
return 0
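# For example, with SPLIT_BEFORE_LOGICAL_OPERATOR set, 'a and b' may break
# before 'and' for free (penalty 0) but pays SPLIT_PENALTY_LOGICAL_OPERATOR
# to break after it; with the option unset, the two penalties swap.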
#!/usr/bin/env python
#
# pKaTool - analysis of systems of titratable groups
# Copyright (C) 2010 Jens Erik Nielsen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Contact information:
# Email: Jens.Nielsen_at_gmail.com
# Normal mail:
# Jens Nielsen
# SBBS, Conway Institute
# University College Dublin
# Dublin 4, Ireland
from Tkinter import *
import Pmw
import tkMessageBox, tkFileDialog
class Main:
def load_pdb(self):
"""Open a window to load a PDB file with the aim of calculating pKa values
Get the PDB file, and see if a calculation has been carried out"""
import os
if not self.pdbfile:
pdbfilename=tkFileDialog.askopenfilename(defaultextension='.pdb',
initialdir=os.getcwd(),
filetypes=[("PDB file","*.pdb"),
("All files","*.*")],
parent=self.master)
if not pdbfilename:
return
else:
pdbfilename=self.pdbfile
#
#
#
self.pdbfilename=os.path.join(os.getcwd(),pdbfilename)
if not os.path.isfile(self.pdbfilename):
tkMessageBox.showwarning("File not found",
'File not found: %s' %pdbfilename
,parent=self.master)
return
#
# Set the name for it
#
self.label_count=0
self.labels='ABCDEFGHIJKLMNOP'
label=self.labels[self.label_count]
#
# Load the PDB file in Protool
#
import Protool
self.Protool_instance=Protool.structureIO()
self.Protool_instance.readpdb(self.pdbfilename)
#
# Setup the calculation arrays
#
import pKaIO
self.calcs[self.pdbfilename]={'instance':pKaIO.pKaIO(self.pdbfilename),'selected_groups':[],'label':label}
self.label_count=self.label_count+1
self.X=self.calcs[self.pdbfilename]['instance']
#
# Open the file and assess status
#
if self.X.assess_status():
import tkMessageBox
answer=tkMessageBox.askyesno('WHAT IF pKa calculation found',
'A WHAT IF pKa calculation has been found for this PDB file.\nDo you want to load the results from this calculation?',
parent=self.master)
if answer:
self.load_titration()
else:
self.pdbfilename_var.set('PDB file: %s' %os.path.split(pdbfilename)[1])
self.pka_status_var.set('pKa calc status: No calculation performed')
del self.calcs[pdbfilename]
else:
self.pdbfilename_var.set('PDB file: %s' %os.path.split(pdbfilename)[1])
self.pka_status_var.set('pKa calc status: No calculation performed')
del self.calcs[pdbfilename]
return
#
# ----
#
def full_pKa_updater(self,message):
self.status2.set(message)
self.master.update_idletasks()
return
def pKaTool_pKa(self):
"""Perform a dirt-cheap pKa calculation using Jens' simple, simple method"""
self.pHstart=0.1
self.pHstop=20.0
self.pHstep=0.1
self.chwin.destroy()
self.simple_win=Toplevel()
self.simple_win.transient(self.master)
self.simple_win.title('pKaTool cheap pKa calculation')
self.set_geometry(self.master,self.simple_win)
self.status=StringVar()
self.status.set('Status: Initialising pKa calculation routines')
Label(self.simple_win,textvariable=self.status).grid(row=5,column=0,columnspan=2)
self.status2=StringVar()
self.status2.set('---')
Label(self.simple_win,textvariable=self.status2,bg='white').grid(row=6,column=0,columnspan=2)
self.update()
#
# Do we have a PDB file?
#
if not self.pdbfilename:
tkMessageBox.showwarning("No PDB file",
"You did not load a PDB file",
parent=self.simple_win)
self.simple_win.destroy()
return
#
# Start the pKa calculation
#
import Protool
import Protool.errors
P=Protool.structureIO()
self.status.set('Reading PDB file')
self.update()
P.readpdb(self.pdbfilename)
self.status.set('Removing non AA atoms')
P.Remove_All_NonAminoAcids()
try:
self.status.set('Calculating site-site interaction energies')
self.update()
matrix=P.calculate_matrix(self.pdbfilename,updater=self.full_pKa_updater)
self.status.set('Calculating desolvation energies')
self.update()
desolv=P.calculate_desolvation(self.pdbfilename,updater=self.full_pKa_updater)
#
self.status.set('Calculating background interaction energies')
self.update()
backgr=P.calculate_background(self.pdbfilename,updater=self.full_pKa_updater)
except Protool.errors.AtomNotFoundError,inst:
tkMessageBox.showwarning("Atom missing",
"At least one atom is missing in the PDB file.\nMake sure that all atoms are present before starting a pKa calculation.\nYou can e.g. use the pdb2pqr server for that.\nError:\n%s" %inst,
parent=self.simple_win)
self.simple_win.destroy()
return
self.status.set('Calculating titration using Monte Carlo sampling')
self.update()
import pKa_calc
try:
C=pKa_calc.Monte_Carlo_CPP()
C.test()
except:
tkMessageBox.showwarning("C++ module not found",
'C++ module pMC not found. I will revert to a python implementation of the Tanford-Roxby algorithm.\n\nIf you are running linux, then you should try to cd to the pKaTool directory and type "make"',
parent=self.master)
self.status.set('Calculating titration curves using Tanford-Roxby algorithm')
self.update()
C=pKa_calc.Tanford_Roxby()
#
# Load the values
#
C.matrix=matrix
C.desolv=desolv
C.backgr=backgr
C.calc_pKas(mcsteps=200000,phstep=self.pHstep,phstart=self.pHstart,phend=self.pHstop,complete_pka=1)
#
# Write the titration curve file and the PKA.DAT file
#
import pKaIO
X=pKaIO.pKaIO()
X.write_titration_curve(self.pdbfilename+'.TITCURV.DAT',C.prot_states)
X.write_pka(self.pdbfilename+'.PKA.DAT',C.pka)
self.status.set('Done')
self.update()
#
# Load the results
#
self.simple_win.destroy()
self.chwin.destroy()
self.load_titration(pdbfilename=self.pdbfilename)
tkMessageBox.showwarning("pKa calculation done",'The pKa calculation has finished',parent=self.master)
return
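#
# In outline, the cheap calculation above runs: site-site interaction
# matrix -> desolvation energies -> background energies -> Monte Carlo
# titration (or the Tanford-Roxby fallback) -> TITCURV.DAT and PKA.DAT
# files on disk, which are then loaded back into the GUI.
#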
#
# ----
#
def WI_pKa(self):
"""Setup for a WHAT IF pKa calculation"""
tkMessageBox.showwarning("Not implemented",'Not implemented yet',parent=self.calc_window)
return
#
# -----
#
def load_frompKD(self,event=None):
"""Load calculated pKa values from the pKD server"""
self.pKD_IFwin=Toplevel()
self.set_position(self.pKD_IFwin)
self.pKD_IFwin.title('pKD server connection')
self.URL='/cgi-bin/pKD/pKaDesign_server.py'
self.HOST='enzyme.ucd.ie'
self.PORT='80'
Label(self.pKD_IFwin,text='pKD server at: http://%s:%s%s' %(self.HOST,self.PORT,self.URL),bg='green').grid(row=0,column=0)
#
# Get the list of proteins that are prepared
#
request ={}
X=HTTP_handler(self.URL,self.HOST,self.PORT)
request['action']='get_prepared_proteins'
request['client']='PKATOOL'
X.load(request)
lines=X.request(return_type='file-object')
#
# Parse the XML
#
proteins=[]
from xml.dom import minidom
xmldoc=minidom.parse(lines)
prot_sec=xmldoc.firstChild
for calc in prot_sec.childNodes:
ID=calc.attributes['id'].value
for attr in calc.childNodes:
if attr.nodeName=='info':
import string
if len(attr.childNodes)>0:
info=string.strip(attr.childNodes[0].data)
else:
info='No info'
proteins.append([ID,info,None])
import string
#
# Calculations found
#
Label(self.pKD_IFwin,text='Calculations on the pKD server').grid(row=1,column=0)
row=2
yscrollbar=Scrollbar(self.pKD_IFwin,orient='vertical',width=14)
yscrollbar.grid(row=row,column=2,sticky='nws',rowspan=5)
self.calculations=Listbox(self.pKD_IFwin,
bg='white',
fg='black',
height=10,width=15,yscrollcommand=yscrollbar.set,
selectmode=MULTIPLE)
self.calculations.grid(row=row,column=0,columnspan=2,sticky='news',padx=2,rowspan=5)
yscrollbar.config(command=self.calculations.yview)
#
# Sort the calcs and insert them
#
self.calculations_ready=proteins
self.calculations_ready.sort()
self.calcs_displayed=self.calculations_ready
for calcname,info,setup in self.calculations_ready:
self.calculations.insert(END,'%6s: %25s' %(calcname,info[:45]))
#
# Box with details
#
self.searchtext=StringVar()
l=Label(self.pKD_IFwin,text='Search ')
l.grid(row=row,column=3,sticky='nes')
self.searchbox=Entry(self.pKD_IFwin,textvariable=self.searchtext)
self.searchbox.grid(row=row,column=4,sticky='w')
self.searchbox.bind('<Return>',self.do_search)
#
#
row=row+1
lbl=Label(self.pKD_IFwin,text='this is where details on the calcs will go in the future')
lbl.grid(row=row,column=3,columnspan=5)
#
# Buttons for selecting
#
row=row+6
Button(self.pKD_IFwin,text='Select All',command=self.pKDselect_all).grid(row=row,column=0)
Button(self.pKD_IFwin,text='Clear selection',command=self.pKDselect_none).grid(row=row,column=1)
#
# Button for loading calculation
#
row=row+1
self.singlebutton=Button(self.pKD_IFwin,text='Load calculation(s)',command=self.do_load_pKDcalcs)
self.singlebutton.grid(row=row,column=0)
Button(self.pKD_IFwin,text='Cancel',command=self.pKD_IFwin.destroy).grid(row=row,column=1)
return
#
# ----
#
def pKDselect_all(self):
"""Select all of the calcs displayed in the listbox"""
self.calculations.selection_set(0,END)
return
#
# -----
#
def pKDselect_none(self):
"""Clear the selection"""
self.calculations.selection_clear(0,END)
return
#
# ----
#
def do_search(self,event=None):
"""Search for the calcs where a certain text is found"""
self.calculations.delete(0,END)
self.calcs_displayed=[]
text=self.searchtext.get()
for calcname,info,setup in self.calculations_ready:
if calcname.find(text)!=-1 or info.find(text)!=-1:
self.calcs_displayed.append([calcname,info,setup])
self.calculations.insert(END,'%6s: %25s' %(calcname,info[:45]))
return
#
# ----
#
def get_pKD_response(self,command,URL=None):
"""Send a command to the pKD server and return the response"""
request ={}
for c in command.keys():
request[c]=command[c]
request['client']='PKATOOL'
#
if not URL:
URL=self.URL
X=HTTP_handler(URL,self.HOST,self.PORT)
X.load(request)
#
lines=X.request().split('\n')
response=[]
import string
for count in range(len(lines)):
line=lines[count]
if string.strip(line)=='PKATOOLOUTPUT:':
response=lines[count+1:]
return response
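# A server reply is expected to look roughly like this (sketch):
#   <HTTP headers and other server chatter>
#   PKATOOLOUTPUT:
#   <payload line 1>
#   <payload line 2>
# Only the lines following the marker are handed back to the caller.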
#
# ----
#
#
# ----
#
def do_load_pKDcalcs(self):
"""Load pKa calculations from the pKD server into pKaTool"""
selection=self.calculations.curselection()
if len(selection)==0:
return
self.pKDname=StringVar()
self.pKDname.set('Ready')
Label(self.pKD_IFwin,textvariable=self.pKDname).grid(row=26,column=0)
count=0
for sel in selection:
count=count+1
self.do_load_singlepKDcalc(int(sel),count,len(selection))
self.pKD_IFwin.destroy()
return
#
# ----
#
def do_load_singlepKDcalc(self,selection,count=1,total=1):
"""Load a single pKD calculation"""
print 'Loading zipfile'
calc_selected,info,setup=self.calcs_displayed[selection]
self.pKDname.set('Loading: %s.... (%3d of %3d)' %(calc_selected,count,total))
self.pKD_IFwin.update_idletasks()
#
# Get the zipfile from the server
#
zip_URL=self.get_pKD_response({'action':'get_pka_calc','PDBID':calc_selected})[0]
#
import urllib, zipfile, tempfile, os
f = urllib.urlopen(zip_URL)
tmp=tempfile.TemporaryFile()
tmp.writelines(f.read())
f.close()
zfile=zipfile.ZipFile(tmp,'r')
tmpdir=tempfile.mkdtemp()
pdbfilename=None
for file in zfile.namelist():
newname=os.path.join(tmpdir,file)
fd=open(newname,'w')
fd.writelines(zfile.read(file))
fd.close()
if newname[-4:]=='.pdb':
pdbfilename=newname
self.load_titration(pdbfilename)
return
#
# ----
#
def do_load_singlepKDcalcXML(self,selection,count=1,total=1):
print 'Loading XMLfile'
calc_selected,info,setup=self.calcs_displayed[selection]
self.pKDname.set('Loading: %s.... (%3d of %3d)' %(calc_selected,count,total))
self.pKD_IFwin.update_idletasks()
#
# Get the XML data from the server
#
request ={}
X=HTTP_handler(self.URL,self.HOST,self.PORT)
request['action']='get_xml_pKa_calc'
request['client']='PKATOOL'
request['PDBID']=calc_selected
X.load(request)
lines=X.request(return_type='file-object')
#print lines.read()
#
# Parse the XML
#
proteins=[]
from xml.dom import minidom
xmldoc=minidom.parse(lines)
pKas=xmldoc.firstChild
for calc in pKas.childNodes:
ID=calc.attributes['id'].value
for attr in calc.childNodes:
pKaval=attr
print '----'
return
#
# -----
#
def get_titratable_groups(self):
"""Get the titratable groups in the current file, with the selected method"""
engine=self.calc_engine.get()
if engine=='PDB2pKa':
self.get_pdb2pKa_groups()
elif engine=='pKaTool' or engine=='Manual energies':
self.Protool_instance.get_titratable_groups()
PT_grps=self.Protool_instance.titratable_groups.keys()
PT_grps.sort()
self.groups=[]
for residue in PT_grps:
for group in self.Protool_instance.titratable_groups[residue]:
self.groups.append(self.Protool_instance.get_titgroup_name(residue,group))
else:
print 'Not implemented yet'
return
#
# ----
#
def file_manager_dialog(self):
"""Get the titratable groups as defined by the PDB2pKa routines"""
self.file_win=Toplevel()
self.file_win.geometry('+400+500')
if self.pdbfilename:
self.files_included=[[self.pdbfilename,'Protein']]
#self.files_included=[]
self.print_included()
return
#
# -----
#
def print_included(self):
"""Print the files that we are considering"""
#
# Do we have any files?
#
if len(self.files_included)<1:
return
Label(self.file_win,text='Files holding structure data').grid(row=0,column=0,columnspan=3)
#
# Print the file(s) that we use
#
import os
row=1
count=1
for name,description in self.files_included:
Label(self.file_win,text='%d' %count,bg='white').grid(row=row,column=0,sticky='news')
Label(self.file_win,text='%s' %os.path.split(name)[1],bg='white').grid(row=row,column=1,sticky='news')
Label(self.file_win,text='%s' %description,bg='white').grid(row=row,column=2,sticky='news')
count=count+1
row=row+1
Button(self.file_win,text='Add another file',command=self.add_file).grid(row=row,column=0,columnspan=1,sticky='nws')
Button(self.file_win,text='Done',command=self.really_getgroups).grid(row=row,column=2,columnspan=1,sticky='nes')
self.update()
return
#
# -----
#
def really_getgroups(self):
"""Divide the files listed into protein and ligand, and initialise the routines"""
proteins=[]
ligands=[]
for file,ftype in self.files_included:
if ftype=='Protein':
proteins.append(file)
else:
ligands.append(file)
#
# Right now we can only deal with one file of each type
#
pdbfile=proteins[0]
if len(ligands)>0:
ligandfile=ligands[0]
else:
ligandfile=None
#
# Now get the groups
#
if self.pdb2pKa_path.get()=="":
tkMessageBox.showerror('PDB2pKa missing',
'You did not specify the path to PDB2pKa')
return
import sys
sys.path.append('/home/nielsen/lib/pdb2pqr_develop/pKa')
import pka
myProtein, myRoutines, myForcefield,apbs_setup, ligand_titratable_groups=pka.pre_init(pdbfile=pdbfile,ff='parse',ligand=ligandfile)
print 'Return values from pdb2pqr pka'
print myProtein
print myRoutines
print myForcefield
print apbs_setup
print ligand_titratable_groups
groups=pka.pKaRoutines(myProtein, myRoutines, myForcefield,apbs_setup)
print '-------'
print groups
return
#
# ----
#
def add_file(self,event=None):
"""Browse for another file to add to self.included_files"""
import os
newfilename=tkFileDialog.askopenfilename(defaultextension='.mol2',
initialdir=os.getcwd(),
filetypes=[("Mol2 file",'*.mol2'),
("PDB file","*.pdb"),
("All files","*.*")],
parent=self.groupwin)
if not newfilename:
return
ftype='Protein'
if os.path.isfile(newfilename):
if newfilename[-4:].lower()=='mol2':
ftype='Ligand'
#
self.files_included.append([newfilename,ftype])
self.print_included()
return
#
# -----
#
def init_calc(self):
"""Initialise a pKa calculation
This involves getting the titratable groups"""
#
# Do we have a PDB file?
#
#if not self.pdbfilename:
# import tkMessageBox
# tkMessageBox.showwarning('No PDB file',
# 'You have to load a PDB file before performing a pKa calculation')
# return
#
# Open the file manager dialog
#
self.file_manager_dialog()
#
# Open the window
#
self.initwin=Toplevel()
self.initwin.transient(self.master)
self.set_geometry(self.master,self.initwin)
#
#
# Start window
#
self.initwin.title('Initialise pKa calculation')
Label(self.initwin,text='Select the titratable groups you want to include in the calculation').grid(row=0,column=0,columnspan=5)
Label(self.initwin,text='You cannot add or delete groups from this calculation once you click "Select these groups"').grid(row=1,column=0,columnspan=5)
#
# Listbox for all titratable groups
#
yscrollbar=Scrollbar(self.initwin,orient='vertical',width=10)
yscrollbar.grid(row=2,column=2,rowspan=10,sticky='nws')
height=10
self.all_groups=Listbox(self.initwin,
bg='white',
fg='black',
height=height,width=15,
yscrollcommand=yscrollbar.set,
selectmode=MULTIPLE)
self.all_groups.grid(row=2,column=0,columnspan=2,rowspan=10,sticky='news')
yscrollbar.config(command=self.all_groups.yview)
#
# List containing the groups that are selected
#
self.calc_selected_list=[]
#
# Listbox for titratable groups to be included in calculation
#
yscrollbar2=Scrollbar(self.initwin,orient='vertical',width=10)
yscrollbar2.grid(row=2,column=6,rowspan=10,sticky='nws')
height=10
self.calcgroups_selected=Listbox(self.initwin,
bg='white',
fg='black',
height=height,width=15,
yscrollcommand=yscrollbar2.set,
selectmode=MULTIPLE)
self.calcgroups_selected.grid(row=2,column=4,columnspan=2,rowspan=10,sticky='news')
yscrollbar2.config(command=self.calcgroups_selected.yview)
#
# Calculation engine setup
#
self.set_enecalc_engine(win=self.initwin,row=2,column=3,command=self.update_calcIF_selected_groups)
self.update_calcIF_selected_groups()
Button(self.initwin,text='Add -->',command=self.calcIF_select_group).grid(row=5,column=3)
Button(self.initwin,text='<-- Remove',command=self.calcIF_remove_group).grid(row=6,column=3)
#
# pH start, stop and step
#
self.pHstart=0.1
self.pHstop=12.0
self.pHstep=0.1
#
# OK and Cancel buttons
#
Button(self.initwin,text='Select these groups',command=self.IFsetup_calc).grid(row=13,column=0,columnspan=2,sticky='news')
Button(self.initwin,text='Select all groups',command=self.calcIF_selectall).grid(row=13,column=2,columnspan=2,sticky='news')
        Button(self.initwin,text='Add structure file (PDB/mol2)').grid(row=13,column=4)
        # Placeholder: created but not yet placed in the grid or given a command
        Button(self.initwin,text='Add extra titgroup in loaded structure')
Button(self.initwin,text='Cancel',command=self.initwin.destroy).grid(row=13,column=5,sticky='news')
self.status=StringVar()
self.status.set('Waiting...')
Label(self.initwin,text='Status:').grid(row=14,column=0)
Label(self.initwin,textvariable=self.status).grid(row=14,column=1)
#
# Button for defining titratable groups
#
Button(self.initwin,text='Edit titgroup definitions').grid(row=14,column=0)
return
#
# ----
#
def calcIF_selectall(self):
raise Exception("Jens forgot to write this function")
return
def calcIF_select_group(self):
"""Add a single or multiple groups to the selected listbox"""
for sel in self.all_groups.curselection():
group_num=int(sel)
sel_group=self.groups[group_num]
if not sel_group in self.calc_selected_list:
self.calc_selected_list.append(sel_group)
self.update_calcIF_selected_groups()
return
def calcIF_remove_group(self):
"""Remove a single or multiple groups from the selected listbox"""
for sel in self.calcgroups_selected.curselection():
group_num=int(sel)
sel_group=self.groups[group_num]
if sel_group in self.calc_selected_list:
self.calc_selected_list.remove(sel_group)
self.update_calcIF_selected_groups()
return
#
# -----
#
def update_calcIF_selected_groups(self):
"""Update the groups in the listbox with groups selected for the calculation"""
self.calc_selected_list.sort()
self.calcgroups_selected.delete(0,END)
for group in self.calc_selected_list:
self.calcgroups_selected.insert(END,group)
# Code for selecting calculation engine?
return
#
# ----
#
def IFsetup_calc(self):
"""Set up a new calculation with the present titratable groups
Set all energies to zero
"""
import os
pdbfilename_short=os.path.split(self.pdbfilename)[1]
label=self.labels[self.label_count]
import pKaIO
self.calcs[pdbfilename_short]={'instance':pKaIO.pKaIO(self.pdbfilename),'selected_groups':[],'label':label}
self.label_count=self.label_count+1
#
# Matrix
#
self.status.set('Constructing matrix')
self.master.update_idletasks()
self.construct_empty_matrix()
#self.calcs[pdbfilename_short]['titcurv']=self.X2.readtitcurv()
#
# read the pKa values
#
#self.calcs[pdbfilename_short]['pkavals']=self.X2.readpka()
#
# Store the PDB file
#
fd=open(self.pdbfilename)
self.calcs[pdbfilename_short]['pdblines']=fd.readlines()
fd.close()
#
# Destroy the init win
#
self.initwin.destroy()
return
#
# ----
#
def construct_empty_matrix(self):
"""Construct an empty matrix and save it"""
self.matrix={}
for group in self.groups:
if not self.matrix.has_key(group):
self.matrix[group]={}
for group2 in self.groups:
self.matrix[group][group2]=[0.0,0.0,0.0,0.0]
#
# Save it
#
import os
pdbfilename_short=os.path.split(self.pdbfilename)[1]
pKa_instance=self.calcs[pdbfilename_short]['instance']
pKa_instance.matrix=self.matrix.copy()
pKa_instance.write_matrix(pKa_instance.matrix_file)
return
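    #
    # Illustrative sketch (not part of the original code): each matrix record
    # created above holds four energy components; update_matrix_display()
    # combines them as record[0]-record[1]-record[2]+record[3] to obtain the
    # pairwise interaction energy shown in the matrix window.
    #
    def _example_interaction_energy(self,record=(1.2,0.3,0.4,0.1)):
        """Combine a four-component matrix record into one interaction energy (kT/e)"""
        return record[0]-record[1]-record[2]+record[3]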
#
# -----
#
def do_pka_calculation(self):
"""Run a full pKa calculation on the selected groups"""
self.chwin=Toplevel()
self.chwin.transient(self.master)
self.set_geometry(self.master,self.chwin)
self.chwin.title('Choose calculation type')
self.set_enecalc_engine(self.chwin,row=0,column=0)
Button(self.chwin,text='Run calculation',command=self.pKacalc_driver).grid(row=5,column=0)
return
#
# ----
#
def pKacalc_driver(self):
"""Start the pKa calculation"""
engine=self.calc_engine.get()
if engine=='PDB2pKa':
pass
elif engine=='pKaTool':
self.pKaTool_pKa()
else:
print 'Not implemented yet'
return
#
# ----
#
def pdb2pKa(self):
"""Interface to pdb2pka"""
return
#
# ----
#
def propka(self):
"""Run a pKa calculation on the propka server"""
return
#
# ----
#
def set_enecalc_engine(self,win,row=0,column=0,command=None):
"""Set the calculation engine"""
engines=['PDB2pKa','PropKa','WHAT IF','pKaTool','Manual energies']
active_engines=['pKaTool','Manual energies']
engines.sort()
Label(win,text='Select calculation engine').grid(row=row,column=column)
if not command:
command=self.dummy
for engine in engines:
if engine in active_engines:
Radiobutton(win,variable=self.calc_engine,value=engine,text=engine,
activebackground='red',command=command).grid(row=row+1,column=column,sticky='nws')
row=row+1
return
#
# ----
#
def dummy(self):
"""Dummy function"""
return
#
# ----
#
def set_titcurv_method(self):
return
#
# ----
#
def calculate_matrix_dialog(self):
"""Open a dialog for calculating or entering energies for the interaction energy matrix"""
if not self.pdbfilename:
tkMessageBox.showwarning('No PDB file',
'You must load a PDB file before you can calculate the matrix')
return
#
# Open the window
#
self.mwin=Toplevel()
self.mwin.title('Interaction energy matrix')
self.set_geometry(self.master,self.mwin)
#
# Canvas for the matrix
#
self.matrix_canvas=Pmw.ScrolledCanvas(self.mwin,
borderframe = 1,
labelpos = 'n',
label_text = 'Interaction energy matrix',
usehullsize = 1,
hull_width = 800,
hull_height = 800,
hscrollmode='dynamic',
vscrollmode='dynamic'
)
self.matrix_canvas.interior().configure(bg='white')
self.matrix_canvas.grid(row=0,column=0,rowspan=10,columnspan=10,sticky='news')
#
# Buttons for the calculation engine
#
self.set_enecalc_engine(win=self.mwin,row=11,column=0)
#
# What do we display?
#
row=11
Label(self.mwin,text='Display').grid(row=row,column=3)
self.m_display=StringVar()
for disp in ['Interaction energy (kT/e)','Interaction energy (dpKa)','Distance (A)','eps(effective)']:
Radiobutton(self.mwin,variable=self.m_display,value=disp,text=disp,
activebackground='red',command=self.update_matrix_display).grid(row=row+1,column=3,sticky='nws')
row=row+1
self.m_display.set('Interaction energy (kT/e)')
#
# Add action buttons
#
row=row+1
Button(self.mwin,text='Close',command=self.mwin.destroy).grid(row=row,column=0)
Button(self.mwin,text='Recalculate matrix',command=self.calculate_matrix_driver).grid(row=row,column=1)
#
# Status field
#
self.status=StringVar()
self.status.set('Waiting...')
Label(self.mwin,text='Status:',bg='white').grid(row=14,column=0,sticky='news')
Label(self.mwin,textvariable=self.status,bg='white').grid(row=14,column=1,sticky='news')
#
# Update the display
#
self.update_matrix_display()
#
# Make sure that the sliders are in place
#
self.matrix_canvas.resizescrollregion()
return
#
# ----
#
def calculate_matrix_driver(self):
"""Calculate the matrix"""
engine=self.calc_engine.get()
if engine=='PDB2pKa':
pass
elif engine=='pKaTool':
self.matrix=self.Protool_instance.calculate_matrix(filename=self.pdbfilename,updater=self.update_matrix_status)
self.update_matrix_display()
else:
print 'Not implemented yet'
return
#
# ----
#
def update_matrix_status(self,message):
"""Update the matrix calc status"""
self.status.set(message)
self.master.update_idletasks()
self.mwin.update()
return
#
# ----
#
def update_matrix_display(self):
"""Update the matrix on the screen when we choose a new calculation method"""
#
# Delete all object on the canvas
#
objects=self.matrix_canvas.find_all()
for obj in objects:
self.matrix_canvas.delete(obj)
#
# Precompute the group numbers
#
self.group_numbers={}
count=0
for group in self.groups:
self.group_numbers[group]=count
count=count+1
#
# Get the titratable groups
#
matrix_balloon=Pmw.Balloon(self.mwin)
import math
for group in self.groups:
#
# Labels
#
x,y=self.get_matrix_cell_position(group,group)
self.matrix_canvas.create_text(x,self.matrix_y_add,text=group,anchor='s')
self.matrix_canvas.create_text(0,y,text=group,anchor='w')
#
# Insert the energies
#
for partner in self.groups:
x,y=self.get_matrix_cell_position(group,partner,type='box')
if group==partner:
self.matrix_canvas.create_rectangle(x,y,x+self.matrix_x_add,y+self.matrix_y_add,fill='black')
else:
record=self.matrix[group][partner]
disp=self.m_display.get()
intene=record[0]-record[1]-record[2]+record[3]
if disp=='Interaction energy (kT/e)':
pass
elif disp=='Interaction energy (dpKa)':
intene=intene/math.log(10)
elif disp=='Distance (A)':
import tkMessageBox
tkMessageBox.showinfo('Not done yet',
'Be nice to Jens to get him to do this',
parent=self.mwin)
return
elif disp=='eps(effective)':
import tkMessageBox
tkMessageBox.showinfo('Not done yet',
'Be nice to Jens to get him to do this',
parent=self.mwin)
return
if intene>0.0:
color='red'
else:
color='blue'
self.matrix_canvas.create_rectangle(x,y,x+self.matrix_x_add,y+self.matrix_y_add)
x,y=self.get_matrix_cell_position(group,partner)
handle=self.matrix_canvas.create_text(x,y,text='%5.3f' %intene,anchor='center',fill=color)
matrix_balloon.tagbind(self.matrix_canvas,handle,'%s-%s' %(group,partner))
#self.Entry=Entry(self.mwin,width=10,textvariable=energy,bg='white')
#self.win=self.matrix_canvas.create_window(x,y,window=self.Entry,anchor='nw')
return
#
# ----
#
def get_matrix_cell_position(self,group,partner,type='text'):
"""Calculate the x and y coordinates the upper left corner of a matrix cell
setting type to text gives the center of the cell
"""
self.matrix_x_add=80
self.matrix_y_add=25
x=self.matrix_x_add+self.group_numbers[partner]*self.matrix_x_add
y=self.matrix_y_add+self.group_numbers[group]*self.matrix_y_add
if type=='text':
y=y+int(0.5*self.matrix_y_add)
x=x+int(0.5*self.matrix_x_add)
return x,y
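#
# Illustrative sketch (not part of the original code): the cell geometry used
# by get_matrix_cell_position above, reproduced standalone. Groups are numbered
# 0..N-1; cell (i,j) has its upper left corner at (80*(j+1),25*(i+1)) and its
# centre half a cell width/height further along each axis.
#
def _example_matrix_cell_position(group_no,partner_no,type='text'):
    x=80+partner_no*80
    y=25+group_no*25
    if type=='text':
        x=x+int(0.5*80)
        y=y+int(0.5*25)
    return x,y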
class HTTP_handler:
"""Send an HTTP request to a remote URL"""
def __init__(self, uri, host, port):
#
# Initialise variables
#
#
import time, os, random
self.uri = uri
self.host = host
self.port = port
self.queryString = None
self.boundary= '%s%s_%s_%s' % \
('-----', int(time.time()), os.getpid(), random.randint(1,10000))
return
#
# -----
#
def load(self, args, headerDict=None):
#
# Loads multiple files
#
import string
total = []
for (name,values) in args.items():
data = values
hdr = []; part = []
hdr.append('Content-Disposition: form-data; name="'+name+'"')
part.append("%s\n\n%s" % (string.joinfields(hdr,'\n'), data))
#print string.joinfields(hdr,'\n')
total.append('--%s\n' % self.boundary)
total.append(string.joinfields(part, "\n--%s\n" % self.boundary))
total.append('\n')
self.queryString = string.joinfields(total, '')
return
#
# -----
#
def request(self,return_type='text'):
"""Send the request"""
import httplib
query = self.queryString
contentType = 'multipart/form-data; boundary=%s' % self.boundary
contentLength = str(len(query))
h = httplib.HTTP()
h.connect(self.host, self.port)
h.putrequest('POST', self.uri)
h.putheader('Accept', '*/*')
h.putheader('Proxy-Connection', 'Keep-Alive')
h.putheader('User-Agent', 'Bond/007 [en] (WinNT; U)')
h.putheader('Content-Type', contentType)
h.putheader('Content-Length', contentLength)
h.endheaders()
h.send(query)
rcode, rmsg, headers= h.getreply()
response = h.getfile()
if return_type=='text':
            if rcode != 200:
                msg = "error: %s, %s\n%s %s" % (rcode, self.uri, rmsg, response)
                print msg
                tkMessageBox.showwarning("Could not contact pKD server",
                                         "Please check that your internet connection is functional.")
                return None
else:
return response.read()
else:
#
# Return the file-like object
#
return response
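#
# Illustrative sketch (not part of the original code): typical use of
# HTTP_handler. The URI, host and form fields below are placeholders, and
# request() performs a live POST, so this is a sketch rather than a test.
#
def _example_http_post():
    h=HTTP_handler('/cgi-bin/submit.py','example.org',80)
    h.load({'pdbfile':'ATOM ...','calc':'pKa'})  # build the multipart body
    return h.request(return_type='text')         # body text, or None on error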
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2012, Dane Summers <dsummers@pinedesk.biz>
# (c) 2013, Mike Grozak <mike.grozak@gmail.com>
# (c) 2013, Patrick Callahan <pmc@patrickcallahan.com>
# (c) 2015, Evan Kaufman <evan@digitalflophouse.com>
# (c) 2015, Luca Berruti <nadirio@gmail.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: cron
short_description: Manage cron.d and crontab entries.
description:
  - Use this module to manage crontab and environment variable entries. It allows
    you to create, update, or delete environment variables and named crontab entries.
- 'When crontab jobs are managed: the module includes one line with the description of the
crontab entry C("#Ansible: <name>") corresponding to the "name" passed to the module,
which is used by future ansible/module calls to find/check the state. The "name"
parameter should be unique, and changing the "name" value will result in a new cron
task being created (or a different one being removed).'
- 'When environment variables are managed: no comment line is added, but, when the module
needs to find/check the state, it uses the "name" parameter to find the environment
variable definition line.'
- 'When using symbols such as %, they must be properly escaped.'
version_added: "0.9"
options:
name:
description:
      - Description of a crontab entry or, if env is set, the name of the environment
        variable. Required if state=absent. Note that if name is not set and state=present,
        then a new crontab entry will always be created, regardless of existing ones.
default: null
required: false
user:
description:
- The specific user whose crontab should be modified.
required: false
default: root
job:
description:
      - The command to execute or, if env is set, the value of the environment variable.
The command should not contain line breaks.
Required if state=present.
required: false
aliases: ['value']
default: null
state:
description:
- Whether to ensure the job or environment variable is present or absent.
required: false
default: present
choices: [ "present", "absent" ]
cron_file:
description:
- If specified, uses this file instead of an individual user's crontab.
If this is a relative path, it is interpreted with respect to
/etc/cron.d. (If it is absolute, it will typically be /etc/crontab).
        Many Linux distros expect (and some require) the filename portion to consist solely
of upper- and lower-case letters, digits, underscores, and hyphens.
To use the C(cron_file) parameter you must specify the C(user) as well.
required: false
default: null
backup:
description:
- If set, create a backup of the crontab before it is modified.
The location of the backup is returned in the C(backup_file) variable by this module.
required: false
choices: [ "yes", "no" ]
default: no
minute:
description:
- Minute when the job should run ( 0-59, *, */2, etc )
required: false
default: "*"
hour:
description:
- Hour when the job should run ( 0-23, *, */2, etc )
required: false
default: "*"
day:
description:
- Day of the month the job should run ( 1-31, *, */2, etc )
required: false
default: "*"
aliases: [ "dom" ]
month:
description:
- Month of the year the job should run ( 1-12, *, */2, etc )
required: false
default: "*"
weekday:
description:
- Day of the week that the job should run ( 0-6 for Sunday-Saturday, *, etc )
required: false
default: "*"
aliases: [ "dow" ]
reboot:
description:
- If the job should be run at reboot. This option is deprecated. Users should use special_time.
version_added: "1.0"
required: false
default: "no"
choices: [ "yes", "no" ]
special_time:
description:
- Special time specification nickname.
version_added: "1.3"
required: false
default: null
choices: [ "reboot", "yearly", "annually", "monthly", "weekly", "daily", "hourly" ]
disabled:
description:
- If the job should be disabled (commented out) in the crontab. Only has effect if state=present
version_added: "2.0"
required: false
default: false
env:
description:
      - If set, manages a crontab's environment variable. New variables are added at the top of the crontab.
        The "name" and "value" parameters are the name and the value of the environment variable.
version_added: "2.1"
required: false
default: "no"
choices: [ "yes", "no" ]
insertafter:
description:
      - Used with C(state=present) and C(env). If specified, the environment variable will be
        inserted after the declaration of the specified environment variable.
version_added: "2.1"
required: false
default: null
insertbefore:
description:
      - Used with C(state=present) and C(env). If specified, the environment variable will be
        inserted before the declaration of the specified environment variable.
version_added: "2.1"
required: false
default: null
requirements:
- cron
author:
- "Dane Summers (@dsummersl)"
- 'Mike Grozak'
- 'Patrick Callahan'
- 'Evan Kaufman (@EvanK)'
- 'Luca Berruti (@lberruti)'
"""
EXAMPLES = '''
# Ensure a job that runs at 2 and 5 exists.
# Creates an entry like "0 5,2 * * * ls -alh > /dev/null"
- cron:
name: "check dirs"
minute: "0"
hour: "5,2"
job: "ls -alh > /dev/null"
# Ensure an old job is no longer present. Removes any job that is prefixed
# by "#Ansible: an old job" from the crontab
- cron:
name: "an old job"
state: absent
# Creates an entry like "@reboot /some/job.sh"
- cron:
name: "a job for reboot"
special_time: reboot
job: "/some/job.sh"
# Creates an entry like "PATH=/opt/bin" on top of crontab
- cron:
name: PATH
env: yes
value: /opt/bin
# Creates an entry like "APP_HOME=/srv/app" and insert it after PATH
# declaration
- cron:
name: APP_HOME
env: yes
value: /srv/app
insertafter: PATH
# Creates a cron file under /etc/cron.d
- cron:
name: yum autoupdate
weekday: 2
minute: 0
hour: 12
user: root
job: "YUMINTERACTIVE: 0 /usr/sbin/yum-autoupdate"
cron_file: ansible_yum-autoupdate
# Removes a cron file from under /etc/cron.d
- cron:
name: "yum autoupdate"
cron_file: ansible_yum-autoupdate
state: absent
# Removes "APP_HOME" environment variable from crontab
- cron:
name: APP_HOME
env: yes
state: absent
'''
import os
import platform
import pipes
import pwd
import re
import sys
import tempfile
from ansible.module_utils.basic import AnsibleModule, get_platform
CRONCMD = "/usr/bin/crontab"
class CronTabError(Exception):
pass
class CronTab(object):
"""
CronTab object to write time based crontab file
user - the user of the crontab (defaults to root)
cron_file - a cron file under /etc/cron.d, or an absolute path
"""
def __init__(self, module, user=None, cron_file=None):
self.module = module
self.user = user
self.root = (os.getuid() == 0)
self.lines = None
self.ansible = "#Ansible: "
self.existing = ''
if cron_file:
if os.path.isabs(cron_file):
self.cron_file = cron_file
else:
self.cron_file = os.path.join('/etc/cron.d', cron_file)
else:
self.cron_file = None
self.read()
def read(self):
# Read in the crontab from the system
self.lines = []
if self.cron_file:
# read the cronfile
try:
f = open(self.cron_file, 'r')
self.existing = f.read()
self.lines = self.existing.splitlines()
f.close()
except IOError:
# cron file does not exist
return
except:
raise CronTabError("Unexpected error:", sys.exc_info()[0])
else:
# using safely quoted shell for now, but this really should be two non-shell calls instead. FIXME
(rc, out, err) = self.module.run_command(self._read_user_execute(), use_unsafe_shell=True)
if rc != 0 and rc != 1: # 1 can mean that there are no jobs.
raise CronTabError("Unable to read crontab")
self.existing = out
lines = out.splitlines()
count = 0
for l in lines:
if count > 2 or (not re.match( r'# DO NOT EDIT THIS FILE - edit the master and reinstall.', l) and
not re.match( r'# \(/tmp/.*installed on.*\)', l) and
not re.match( r'# \(.*version.*\)', l)):
self.lines.append(l)
else:
pattern = re.escape(l) + '[\r\n]?'
self.existing = re.sub(pattern, '', self.existing, 1)
count += 1
def is_empty(self):
if len(self.lines) == 0:
return True
else:
return False
def write(self, backup_file=None):
"""
Write the crontab to the system. Saves all information.
"""
if backup_file:
fileh = open(backup_file, 'w')
elif self.cron_file:
fileh = open(self.cron_file, 'w')
else:
filed, path = tempfile.mkstemp(prefix='crontab')
os.chmod(path, int('0644', 8))
fileh = os.fdopen(filed, 'w')
fileh.write(self.render())
fileh.close()
# return if making a backup
if backup_file:
return
# Add the entire crontab back to the user crontab
if not self.cron_file:
# quoting shell args for now but really this should be two non-shell calls. FIXME
(rc, out, err) = self.module.run_command(self._write_execute(path), use_unsafe_shell=True)
os.unlink(path)
if rc != 0:
self.module.fail_json(msg=err)
# set SELinux permissions
if self.module.selinux_enabled() and self.cron_file:
self.module.set_default_selinux_context(self.cron_file, False)
def do_comment(self, name):
return "%s%s" % (self.ansible, name)
def add_job(self, name, job):
# Add the comment
self.lines.append(self.do_comment(name))
# Add the job
self.lines.append("%s" % (job))
def update_job(self, name, job):
return self._update_job(name, job, self.do_add_job)
def do_add_job(self, lines, comment, job):
lines.append(comment)
lines.append("%s" % (job))
def remove_job(self, name):
return self._update_job(name, "", self.do_remove_job)
def do_remove_job(self, lines, comment, job):
return None
def add_env(self, decl, insertafter=None, insertbefore=None):
if not (insertafter or insertbefore):
self.lines.insert(0, decl)
return
if insertafter:
other_name = insertafter
elif insertbefore:
other_name = insertbefore
other_decl = self.find_env(other_name)
if len(other_decl) > 0:
if insertafter:
index = other_decl[0]+1
elif insertbefore:
index = other_decl[0]
self.lines.insert(index, decl)
return
self.module.fail_json(msg="Variable named '%s' not found." % other_name)
def update_env(self, name, decl):
return self._update_env(name, decl, self.do_add_env)
def do_add_env(self, lines, decl):
lines.append(decl)
def remove_env(self, name):
return self._update_env(name, '', self.do_remove_env)
def do_remove_env(self, lines, decl):
return None
def remove_job_file(self):
try:
os.unlink(self.cron_file)
return True
except OSError:
# cron file does not exist
return False
except:
raise CronTabError("Unexpected error:", sys.exc_info()[0])
def find_job(self, name, job=None):
# attempt to find job by 'Ansible:' header comment
comment = None
for l in self.lines:
if comment is not None:
if comment == name:
return [comment, l]
else:
comment = None
elif re.match( r'%s' % self.ansible, l):
comment = re.sub( r'%s' % self.ansible, '', l)
# failing that, attempt to find job by exact match
if job:
for i, l in enumerate(self.lines):
if l == job:
# if no leading ansible header, insert one
if not re.match( r'%s' % self.ansible, self.lines[i-1]):
self.lines.insert(i, self.do_comment(name))
return [self.lines[i], l, True]
# if a leading blank ansible header AND job has a name, update header
elif name and self.lines[i-1] == self.do_comment(None):
self.lines[i-1] = self.do_comment(name)
return [self.lines[i-1], l, True]
return []
def find_env(self, name):
for index, l in enumerate(self.lines):
if re.match( r'^%s=' % name, l):
return [index, l]
return []
def get_cron_job(self,minute,hour,day,month,weekday,job,special,disabled):
# normalize any leading/trailing newlines (ansible/ansible-modules-core#3791)
job = job.strip('\r\n')
if disabled:
disable_prefix = '#'
else:
disable_prefix = ''
if special:
if self.cron_file:
return "%s@%s %s %s" % (disable_prefix, special, self.user, job)
else:
return "%s@%s %s" % (disable_prefix, special, job)
else:
if self.cron_file:
return "%s%s %s %s %s %s %s %s" % (disable_prefix,minute,hour,day,month,weekday,self.user,job)
else:
return "%s%s %s %s %s %s %s" % (disable_prefix,minute,hour,day,month,weekday,job)
def get_jobnames(self):
jobnames = []
for l in self.lines:
if re.match( r'%s' % self.ansible, l):
jobnames.append(re.sub( r'%s' % self.ansible, '', l))
return jobnames
def get_envnames(self):
envnames = []
for l in self.lines:
if re.match( r'^\S+=' , l):
envnames.append(l.split('=')[0])
return envnames
def _update_job(self, name, job, addlinesfunction):
ansiblename = self.do_comment(name)
newlines = []
comment = None
for l in self.lines:
if comment is not None:
addlinesfunction(newlines, comment, job)
comment = None
elif l == ansiblename:
comment = l
else:
newlines.append(l)
self.lines = newlines
if len(newlines) == 0:
return True
else:
return False # TODO add some more error testing
def _update_env(self, name, decl, addenvfunction):
newlines = []
for l in self.lines:
if re.match( r'^%s=' % name, l):
addenvfunction(newlines, decl)
else:
newlines.append(l)
self.lines = newlines
def render(self):
"""
        Render this crontab as it would appear in the crontab file.
"""
crons = []
for cron in self.lines:
crons.append(cron)
result = '\n'.join(crons)
if result:
result = result.rstrip('\r\n') + '\n'
return result
def _read_user_execute(self):
"""
Returns the command line for reading a crontab
"""
user = ''
if self.user:
if platform.system() == 'SunOS':
return "su %s -c '%s -l'" % (pipes.quote(self.user), pipes.quote(CRONCMD))
elif platform.system() == 'AIX':
return "%s -l %s" % (pipes.quote(CRONCMD), pipes.quote(self.user))
elif platform.system() == 'HP-UX':
return "%s %s %s" % (CRONCMD , '-l', pipes.quote(self.user))
elif pwd.getpwuid(os.getuid())[0] != self.user:
user = '-u %s' % pipes.quote(self.user)
return "%s %s %s" % (CRONCMD , user, '-l')
def _write_execute(self, path):
"""
Return the command line for writing a crontab
"""
user = ''
if self.user:
if platform.system() in ['SunOS', 'HP-UX', 'AIX']:
return "chown %s %s ; su '%s' -c '%s %s'" % (pipes.quote(self.user), pipes.quote(path), pipes.quote(self.user), CRONCMD, pipes.quote(path))
elif pwd.getpwuid(os.getuid())[0] != self.user:
user = '-u %s' % pipes.quote(self.user)
return "%s %s %s" % (CRONCMD , user, pipes.quote(path))
#==================================================
def main():
# The following example playbooks:
#
# - cron: name="check dirs" hour="5,2" job="ls -alh > /dev/null"
#
# - name: do the job
# cron: name="do the job" hour="5,2" job="/some/dir/job.sh"
#
# - name: no job
# cron: name="an old job" state=absent
#
# - name: sets env
# cron: name="PATH" env=yes value="/bin:/usr/bin"
#
# Would produce:
# PATH=/bin:/usr/bin
# # Ansible: check dirs
    # * 5,2 * * * ls -alh > /dev/null
    # # Ansible: do the job
    # * 5,2 * * * /some/dir/job.sh
module = AnsibleModule(
argument_spec = dict(
name=dict(required=False),
user=dict(required=False),
job=dict(required=False, aliases=['value']),
cron_file=dict(required=False),
state=dict(default='present', choices=['present', 'absent']),
backup=dict(default=False, type='bool'),
minute=dict(default='*'),
hour=dict(default='*'),
day=dict(aliases=['dom'], default='*'),
month=dict(default='*'),
weekday=dict(aliases=['dow'], default='*'),
reboot=dict(required=False, default=False, type='bool'),
special_time=dict(required=False,
default=None,
choices=["reboot", "yearly", "annually", "monthly", "weekly", "daily", "hourly"],
type='str'),
disabled=dict(default=False, type='bool'),
env=dict(required=False, type='bool'),
insertafter=dict(required=False),
insertbefore=dict(required=False),
),
supports_check_mode = True,
mutually_exclusive=[
['reboot', 'special_time'],
['insertafter', 'insertbefore'],
]
)
name = module.params['name']
user = module.params['user']
job = module.params['job']
cron_file = module.params['cron_file']
state = module.params['state']
backup = module.params['backup']
minute = module.params['minute']
hour = module.params['hour']
day = module.params['day']
month = module.params['month']
weekday = module.params['weekday']
reboot = module.params['reboot']
special_time = module.params['special_time']
disabled = module.params['disabled']
env = module.params['env']
insertafter = module.params['insertafter']
insertbefore = module.params['insertbefore']
do_install = state == 'present'
changed = False
res_args = dict()
warnings = list()
if cron_file:
cron_file_basename = os.path.basename(cron_file)
if not re.search(r'^[A-Z0-9_-]+$', cron_file_basename, re.I):
warnings.append('Filename portion of cron_file ("%s") should consist' % cron_file_basename
+ ' solely of upper- and lower-case letters, digits, underscores, and hyphens')
# Ensure all files generated are only writable by the owning user. Primarily relevant for the cron_file option.
os.umask(int('022', 8))
crontab = CronTab(module, user, cron_file)
module.debug('cron instantiated - name: "%s"' % name)
if module._diff:
diff = dict()
diff['before'] = crontab.existing
if crontab.cron_file:
diff['before_header'] = crontab.cron_file
else:
if crontab.user:
diff['before_header'] = 'crontab for user "%s"' % crontab.user
else:
diff['before_header'] = 'crontab'
# --- user input validation ---
if (special_time or reboot) and \
(True in [(x != '*') for x in [minute, hour, day, month, weekday]]):
module.fail_json(msg="You must specify time and date fields or special time.")
# cannot support special_time on solaris
if (special_time or reboot) and get_platform() == 'SunOS':
module.fail_json(msg="Solaris does not support special_time=... or @reboot")
if cron_file and do_install:
if not user:
module.fail_json(msg="To use cron_file=... parameter you must specify user=... as well")
if job is None and do_install:
module.fail_json(msg="You must specify 'job' to install a new cron job or variable")
if (insertafter or insertbefore) and not env and do_install:
module.fail_json(msg="Insertafter and insertbefore parameters are valid only with env=yes")
if reboot:
special_time = "reboot"
# if requested make a backup before making a change
if backup and not module.check_mode:
(backuph, backup_file) = tempfile.mkstemp(prefix='crontab')
crontab.write(backup_file)
if crontab.cron_file and not name and not do_install:
if module._diff:
diff['after'] = ''
diff['after_header'] = '/dev/null'
else:
diff = dict()
if module.check_mode:
changed = os.path.isfile(crontab.cron_file)
else:
changed = crontab.remove_job_file()
module.exit_json(changed=changed,cron_file=cron_file,state=state,diff=diff)
if env:
if ' ' in name:
module.fail_json(msg="Invalid name for environment variable")
decl = '%s="%s"' % (name, job)
old_decl = crontab.find_env(name)
if do_install:
if len(old_decl) == 0:
crontab.add_env(decl, insertafter, insertbefore)
changed = True
if len(old_decl) > 0 and old_decl[1] != decl:
crontab.update_env(name, decl)
changed = True
else:
if len(old_decl) > 0:
crontab.remove_env(name)
changed = True
else:
if do_install:
for char in ['\r', '\n']:
if char in job.strip('\r\n'):
warnings.append('Job should not contain line breaks')
break
job = crontab.get_cron_job(minute, hour, day, month, weekday, job, special_time, disabled)
old_job = crontab.find_job(name, job)
if len(old_job) == 0:
crontab.add_job(name, job)
changed = True
if len(old_job) > 0 and old_job[1] != job:
crontab.update_job(name, job)
changed = True
if len(old_job) > 2:
crontab.update_job(name, job)
changed = True
else:
old_job = crontab.find_job(name)
if len(old_job) > 0:
crontab.remove_job(name)
changed = True
# no changes to env/job, but existing crontab needs a terminating newline
if not changed and not crontab.existing == '':
if not (crontab.existing.endswith('\r') or crontab.existing.endswith('\n')):
changed = True
res_args = dict(
jobs = crontab.get_jobnames(),
envs = crontab.get_envnames(),
warnings = warnings,
changed = changed
)
if changed:
if not module.check_mode:
crontab.write()
if module._diff:
diff['after'] = crontab.render()
if crontab.cron_file:
diff['after_header'] = crontab.cron_file
else:
if crontab.user:
diff['after_header'] = 'crontab for user "%s"' % crontab.user
else:
diff['after_header'] = 'crontab'
res_args['diff'] = diff
# retain the backup only if crontab or cron file have changed
if backup and not module.check_mode:
if changed:
res_args['backup_file'] = backup_file
else:
os.unlink(backup_file)
if cron_file:
res_args['cron_file'] = cron_file
module.exit_json(**res_args)
# --- should never get here
module.exit_json(msg="Unable to execute cron task.")
if __name__ == '__main__':
main()
import datetime
import mongoengine as mongo
import urllib2
import redis
from django.conf import settings
from apps.social.models import MSharedStory
from apps.profile.models import Profile
from apps.statistics.rstats import RStats, round_time
from utils import json_functions as json
from utils import db_functions
from utils import log as logging
class MStatistics(mongo.Document):
key = mongo.StringField(unique=True)
value = mongo.DynamicField()
expiration_date = mongo.DateTimeField()
meta = {
'collection': 'statistics',
'allow_inheritance': False,
'indexes': ['key'],
}
def __unicode__(self):
return "%s: %s" % (self.key, self.value)
@classmethod
def get(cls, key, default=None):
obj = cls.objects.filter(key=key).first()
if not obj:
return default
if obj.expiration_date and obj.expiration_date < datetime.datetime.now():
obj.delete()
return default
return obj.value
@classmethod
def set(cls, key, value, expiration_sec=None):
try:
obj = cls.objects.get(key=key)
except cls.DoesNotExist:
obj = cls.objects.create(key=key)
obj.value = value
if expiration_sec:
obj.expiration_date = datetime.datetime.now() + datetime.timedelta(seconds=expiration_sec)
obj.save()
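    # Illustrative usage (not part of the original code) of the get/set pair
    # above; the key mirrors the one written by collect_statistics_for_db():
    #   MStatistics.set('mongodb_replication_lag', 2.5, expiration_sec=3600)
    #   MStatistics.get('mongodb_replication_lag', default=0)
    # Expired values are deleted on read and the default is returned instead.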
@classmethod
def all(cls):
stats = cls.objects.all()
values = dict([(stat.key, stat.value) for stat in stats])
for key, value in values.items():
if key in ('avg_time_taken', 'sites_loaded', 'stories_shared'):
values[key] = json.decode(value)
elif key in ('feeds_fetched', 'premium_users', 'standard_users', 'latest_sites_loaded',
'max_sites_loaded', 'max_stories_shared'):
values[key] = int(value)
elif key in ('latest_avg_time_taken', 'max_avg_time_taken', 'last_5_min_time_taken'):
values[key] = float(value)
values['total_sites_loaded'] = sum(values['sites_loaded']) if 'sites_loaded' in values else 0
values['total_stories_shared'] = sum(values['stories_shared']) if 'stories_shared' in values else 0
return values
@classmethod
def collect_statistics(cls):
now = datetime.datetime.now()
cls.collect_statistics_premium_users()
print "Premiums: %s" % (datetime.datetime.now() - now)
cls.collect_statistics_standard_users()
print "Standard users: %s" % (datetime.datetime.now() - now)
cls.collect_statistics_sites_loaded()
print "Sites loaded: %s" % (datetime.datetime.now() - now)
cls.collect_statistics_stories_shared()
print "Stories shared: %s" % (datetime.datetime.now() - now)
cls.collect_statistics_for_db()
print "DB Stats: %s" % (datetime.datetime.now() - now)
cls.collect_statistics_feeds_fetched()
print "Feeds Fetched: %s" % (datetime.datetime.now() - now)
@classmethod
def collect_statistics_feeds_fetched(cls):
feeds_fetched = RStats.count('feed_fetch', hours=24)
cls.objects(key='feeds_fetched').update_one(upsert=True,
set__key='feeds_fetched',
set__value=feeds_fetched)
return feeds_fetched
@classmethod
def collect_statistics_premium_users(cls):
last_day = datetime.datetime.now() - datetime.timedelta(hours=24)
premium_users = Profile.objects.filter(last_seen_on__gte=last_day, is_premium=True).count()
cls.objects(key='premium_users').update_one(upsert=True, set__key='premium_users', set__value=premium_users)
return premium_users
@classmethod
def collect_statistics_standard_users(cls):
last_day = datetime.datetime.now() - datetime.timedelta(hours=24)
standard_users = Profile.objects.filter(last_seen_on__gte=last_day, is_premium=False).count()
cls.objects(key='standard_users').update_one(upsert=True, set__key='standard_users', set__value=standard_users)
return standard_users
@classmethod
def collect_statistics_sites_loaded(cls):
now = round_time(datetime.datetime.now(), round_to=60)
sites_loaded = []
avg_time_taken = []
last_5_min_time_taken = 0
r = redis.Redis(connection_pool=settings.REDIS_STATISTICS_POOL)
for hour in range(24):
start_hours_ago = now - datetime.timedelta(hours=hour+1)
pipe = r.pipeline()
for m in range(60):
minute = start_hours_ago + datetime.timedelta(minutes=m)
key = "%s:%s" % (RStats.stats_type('page_load'), minute.strftime('%s'))
pipe.get("%s:s" % key)
pipe.get("%s:a" % key)
times = pipe.execute()
counts = [int(c) for c in times[::2] if c]
avgs = [float(a) for a in times[1::2] if a]
            if hour == 0:
                # NB: despite the variable name, only the first minute of the
                # most recent hour window is sampled here.
                last_5_min_time_taken = round(sum(avgs[:1]) / max(1, sum(counts[:1])), 2)
if counts and avgs:
count = max(1, sum(counts))
avg = round(sum(avgs) / count, 3)
else:
count = 0
avg = 0
sites_loaded.append(count)
avg_time_taken.append(avg)
sites_loaded.reverse()
avg_time_taken.reverse()
values = (
('sites_loaded', json.encode(sites_loaded)),
('avg_time_taken', json.encode(avg_time_taken)),
('latest_sites_loaded', sites_loaded[-1]),
('latest_avg_time_taken', avg_time_taken[-1]),
('max_sites_loaded', max(sites_loaded)),
('max_avg_time_taken', max(1, max(avg_time_taken))),
('last_5_min_time_taken', last_5_min_time_taken),
)
for key, value in values:
cls.objects(key=key).update_one(upsert=True, set__key=key, set__value=value)
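    # Illustrative sketch (not part of the original code): the loop above reads
    # per-minute Redis keys of the form "<stats_type>:<unix_minute>", where the
    # ":s" suffix appears to hold the request count and ":a" the summed load
    # time for that minute. A hypothetical helper for a single minute:
    @classmethod
    def _example_minute_page_load(cls, r, minute):
        key = "%s:%s" % (RStats.stats_type('page_load'), minute.strftime('%s'))
        count = int(r.get("%s:s" % key) or 0)
        total = float(r.get("%s:a" % key) or 0)
        return count, total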
@classmethod
def collect_statistics_stories_shared(cls):
now = datetime.datetime.now()
stories_shared = []
for hour in range(24):
start_hours_ago = now - datetime.timedelta(hours=hour)
end_hours_ago = now - datetime.timedelta(hours=hour+1)
shares = MSharedStory.objects.filter(
shared_date__lte=start_hours_ago,
shared_date__gte=end_hours_ago
).count()
stories_shared.append(shares)
stories_shared.reverse()
values = (
('stories_shared', json.encode(stories_shared)),
('latest_stories_shared', stories_shared[-1]),
('max_stories_shared', max(stories_shared)),
)
for key, value in values:
cls.objects(key=key).update_one(upsert=True, set__key=key, set__value=value)
@classmethod
def collect_statistics_for_db(cls):
lag = db_functions.mongo_max_replication_lag(settings.MONGODB)
cls.set('mongodb_replication_lag', lag)
now = round_time(datetime.datetime.now(), round_to=60)
r = redis.Redis(connection_pool=settings.REDIS_STATISTICS_POOL)
db_times = {}
latest_db_times = {}
for db in ['sql', 'mongo', 'redis', 'task_sql', 'task_mongo', 'task_redis']:
db_times[db] = []
for hour in range(24):
start_hours_ago = now - datetime.timedelta(hours=hour+1)
pipe = r.pipeline()
for m in range(60):
minute = start_hours_ago + datetime.timedelta(minutes=m)
key = "DB:%s:%s" % (db, minute.strftime('%s'))
pipe.get("%s:c" % key)
pipe.get("%s:t" % key)
times = pipe.execute()
counts = [int(c or 0) for c in times[::2]]
avgs = [float(a or 0) for a in times[1::2]]
if counts and avgs:
count = sum(counts)
avg = round(sum(avgs) / count, 3) if count else 0
else:
count = 0
avg = 0
if hour == 0:
latest_count = float(counts[-1]) if len(counts) else 0
latest_avg = float(avgs[-1]) if len(avgs) else 0
latest_db_times[db] = latest_avg / latest_count if latest_count else 0
db_times[db].append(avg)
db_times[db].reverse()
values = (
('avg_sql_times', json.encode(db_times['sql'])),
('avg_mongo_times', json.encode(db_times['mongo'])),
('avg_redis_times', json.encode(db_times['redis'])),
('latest_sql_avg', latest_db_times['sql']),
('latest_mongo_avg', latest_db_times['mongo']),
('latest_redis_avg', latest_db_times['redis']),
('latest_task_sql_avg', latest_db_times['task_sql']),
('latest_task_mongo_avg', latest_db_times['task_mongo']),
('latest_task_redis_avg', latest_db_times['task_redis']),
)
for key, value in values:
cls.objects(key=key).update_one(upsert=True, set__key=key, set__value=value)
class MFeedback(mongo.Document):
date = mongo.StringField()
summary = mongo.StringField()
subject = mongo.StringField()
url = mongo.StringField()
style = mongo.StringField()
order = mongo.IntField()
meta = {
'collection': 'feedback',
'allow_inheritance': False,
'indexes': ['style'],
'ordering': ['order'],
}
def __unicode__(self):
return "%s: (%s) %s" % (self.style, self.date, self.subject)
@classmethod
def collect_feedback(cls):
try:
data = urllib2.urlopen('https://getsatisfaction.com/newsblur/topics.widget').read()
except (urllib2.HTTPError), e:
logging.debug(" ***> Failed to collect feedback: %s" % e)
return
start = data.index('[')
end = data.rfind(']')+1
data = json.decode(data[start:end])
print data
i = 0
if len(data):
cls.objects.delete()
for feedback in data:
feedback['order'] = i
i += 1
for removal in ['about', 'less than']:
if removal in feedback['date']:
feedback['date'] = feedback['date'].replace(removal, '')
for feedback in data:
# Convert unicode to strings.
fb = dict([(str(k), v) for k, v in feedback.items()])
fb['url'] = fb['url'].replace('?utm_medium=widget&utm_source=widget_newsblur', "")
cls.objects.create(**fb)
@classmethod
def all(cls):
feedbacks = cls.objects.all()[:4]
return feedbacks
class MAnalyticsFetcher(mongo.Document):
date = mongo.DateTimeField(default=datetime.datetime.now)
feed_id = mongo.IntField()
feed_fetch = mongo.FloatField()
feed_process = mongo.FloatField()
page = mongo.FloatField()
icon = mongo.FloatField()
total = mongo.FloatField()
server = mongo.StringField()
feed_code = mongo.IntField()
meta = {
'db_alias': 'nbanalytics',
'collection': 'feed_fetches',
'allow_inheritance': False,
'indexes': ['date', 'feed_id', 'server', 'feed_code'],
'ordering': ['date'],
}
def __unicode__(self):
return "%s: %.4s+%.4s+%.4s+%.4s = %.4ss" % (self.feed_id, self.feed_fetch,
self.feed_process,
self.page,
self.icon,
self.total)
@classmethod
def add(cls, feed_id, feed_fetch, feed_process,
page, icon, total, feed_code):
server_name = settings.SERVER_NAME
if 'app' in server_name: return
if icon and page:
icon -= page
if page and feed_process:
page -= feed_process
elif page and feed_fetch:
page -= feed_fetch
if feed_process and feed_fetch:
feed_process -= feed_fetch
cls.objects.create(feed_id=feed_id, feed_fetch=feed_fetch,
feed_process=feed_process,
page=page, icon=icon, total=total,
server=server_name, feed_code=feed_code)
@classmethod
def calculate_stats(cls, stats):
return cls.aggregate(**stats)
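# Illustrative sketch (not part of the original code): MAnalyticsFetcher.add()
# stores per-stage deltas rather than cumulative timings. Given a cumulative
# trace fetch=0.2s, process=0.5s, page=0.9s, icon=1.0s it would record:
def _example_timing_decomposition():
    feed_fetch, feed_process, page, icon = 0.2, 0.5, 0.9, 1.0
    icon -= page                # 0.1s spent on the icon alone
    page -= feed_process        # 0.4s spent on the page alone
    feed_process -= feed_fetch  # 0.3s spent on processing alone
    return feed_fetch, feed_process, page, icon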
"""Support for a State MQTT vacuum."""
import json
import voluptuous as vol
from homeassistant.components.vacuum import (
STATE_CLEANING,
STATE_DOCKED,
STATE_ERROR,
STATE_IDLE,
STATE_PAUSED,
STATE_RETURNING,
SUPPORT_BATTERY,
SUPPORT_CLEAN_SPOT,
SUPPORT_FAN_SPEED,
SUPPORT_LOCATE,
SUPPORT_PAUSE,
SUPPORT_RETURN_HOME,
SUPPORT_SEND_COMMAND,
SUPPORT_START,
SUPPORT_STATUS,
SUPPORT_STOP,
StateVacuumEntity,
)
from homeassistant.const import ATTR_SUPPORTED_FEATURES, CONF_NAME
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from .. import CONF_COMMAND_TOPIC, CONF_QOS, CONF_RETAIN, CONF_STATE_TOPIC, subscription
from ... import mqtt
from ..debug_info import log_messages
from ..mixins import MQTT_ENTITY_COMMON_SCHEMA, MqttEntity
from .schema import MQTT_VACUUM_SCHEMA, services_to_strings, strings_to_services
SERVICE_TO_STRING = {
SUPPORT_START: "start",
SUPPORT_PAUSE: "pause",
SUPPORT_STOP: "stop",
SUPPORT_RETURN_HOME: "return_home",
SUPPORT_FAN_SPEED: "fan_speed",
SUPPORT_BATTERY: "battery",
SUPPORT_STATUS: "status",
SUPPORT_SEND_COMMAND: "send_command",
SUPPORT_LOCATE: "locate",
SUPPORT_CLEAN_SPOT: "clean_spot",
}
STRING_TO_SERVICE = {v: k for k, v in SERVICE_TO_STRING.items()}
DEFAULT_SERVICES = (
SUPPORT_START
| SUPPORT_STOP
| SUPPORT_RETURN_HOME
| SUPPORT_STATUS
| SUPPORT_BATTERY
| SUPPORT_CLEAN_SPOT
)
ALL_SERVICES = (
DEFAULT_SERVICES
| SUPPORT_PAUSE
| SUPPORT_LOCATE
| SUPPORT_FAN_SPEED
| SUPPORT_SEND_COMMAND
)
BATTERY = "battery_level"
FAN_SPEED = "fan_speed"
STATE = "state"
POSSIBLE_STATES = {
STATE_IDLE: STATE_IDLE,
STATE_DOCKED: STATE_DOCKED,
STATE_ERROR: STATE_ERROR,
STATE_PAUSED: STATE_PAUSED,
STATE_RETURNING: STATE_RETURNING,
STATE_CLEANING: STATE_CLEANING,
}
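# Illustrative example (not part of the original code): a state payload the
# vacuum might publish on its state topic. "state" must be one of
# POSSIBLE_STATES; any other keys are kept as state attributes.
_EXAMPLE_STATE_PAYLOAD = json.dumps(
    {"state": STATE_CLEANING, "battery_level": 61, "fan_speed": "medium"}
)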
CONF_SUPPORTED_FEATURES = ATTR_SUPPORTED_FEATURES
CONF_PAYLOAD_TURN_ON = "payload_turn_on"
CONF_PAYLOAD_TURN_OFF = "payload_turn_off"
CONF_PAYLOAD_RETURN_TO_BASE = "payload_return_to_base"
CONF_PAYLOAD_STOP = "payload_stop"
CONF_PAYLOAD_CLEAN_SPOT = "payload_clean_spot"
CONF_PAYLOAD_LOCATE = "payload_locate"
CONF_PAYLOAD_START = "payload_start"
CONF_PAYLOAD_PAUSE = "payload_pause"
CONF_SET_FAN_SPEED_TOPIC = "set_fan_speed_topic"
CONF_FAN_SPEED_LIST = "fan_speed_list"
CONF_SEND_COMMAND_TOPIC = "send_command_topic"
DEFAULT_NAME = "MQTT State Vacuum"
DEFAULT_RETAIN = False
DEFAULT_SERVICE_STRINGS = services_to_strings(DEFAULT_SERVICES, SERVICE_TO_STRING)
DEFAULT_PAYLOAD_RETURN_TO_BASE = "return_to_base"
DEFAULT_PAYLOAD_STOP = "stop"
DEFAULT_PAYLOAD_CLEAN_SPOT = "clean_spot"
DEFAULT_PAYLOAD_LOCATE = "locate"
DEFAULT_PAYLOAD_START = "start"
DEFAULT_PAYLOAD_PAUSE = "pause"
PLATFORM_SCHEMA_STATE = (
mqtt.MQTT_BASE_PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_FAN_SPEED_LIST, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(
CONF_PAYLOAD_CLEAN_SPOT, default=DEFAULT_PAYLOAD_CLEAN_SPOT
): cv.string,
vol.Optional(
CONF_PAYLOAD_LOCATE, default=DEFAULT_PAYLOAD_LOCATE
): cv.string,
vol.Optional(
CONF_PAYLOAD_RETURN_TO_BASE, default=DEFAULT_PAYLOAD_RETURN_TO_BASE
): cv.string,
vol.Optional(CONF_PAYLOAD_START, default=DEFAULT_PAYLOAD_START): cv.string,
vol.Optional(CONF_PAYLOAD_PAUSE, default=DEFAULT_PAYLOAD_PAUSE): cv.string,
vol.Optional(CONF_PAYLOAD_STOP, default=DEFAULT_PAYLOAD_STOP): cv.string,
vol.Optional(CONF_SEND_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_SET_FAN_SPEED_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_STATE_TOPIC): mqtt.valid_publish_topic,
vol.Optional(
CONF_SUPPORTED_FEATURES, default=DEFAULT_SERVICE_STRINGS
): vol.All(cv.ensure_list, [vol.In(STRING_TO_SERVICE.keys())]),
vol.Optional(CONF_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_RETAIN, default=DEFAULT_RETAIN): cv.boolean,
}
)
.extend(MQTT_ENTITY_COMMON_SCHEMA.schema)
.extend(MQTT_VACUUM_SCHEMA.schema)
)
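# Illustrative configuration (not part of the original code) matching
# PLATFORM_SCHEMA_STATE; the topic names are placeholders:
#
#   vacuum:
#     - platform: mqtt
#       schema: state
#       name: "MQTT State Vacuum"
#       command_topic: "vacuum/command"
#       state_topic: "vacuum/state"
#       set_fan_speed_topic: "vacuum/set_fan_speed"
#       fan_speed_list: ["min", "medium", "high", "max"]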
async def async_setup_entity_state(
config, async_add_entities, config_entry, discovery_data
):
"""Set up a State MQTT Vacuum."""
async_add_entities([MqttStateVacuum(config, config_entry, discovery_data)])
class MqttStateVacuum(MqttEntity, StateVacuumEntity):
"""Representation of a MQTT-controlled state vacuum."""
def __init__(self, config, config_entry, discovery_data):
"""Initialize the vacuum."""
self._state = None
self._state_attrs = {}
self._fan_speed_list = []
MqttEntity.__init__(self, None, config, config_entry, discovery_data)
@staticmethod
def config_schema():
"""Return the config schema."""
return PLATFORM_SCHEMA_STATE
def _setup_from_config(self, config):
supported_feature_strings = config[CONF_SUPPORTED_FEATURES]
self._supported_features = strings_to_services(
supported_feature_strings, STRING_TO_SERVICE
)
self._fan_speed_list = config[CONF_FAN_SPEED_LIST]
self._command_topic = config.get(mqtt.CONF_COMMAND_TOPIC)
self._set_fan_speed_topic = config.get(CONF_SET_FAN_SPEED_TOPIC)
self._send_command_topic = config.get(CONF_SEND_COMMAND_TOPIC)
self._payloads = {
key: config.get(key)
for key in (
CONF_PAYLOAD_START,
CONF_PAYLOAD_PAUSE,
CONF_PAYLOAD_STOP,
CONF_PAYLOAD_RETURN_TO_BASE,
CONF_PAYLOAD_CLEAN_SPOT,
CONF_PAYLOAD_LOCATE,
)
}
async def _subscribe_topics(self):
"""(Re)Subscribe to topics."""
topics = {}
@callback
@log_messages(self.hass, self.entity_id)
def state_message_received(msg):
"""Handle state MQTT message."""
payload = json.loads(msg.payload)
if STATE in payload and payload[STATE] in POSSIBLE_STATES:
self._state = POSSIBLE_STATES[payload[STATE]]
del payload[STATE]
self._state_attrs.update(payload)
self.async_write_ha_state()
if self._config.get(CONF_STATE_TOPIC):
topics["state_position_topic"] = {
"topic": self._config.get(CONF_STATE_TOPIC),
"msg_callback": state_message_received,
"qos": self._config[CONF_QOS],
}
self._sub_state = await subscription.async_subscribe_topics(
self.hass, self._sub_state, topics
)
@property
def state(self):
"""Return state of vacuum."""
return self._state
@property
def fan_speed(self):
"""Return fan speed of the vacuum."""
return self._state_attrs.get(FAN_SPEED, 0)
@property
def fan_speed_list(self):
"""Return fan speed list of the vacuum."""
return self._fan_speed_list
@property
def battery_level(self):
"""Return battery level of the vacuum."""
return max(0, min(100, self._state_attrs.get(BATTERY, 0)))
@property
def supported_features(self):
"""Flag supported features."""
return self._supported_features
async def async_start(self):
"""Start the vacuum."""
if self.supported_features & SUPPORT_START == 0:
return None
mqtt.async_publish(
self.hass,
self._command_topic,
self._config[CONF_PAYLOAD_START],
self._config[CONF_QOS],
self._config[CONF_RETAIN],
)
async def async_pause(self):
"""Pause the vacuum."""
if self.supported_features & SUPPORT_PAUSE == 0:
return None
mqtt.async_publish(
self.hass,
self._command_topic,
self._config[CONF_PAYLOAD_PAUSE],
self._config[CONF_QOS],
self._config[CONF_RETAIN],
)
async def async_stop(self, **kwargs):
"""Stop the vacuum."""
if self.supported_features & SUPPORT_STOP == 0:
return None
mqtt.async_publish(
self.hass,
self._command_topic,
self._config[CONF_PAYLOAD_STOP],
self._config[CONF_QOS],
self._config[CONF_RETAIN],
)
async def async_set_fan_speed(self, fan_speed, **kwargs):
"""Set fan speed."""
if (self.supported_features & SUPPORT_FAN_SPEED == 0) or (
fan_speed not in self._fan_speed_list
):
return None
mqtt.async_publish(
self.hass,
self._set_fan_speed_topic,
fan_speed,
self._config[CONF_QOS],
self._config[CONF_RETAIN],
)
async def async_return_to_base(self, **kwargs):
"""Tell the vacuum to return to its dock."""
if self.supported_features & SUPPORT_RETURN_HOME == 0:
return None
mqtt.async_publish(
self.hass,
self._command_topic,
self._config[CONF_PAYLOAD_RETURN_TO_BASE],
self._config[CONF_QOS],
self._config[CONF_RETAIN],
)
async def async_clean_spot(self, **kwargs):
"""Perform a spot clean-up."""
if self.supported_features & SUPPORT_CLEAN_SPOT == 0:
return None
mqtt.async_publish(
self.hass,
self._command_topic,
self._config[CONF_PAYLOAD_CLEAN_SPOT],
self._config[CONF_QOS],
self._config[CONF_RETAIN],
)
async def async_locate(self, **kwargs):
"""Locate the vacuum (usually by playing a song)."""
if self.supported_features & SUPPORT_LOCATE == 0:
return None
mqtt.async_publish(
self.hass,
self._command_topic,
self._config[CONF_PAYLOAD_LOCATE],
self._config[CONF_QOS],
self._config[CONF_RETAIN],
)
async def async_send_command(self, command, params=None, **kwargs):
"""Send a command to a vacuum cleaner."""
if self.supported_features & SUPPORT_SEND_COMMAND == 0:
return None
if params:
message = {"command": command}
message.update(params)
message = json.dumps(message)
else:
message = command
mqtt.async_publish(
self.hass,
self._send_command_topic,
message,
self._config[CONF_QOS],
self._config[CONF_RETAIN],
)
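# Illustrative sketch (not part of the original code): how async_send_command
# above serializes a command before publishing. The command name and
# parameters here are hypothetical.
def _example_send_command_message(command="clean_zone", params=None):
    if params:
        message = {"command": command}
        message.update(params)
        return json.dumps(message)  # e.g. '{"command": "clean_zone", "zone": 2}'
    return command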
#!/usr/bin/python
# Author: Rob Sanderson (azaroth@liv.ac.uk)
# Distributed and Usable under the GPL
# Version: 1.7
# Most Recent Changes: contexts, new modifier style for 1.1
#
# With thanks to Adam from IndexData and Mike Taylor for their valuable input
from shlex import shlex
from xml.sax.saxutils import escape
from xml.dom.minidom import Node, parseString
from PyZ3950.SRWDiagnostics import *
# Don't use cStringIO as it borks Unicode (apparently)
from StringIO import StringIO
import types
# Parsing strictness flags
errorOnEmptyTerm = 0 # index = "" (often meaningless)
errorOnQuotedIdentifier = 0 # "/foo/bar" = "" (unnecessary BNF restriction)
errorOnDuplicatePrefix = 0 # >a=b >a=c "" (impossible due to BNF)
fullResultSetNameCheck = 1 # srw.rsn=foo and srw.rsn=foo (mutant!!)
# Base values for CQL
serverChoiceRelation = "scr"
serverChoiceIndex = "cql.serverchoice"
order = ['=', '>', '>=', '<', '<=', '<>']
modifierSeparator = "/"
booleans = ['and', 'or', 'not', 'prox']
reservedPrefixes = {"srw" : "http://www.loc.gov/zing/cql/srw-indexes/v1.0/",
"cql" : "info:srw/cql-context-set/1/cql-v1.1"}
XCQLNamespace = "http://www.loc.gov/zing/cql/xcql/"
# End of 'configurable' stuff
class PrefixableObject:
"Root object for triple and searchClause"
prefixes = {}
parent = None
config = None
def __init__(self):
self.prefixes = {}
self.parent = None
self.config = None
def toXCQL(self, depth=0):
# Just generate our prefixes
space = " " * depth
xml = ['%s<prefixes>\n' % (space)]
for p in self.prefixes.keys():
xml.append("%s <prefix>\n%s <name>%s</name>\n%s <identifier>%s</identifier>\n%s </prefix>\n" % (space, space, escape(p), space, escape(self.prefixes[p]), space))
xml.append("%s</prefixes>\n" % (space))
return ''.join(xml)
def addPrefix(self, name, identifier):
if (errorOnDuplicatePrefix and (self.prefixes.has_key(name) or reservedPrefixes.has_key(name))):
# Maybe error
diag = Diagnostic45()
diag.details = name
            raise diag
self.prefixes[name] = identifier
def resolvePrefix(self, name):
# Climb tree
if (reservedPrefixes.has_key(name)):
return reservedPrefixes[name]
elif (self.prefixes.has_key(name)):
return self.prefixes[name]
elif (self.parent <> None):
return self.parent.resolvePrefix(name)
elif (self.config <> None):
# Config is some sort of server config which specifies defaults
return self.config.resolvePrefix(name)
else:
# Top of tree, no config, no resolution->Unknown indexset
# For client we need to allow no prefix?
#diag = Diagnostic15()
#diag.details = name
#raise diag
return None
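# Illustrative sketch (not part of the original code): prefix resolution climbs
# from a node to its parent (and finally to any server config), after checking
# reservedPrefixes.
def _examplePrefixResolution():
    parent = PrefixableObject()
    parent.addPrefix("dc", "http://purl.org/dc/elements/1.1/")
    child = PrefixableObject()
    child.parent = parent
    return child.resolvePrefix("dc")  # -> "http://purl.org/dc/elements/1.1/"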
class PrefixedObject:
"Root object for relation, relationModifier and index"
prefix = ""
prefixURI = ""
value = ""
parent = None
def __init__(self, val):
# All prefixed things are case insensitive
val = val.lower()
if val and val[0] == '"' and val[-1] == '"':
if errorOnQuotedIdentifier:
diag = Diagnostic14()
diag.details = val
raise diag
else:
val = val[1:-1]
self.value = val
self.splitValue()
def __str__(self):
if (self.prefix):
return "%s.%s" % (self.prefix, self.value)
else:
return self.value
def splitValue(self):
f = self.value.find(".")
if (self.value.count('.') > 1):
diag = Diagnostic15()
diag.details = "Multiple '.' characters: %s" % (self.value)
raise(diag)
elif (f == 0):
diag = Diagnostic15()
diag.details = "Null indexset: %s" % (irt.index)
raise(diag)
elif f >= 0:
self.prefix = self.value[:f].lower()
self.value = self.value[f+1:].lower()
def resolvePrefix(self):
if (not self.prefixURI):
self.prefixURI = self.parent.resolvePrefix(self.prefix)
return self.prefixURI
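# Illustrative sketch (not part of the original code): splitValue() separates a
# dotted index such as "DC.Title" into prefix and value, lowercasing both.
def _exampleSplitValue():
    idx = PrefixedObject("DC.Title")
    return idx.prefix, idx.value  # -> ("dc", "title")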
class ModifiableObject:
# Treat modifiers as keys on boolean/relation?
modifiers = []
def __getitem__(self, k):
if (type(k) == types.IntType):
try:
return self.modifiers[k]
except:
return None
for m in self.modifiers:
if (str(m.type) == k or m.type.value == k):
return m
return None
class Triple (PrefixableObject):
"Object to represent a CQL triple"
leftOperand = None
boolean = None
rightOperand = None
def toXCQL(self, depth=0):
"Create the XCQL representation of the object"
space = " " * depth
if (depth == 0):
xml = ['<triple xmlns="%s">\n' % (XCQLNamespace)]
else:
xml = ['%s<triple>\n' % (space)]
if self.prefixes:
xml.append(PrefixableObject.toXCQL(self, depth+1))
xml.append(self.boolean.toXCQL(depth+1))
xml.append("%s <leftOperand>\n" % (space))
xml.append(self.leftOperand.toXCQL(depth+2))
xml.append("%s </leftOperand>\n" % (space))
xml.append("%s <rightOperand>\n" % (space))
xml.append(self.rightOperand.toXCQL(depth+2))
xml.append("%s </rightOperand>\n" % (space))
xml.append("%s</triple>\n" % (space))
return ''.join(xml)
def toCQL(self):
txt = []
if (self.prefixes):
for p in self.prefixes.keys():
if (p <> ''):
txt.append('>%s="%s"' % (p, self.prefixes[p]))
else:
txt.append('>"%s"' % (self.prefixes[p]))
prefs = ' '.join(txt)
return "(%s %s %s %s)" % (prefs, self.leftOperand.toCQL(), self.boolean.toCQL(), self.rightOperand.toCQL())
else:
return "(%s %s %s)" % (self.leftOperand.toCQL(), self.boolean.toCQL(), self.rightOperand.toCQL())
def getResultSetId(self, top=None):
if fullResultSetNameCheck == 0 or self.boolean.value in ['not', 'prox']:
return ""
if top == None:
topLevel = 1
            top = self
else:
topLevel = 0
# Iterate over operands and build a list
rsList = []
if isinstance(self.leftOperand, Triple):
rsList.extend(self.leftOperand.getResultSetId(top))
else:
rsList.append(self.leftOperand.getResultSetId(top))
if isinstance(self.rightOperand, Triple):
rsList.extend(self.rightOperand.getResultSetId(top))
else:
rsList.append(self.rightOperand.getResultSetId(top))
if topLevel == 1:
# Check all elements are the same, if so we're a fubar form of present
if (len(rsList) == rsList.count(rsList[0])):
return rsList[0]
else:
return ""
else:
return rsList
class SearchClause (PrefixableObject):
"Object to represent a CQL searchClause"
index = None
relation = None
term = None
def __init__(self, ind, rel, t):
PrefixableObject.__init__(self)
self.index = ind
self.relation = rel
self.term = t
ind.parent = self
rel.parent = self
t.parent = self
def toXCQL(self, depth=0):
"Produce XCQL version of the object"
space = " " * depth
if (depth == 0):
xml = ['<searchClause xmlns="%s">\n' % (XCQLNamespace)]
else:
xml = ['%s<searchClause>\n' % (space)]
if self.prefixes:
xml.append(PrefixableObject.toXCQL(self, depth+1))
xml.append(self.index.toXCQL(depth+1))
xml.append(self.relation.toXCQL(depth+1))
xml.append(self.term.toXCQL(depth+1))
xml.append("%s</searchClause>\n" % (space))
return ''.join(xml)
def toCQL(self):
text = []
for p in self.prefixes.keys():
if (p <> ''):
text.append('>%s="%s"' % (p, self.prefixes[p]))
else:
text.append('>"%s"' % (self.prefixes[p]))
text.append('%s %s "%s"' % (self.index, self.relation.toCQL(), self.term))
return ' '.join(text)
def getResultSetId(self, top=None):
idx = self.index
idx.resolvePrefix()
if (idx.prefixURI == reservedPrefixes['cql'] and idx.value.lower() == 'resultsetid'):
return self.term.value
else:
return ""
class Index(PrefixedObject):
"Object to represent a CQL index"
def toXCQL(self, depth=0):
if (depth == 0):
ns = ' xmlns="%s"' % (XCQLNamespace)
else:
ns = ""
return "%s<index%s>%s</index>\n" % (" "*depth, ns, escape(str(self)))
def toCQL(self):
return str(self)
class Relation(PrefixedObject, ModifiableObject):
"Object to represent a CQL relation"
def __init__(self, rel, mods=[]):
self.prefix = "cql"
PrefixedObject.__init__(self, rel)
self.modifiers = mods
for m in mods:
m.parent = self
def toXCQL(self, depth=0):
"Create XCQL representation of object"
if (depth == 0):
ns = ' xmlns="%s"' % (XCQLNamespace)
else:
ns = ""
space = " " * depth
xml = ["%s<relation%s>\n" % (space, ns)]
xml.append("%s <value>%s</value>\n" % (space, escape(self.value)))
if self.modifiers:
xml.append("%s <modifiers>\n" % (space))
for m in self.modifiers:
xml.append(m.toXCQL(depth+2))
xml.append("%s </modifiers>\n" % (space))
xml.append("%s</relation>\n" % (space))
return ''.join(xml)
def toCQL(self):
txt = [self.value]
txt.extend(map(str, self.modifiers))
return '/'.join(txt)
class Term:
value = ""
def __init__(self, v):
if (v <> ""):
# Unquoted literal
if v in ['>=', '<=', '>', '<', '<>', "/", '=']:
diag = Diagnostic25()
                diag.details = v
raise diag
# Check existence of meaningful term
nonanchor = 0
for c in v:
if c != "^":
nonanchor = 1
break
if not nonanchor:
diag = Diagnostic32()
diag.details = "Only anchoring charater(s) in term: " + v
raise diag
# Unescape quotes
if (v[0] == '"' and v[-1] == '"'):
v = v[1:-1]
v = v.replace('\\"', '"')
if (not v and errorOnEmptyTerm):
diag = Diagnostic27()
raise diag
# Check for badly placed \s
            startidx = 0
            idx = v.find("\\", startidx)
            while (idx > -1):
                if idx + 1 >= len(v) or v[idx+1] not in ['?', '\\', '*', '^']:
                    diag = Diagnostic26()
                    diag.details = v
                    raise diag
                # Step past the escaped character so "\\\\" is not re-checked
                startidx = idx + 2
                idx = v.find("\\", startidx)
elif (errorOnEmptyTerm):
diag = Diagnostic27()
raise diag
self.value = v
def __str__(self):
return self.value
def toXCQL(self, depth=0):
if (depth == 0):
ns = ' xmlns="%s"' % (XCQLNamespace)
else:
ns = ""
return "%s<term%s>%s</term>\n" % (" "*depth, ns, escape(self.value))
class Boolean(ModifiableObject):
"Object to represent a CQL boolean"
value = ""
parent = None
def __init__(self, bool, mods=[]):
self.value = bool
self.modifiers = mods
self.parent = None
def toXCQL(self, depth=0):
"Create XCQL representation of object"
space = " " * depth
xml = ["%s<boolean>\n" % (space)]
xml.append("%s <value>%s</value>\n" % (space, escape(self.value)))
if self.modifiers:
xml.append("%s <modifiers>\n" % (space))
for m in self.modifiers:
xml.append(m.toXCQL(depth+2))
xml.append("%s </modifiers>\n" % (space))
xml.append("%s</boolean>\n" % (space))
return ''.join(xml)
def toCQL(self):
txt = [self.value]
for m in self.modifiers:
txt.append(m.toCQL())
return '/'.join(txt)
def resolvePrefix(self, name):
return self.parent.resolvePrefix(name)
class ModifierType(PrefixedObject):
# Same as index, but we'll XCQLify in ModifierClause
parent = None
prefix = "cql"
class ModifierClause:
"Object to represent a relation modifier"
parent = None
type = None
comparison = ""
value = ""
def __init__(self, type, comp="", val=""):
self.type = ModifierType(type)
self.type.parent = self
self.comparison = comp
self.value = val
def __str__(self):
if (self.value):
return "%s%s%s" % (str(self.type), self.comparison, self.value)
else:
return "%s" % (str(self.type))
def toXCQL(self, depth=0):
if (self.value):
return "%s<modifier>\n%s<type>%s</type>\n%s<comparison>%s</comparison>\n%s<value>%s</value>\n%s</modifier>\n" % (" " * depth, " " * (depth+1), escape(str(self.type)), " " * (depth+1), escape(self.comparison), " " * (depth+1), escape(self.value), " " * depth)
else:
return "%s<modifier><type>%s</type></modifier>\n" % (" " * depth, escape(str(self.type)))
def toCQL(self):
return str(self)
def resolvePrefix(self, name):
# Need to skip parent, which has its own resolvePrefix
# eg boolean or relation, neither of which is prefixable
return self.parent.parent.resolvePrefix(name)
# Requires changes for: <= >= <>, and escaped \" in "
# From shlex.py (std library for 2.2+)
class CQLshlex(shlex):
"shlex with additions for CQL parsing"
quotes = '"'
commenters = ""
nextToken = ""
def __init__(self, thing):
shlex.__init__(self, thing)
self.wordchars += "!@#$%^&*-+{}[];,.?|~`:\\"
self.wordchars += ''.join(map(chr, range(128,254)))
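    # read_token() below is a small state machine; its states are:
    #   ' '   whitespace: scanning for the start of the next token
    #   'a'   accumulating a word token
    #   '<'   just saw '<' or '>'; may combine into '<=', '>=' or '<>'
    #   '"'   inside a quoted string
    #   None  end of input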
def read_token(self):
"Read a token from the input stream (no pushback or inclusions)"
while 1:
if (self.nextToken != ""):
self.token = self.nextToken
self.nextToken = ""
# Bah. SUPER ugly non portable
if self.token == "/":
self.state = ' '
break
nextchar = self.instream.read(1)
if nextchar == '\n':
self.lineno = self.lineno + 1
if self.debug >= 3:
print "shlex: in state ", repr(self.state), " I see character:", repr(nextchar)
if self.state is None:
self.token = '' # past end of file
break
elif self.state == ' ':
if not nextchar:
self.state = None # end of file
break
elif nextchar in self.whitespace:
if self.debug >= 2:
print "shlex: I see whitespace in whitespace state"
if self.token:
break # emit current token
else:
continue
elif nextchar in self.commenters:
self.instream.readline()
self.lineno = self.lineno + 1
elif nextchar in self.wordchars:
self.token = nextchar
self.state = 'a'
elif nextchar in self.quotes:
self.token = nextchar
self.state = nextchar
elif nextchar in ['<', '>']:
self.token = nextchar
self.state = '<'
else:
self.token = nextchar
if self.token:
break # emit current token
else:
continue
elif self.state == '<':
# Only accumulate <=, >= or <>
if self.token == ">" and nextchar == "=":
self.token = self.token + nextchar
self.state = ' '
break
elif self.token == "<" and nextchar in ['>', '=']:
self.token = self.token + nextchar
self.state = ' '
break
elif not nextchar:
self.state = None
break
elif nextchar == "/":
self.state = "/"
self.nextToken = "/"
break
elif nextchar in self.wordchars:
self.state='a'
self.nextToken = nextchar
break
elif nextchar in self.quotes:
self.state=nextchar
self.nextToken = nextchar
break
else:
self.state = ' '
break
elif self.state in self.quotes:
self.token = self.token + nextchar
# Allow escaped quotes
if nextchar == self.state and self.token[-2] != '\\':
self.state = ' '
break
elif not nextchar: # end of file
if self.debug >= 2:
print "shlex: I see EOF in quotes state"
# Override SHLEX's ValueError to throw diagnostic
diag = Diagnostic14()
diag.details = self.token[:-1]
raise diag
elif self.state == 'a':
if not nextchar:
self.state = None # end of file
break
elif nextchar in self.whitespace:
if self.debug >= 2:
print "shlex: I see whitespace in word state"
self.state = ' '
if self.token:
break # emit current token
else:
continue
elif nextchar in self.commenters:
self.instream.readline()
self.lineno = self.lineno + 1
elif nextchar in self.wordchars or nextchar in self.quotes:
self.token = self.token + nextchar
elif nextchar in ['>', '<']:
self.nextToken = nextchar
self.state = '<'
break
else:
self.pushback = [nextchar] + self.pushback
if self.debug >= 2:
print "shlex: I see punctuation in word state"
self.state = ' '
if self.token:
break # emit current token
else:
continue
result = self.token
self.token = ''
if self.debug > 1:
if result:
print "shlex: raw token=" + `result`
else:
print "shlex: raw token=EOF"
return result
class CQLParser:
"Token parser to create object structure for CQL"
parser = ""
currentToken = ""
nextToken = ""
def __init__(self, p):
""" Initialise with shlex parser """
self.parser = p
self.fetch_token() # Fetches to next
self.fetch_token() # Fetches to curr
def is_boolean(self, token):
"Is the token a boolean"
token = token.lower()
return token in booleans
def fetch_token(self):
""" Read ahead one token """
tok = self.parser.get_token()
self.currentToken = self.nextToken
self.nextToken = tok
def prefixes(self):
"Create prefixes dictionary"
prefs = {}
while (self.currentToken == ">"):
# Strip off maps
self.fetch_token()
if self.nextToken == "=":
# Named map
name = self.currentToken
self.fetch_token() # = is current
self.fetch_token() # id is current
identifier = self.currentToken
self.fetch_token()
else:
name = ""
identifier = self.currentToken
self.fetch_token()
if (errorOnDuplicatePrefix and prefs.has_key(name)):
# Error condition
diag = Diagnostic45()
diag.details = name
                raise diag
if len(identifier) > 1 and identifier[0] == '"' and identifier[-1] == '"':
identifier = identifier[1:-1]
prefs[name.lower()] = identifier
return prefs
def query(self):
""" Parse query """
prefs = self.prefixes()
left = self.subQuery()
while 1:
if not self.currentToken:
                break
bool = self.is_boolean(self.currentToken)
if bool:
boolobject = self.boolean()
right = self.subQuery()
# Setup Left Object
trip = tripleType()
trip.leftOperand = left
trip.boolean = boolobject
trip.rightOperand = right
left.parent = trip
right.parent = trip
boolobject.parent = trip
left = trip
else:
                break
for p in prefs.keys():
left.addPrefix(p, prefs[p])
return left
def subQuery(self):
""" Find either query or clause """
if self.currentToken == "(":
self.fetch_token() # Skip (
object = self.query()
if self.currentToken == ")":
self.fetch_token() # Skip )
else:
diag = Diagnostic13()
diag.details = self.currentToken
raise diag
else:
prefs = self.prefixes()
if (prefs):
object = self.query()
for p in prefs.keys():
object.addPrefix(p, prefs[p])
else:
object = self.clause()
return object
def clause(self):
""" Find searchClause """
bool = self.is_boolean(self.nextToken)
if not bool and not (self.nextToken in [')', '(', '']):
index = indexType(self.currentToken)
self.fetch_token() # Skip Index
rel = self.relation()
if (self.currentToken == ''):
diag = Diagnostic10()
diag.details = "Expected Term, got end of query."
raise(diag)
term = termType(self.currentToken)
self.fetch_token() # Skip Term
irt = searchClauseType(index, rel, term)
elif self.currentToken and (bool or self.nextToken in [')', '']):
irt = searchClauseType(indexType(serverChoiceIndex), relationType(serverChoiceRelation), termType(self.currentToken))
self.fetch_token()
elif self.currentToken == ">":
prefs = self.prefixes()
# iterate to get object
object = self.clause()
for p in prefs.keys():
                object.addPrefix(p, prefs[p])
return object
else:
diag = Diagnostic10()
diag.details = "Expected Boolean or Relation but got: " + self.currentToken
raise diag
return irt
def modifiers(self):
mods = []
while (self.currentToken == modifierSeparator):
self.fetch_token()
mod = self.currentToken
mod = mod.lower()
if (mod == modifierSeparator):
diag = Diagnostic20()
diag.details = "Null modifier"
raise diag
self.fetch_token()
comp = self.currentToken
if (comp in order):
self.fetch_token()
value = self.currentToken
self.fetch_token()
else:
comp = ""
value = ""
mods.append(ModifierClause(mod, comp, value))
return mods
def boolean(self):
""" Find boolean """
self.currentToken = self.currentToken.lower()
if self.currentToken in booleans:
bool = booleanType(self.currentToken)
self.fetch_token()
bool.modifiers = self.modifiers()
for b in bool.modifiers:
b.parent = bool
else:
diag = Diagnostic37()
diag.details = self.currentToken
raise diag
return bool
def relation(self):
""" Find relation """
self.currentToken = self.currentToken.lower()
rel = relationType(self.currentToken)
self.fetch_token()
rel.modifiers = self.modifiers()
for r in rel.modifiers:
r.parent = rel
return rel
class XCQLParser:
""" Parser for XCQL using some very simple DOM """
def firstChildElement(self, elem):
""" Find first child which is an Element """
for c in elem.childNodes:
if c.nodeType == Node.ELEMENT_NODE:
return c
return None
def firstChildData(self,elem):
""" Find first child which is Data """
for c in elem.childNodes:
if c.nodeType == Node.TEXT_NODE:
return c
return None
def searchClause(self, elem):
""" Process a <searchClause> """
sc = searchClauseType()
for c in elem.childNodes:
if c.nodeType == Node.ELEMENT_NODE:
if c.localName == "index":
sc.index = indexType(self.firstChildData(c).data.lower())
elif c.localName == "term":
sc.term = termType(self.firstChildData(c).data)
elif c.localName == "relation":
sc.relation = self.relation(c)
elif c.localName == "prefixes":
sc.prefixes = self.prefixes(c)
else:
                    raise ValueError(c.localName)
return sc
def triple(self, elem):
""" Process a <triple> """
trip = tripleType()
for c in elem.childNodes:
if c.nodeType == Node.ELEMENT_NODE:
if c.localName == "boolean":
trip.boolean = self.boolean(c)
elif c.localName == "prefixes":
trip.prefixes = self.prefixes(c)
elif c.localName == "leftOperand":
c2 = self.firstChildElement(c)
if c2.localName == "searchClause":
trip.leftOperand = self.searchClause(c2)
else:
trip.leftOperand = self.triple(c2)
else:
c2 = self.firstChildElement(c)
if c2.localName == "searchClause":
trip.rightOperand = self.searchClause(c2)
else:
trip.rightOperand = self.triple(c2)
return trip
def relation(self, elem):
""" Process a <relation> """
rel = relationType()
for c in elem.childNodes:
if c.nodeType == Node.ELEMENT_NODE:
if c.localName == "value":
rel.value = c.firstChild.data.lower()
elif c.localName == "modifiers":
mods = []
for c2 in c.childNodes:
if c2.nodeType == Node.ELEMENT_NODE:
if c2.localName == "modifier":
for c3 in c2.childNodes:
if c3.localName == "value":
val = self.firstChildData(c2).data.lower()
mods.append(val)
rel.modifiers = mods
return rel
def boolean(self, elem):
"Process a <boolean>"
bool = booleanType()
for c in elem.childNodes:
if c.nodeType == Node.ELEMENT_NODE:
if c.localName == "value":
bool.value = self.firstChildData(c).data.lower()
else:
# Can be in any order, so we need to extract, then order
mods = {}
for c2 in c.childNodes:
if c2.nodeType == Node.ELEMENT_NODE:
if c2.localName == "modifier":
type = ""
value = ""
for c3 in c2.childNodes:
if c3.nodeType == Node.ELEMENT_NODE:
if c3.localName == "value":
value = self.firstChildData(c3).data.lower()
elif c3.localName == "type":
type = self.firstChildData(c3).data
mods[type] = value
modlist = []
for t in booleanModifierTypes[1:]:
if mods.has_key(t):
modlist.append(mods[t])
else:
modlist.append('')
bool.modifiers = modlist
return bool
def prefixes(self, elem):
"Process <prefixes>"
prefs = {}
for c in elem.childNodes:
if c.nodeType == Node.ELEMENT_NODE:
# prefix
name = ""
identifier = ""
for c2 in c.childNodes:
if c2.nodeType == Node.ELEMENT_NODE:
if c2.localName == "name":
name = self.firstChildData(c2).data.lower()
elif c2.localName == "identifier":
identifier = self.firstChildData(c2).data
prefs[name] = identifier
return prefs
def xmlparse(s):
""" API. Return a seachClause/triple object from XML string """
doc = parseString(s)
q = xcqlparse(doc.firstChild)
return q
def xcqlparse(query):
""" API. Return a searchClause/triple object from XML DOM objects"""
# Requires only properties of objects so we don't care how they're generated
p = XCQLParser()
if query.localName == "searchClause":
return p.searchClause(query)
else:
return p.triple(query)
def parse(query):
""" API. Return a searchClause/triple object from CQL string"""
try:
query = query.encode("utf-8")
except:
diag = Diagnostic10()
diag.details = "Cannot parse non utf-8 characters"
raise diag
q = StringIO(query)
lexer = CQLshlex(q)
parser = CQLParser(lexer)
object = parser.query()
if parser.currentToken != '':
diag = Diagnostic10()
diag.details = "Unprocessed tokens remain: " + repr(parser.currentToken)
raise diag
else:
del lexer
del parser
del q
return object
# Assign our objects to generate
tripleType = Triple
booleanType = Boolean
relationType = Relation
searchClauseType = SearchClause
modifierClauseType = ModifierClause
modifierTypeType = ModifierType
indexType = Index
termType = Term
try:
from CQLUtils import *
tripleType = CTriple
booleanType = CBoolean
relationType = CRelation
searchClauseType = CSearchClause
modifierClauseType = CModifierClause
modifierTypeType = CModifierType
indexType = CIndex
termType = CTerm
except:
# Nested scopes. Utils needs our classes to parent
# We need its classes to build (maybe)
pass
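# A minimal programmatic usage sketch (the query string below is
# illustrative, not taken from the original source):
def _exampleParse():
    q = parse('dc.title any "fish" and dc.creator exact "jones"')
    print q.toCQL()
    print q.toXCQL()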
if (__name__ == "__main__"):
    import sys
    s = sys.stdin.readline()
    try:
        q = parse(s)
    except SRWDiagnostic, diag:
        # Print a full version, not just str()
        print "Diagnostic Generated."
        print " Code: " + str(diag.code)
        print " Details: " + str(diag.details)
        print " Message: " + str(diag.message)
    else:
        print q.toXCQL()[:-1]
|
|
#!/usr/bin/env python2.7
import sys
import os
import argparse
import re
from operator import itemgetter
#Restore the default SIGPIPE handler so the script doesn't raise IOError when its output is piped to head
import signal
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def main():
args=processArgs()
    if args.r:
rCount = countResidues(args.i,args.type)
print "Residues: ",rCount
sys.exit()
fastaDict=loadFasta(args.i)
fastaDict=getFastaStats(fastaDict)
fastaOrder = fastaDict.keys()
    if args.s is not None:
fastaOrder=orderFasta(fastaDict,args.s,args.d)
print "\n".join(fastaOrder)
#for fa in fastaOrder:
#print ">%s: %s\n%s" % (fa,fastaDict[fa]['length'],fastaDict[fa]['seq'])
#print ">%s\n%s" % (fa,fastaDict[fa]['seq'])
args.i.close()
    #close stdout/stderr explicitly so a broken pipe (e.g. piping to head) doesn't raise at exit
try:
sys.stdout.close()
except:
pass
try:
sys.stderr.close()
except:
pass
def getFastaStats(faDict):
for fa in faDict:
faDict[fa]['length']=int(len(faDict[fa]['seq']))
return faDict
def loadFasta(fh):
    rdict = {}
    currid = ""
    for line in fh:
if (re.match("\s*#",line)):
continue
if (line=="\n"):
continue
line = line.rstrip('\n')
if (re.match(">",line)):
currid=line.lstrip(">")
else:
try:
rdict[currid]['seq']+=line
                except KeyError:
rdict[currid]={}
rdict[currid]['seq']=line
return rdict
def countResidues(fh, ftype):
    resCount = 0
    for header, seq, qual in fiterator(fh, ftype):
resCount += len(seq)
return resCount
def orderFasta(faDict,stype,sdirection):
keylist = faDict.keys()
srev = False if sdirection=='asc' else True
    if stype == 'name':
keylist=sorted(keylist, reverse=srev)
else:
try:
faDict[keylist[0]]['length']
        except KeyError:
faDict = getFastaStats(faDict)
keylist=sorted(keylist,key = lambda x: (faDict[x]['length']), reverse=srev)
return keylist
def fiterator (fh,ftype) :
if ftype == 'fasta' :
seq = ''
header = ''
qual = ''
for line in fh:
            line = line.strip()
            if not line:
                continue
            if line.startswith('>'):
if seq != '' :
yield header,seq,qual
seq = ''
header = line
else:
seq += line
if seq != '':
yield header,seq,qual
if ftype == 'fastq' :
header = ''
seq = ''
pip = ''
qual = ''
for line in fh:
#print "line",line
            line = line.strip()
            if not line:
                continue
            if header == '':
                header = line
                #print "just assigned",header
                if not header.startswith('@'):
print "Invalid fastq format. This should be a header but it's not: %s\nExiting" % (line)
sys.exit()
elif seq == '' :
seq = line
elif pip == '' :
pip = line
elif qual == '' :
qual = line
if len(header) == 0 or \
len(seq) == 0 or \
len(pip) == 0 or \
len(qual) == 0 :
print "This is a disaster. Exiting"
sys.exit()
else :
yield header,seq,qual
#print "header",header
header = ''
seq = ''
pip = ''
qual = ''
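#Usage sketch for fiterator (a hypothetical helper, not part of the original
#script): count records and residues in an already-open fasta handle
def exampleIterate(fh):
    records = 0
    residues = 0
    for header, seq, qual in fiterator(fh, 'fasta'):
        records += 1
        residues += len(seq)
    return records, residues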
def checkErrors(path):
    if not os.path.isfile(path):
        sys.exit("The argument you entered is not a file")
def processArgs():
class MyParser(argparse.ArgumentParser):
def error(self, message):
sys.stderr.write('\nerror: %s\n\n' % message)
self.print_help()
sys.exit(2)
argParser = MyParser()
argParser.add_argument('-i', help="A properly formatted fasta file", type=argparse.FileType('r'), default=sys.stdin)
argParser.add_argument('type', help="fasta or fastq", choices=['fasta','fastq'])
argParser.add_argument('-s', metavar='sort', help='Enter either "name" or "length"', choices=['name','length'], default=None)
    argParser.add_argument('-d', metavar='direction', help='Enter either "asc" or "desc". Defaults to "asc"', choices=['asc','desc'], default='asc')
argParser.add_argument('-r', action = 'store_true', help='Counts the number of residues in the file')
args = argParser.parse_args()
return args
#Run main() only when this file is executed as a script, not when it is imported as a module
if __name__ == '__main__':
main()
'''A few things to remember:
-tuples are immutable. Once you create one, its contents can't be changed, unlike a list.
'''
|
|
# Copyright (C) 2009, Hyves (Startphone Ltd.)
#
# This module is part of the Concurrence Framework and is released under
# the New BSD License: http://www.opensource.org/licenses/bsd-license.php
from __future__ import with_statement
import logging
from concurrence import Tasklet, TaskletPool, Channel, DeferredQueue, QueueChannel, TIMEOUT_CURRENT, TimeoutError
from concurrence.io import Socket, BufferedStream
from concurrence.memcache import MemcacheError, MemcacheResult
from concurrence.memcache.codec import MemcacheCodec
from concurrence.memcache.behaviour import MemcacheBehaviour
from concurrence.memcache.protocol import MemcacheProtocol
#TODO:
#linger on close
#how to communicate and handle errors (raise error for get/gets?) and/or extra stuff like flags?
#timeout on commands (test tasklet based timeout)
#statistics
#gzip support
#close unused connections
#proper buffer sizes
#noreply, e.g. no server response to set commands; what is the fastest fill rate against a single memcached server?
#stats cmd (+ item and item size stats)
#what to do with partial multi get failure across multiple servers?, e.g. return partial keys?
#bundling of multiple requests in 1 flush (autoflush on/off)
#todo detect timeouts on write/read, and mark host as dead
#keep some time before retrying host
#close down node no recv ERROR?
#UDP support
#binary support
#how to handle timeouts in the pipelined case?
#TODO validate keys!, they are 'txt' not random bins!, e.g. some chars not allowed, which ones?
#CLAMP timestamps at 2**31-1
#CHECK KEY MAX LEN, VAL MAX VALUE LEN, VALID KEY
class ResultChannel(QueueChannel):
def __init__(self):
super(ResultChannel, self).__init__(preference = 1)
class CommandBatch(object):
def __init__(self, target):
self._cmds = []
self._target = target
self._result_funcs = []
def _batch_command(self, cmd, args, error_value = None, result_func = None):
self._cmds.append((cmd, args, error_value))
self._result_funcs.append(result_func)
def delete(self, key, expiration = 0):
self._batch_command("delete", (key, expiration), None, lambda r,v: r)
def set(self, key, data, expiration = 0, flags = 0):
self._batch_command("set", (key, data, expiration, flags), None, lambda r,v: r)
def add(self, key, data, expiration = 0, flags = 0):
self._batch_command("add", (key, data, expiration, flags))
def replace(self, key, data, expiration = 0, flags = 0):
self._batch_command("replace", (key, data, expiration, flags))
def append(self, key, data, expiration = 0, flags = 0):
self._batch_command("append", (key, data, expiration, flags))
def prepend(self, key, data, expiration = 0, flags = 0):
self._batch_command("prepend", (key, data, expiration, flags))
def cas(self, key, data, cas_unique, expiration = 0, flags = 0):
self._batch_command("cas", (key, data, expiration, flags, cas_unique))
def incr(self, key, increment):
self._batch_command("incr", (key, increment))
def decr(self, key, increment):
self._batch_command("decr", (key, increment))
def get(self, key, default = None):
def _r(result, values):
return values.get(key, default)
self._batch_command("get", ([key], ), {}, _r)
def getr(self, key, default = None):
def _r(result, values):
return result, values.get(key, default)
self._batch_command("get", ([key]), {}, _r)
def gets(self, key, default = None):
def _r(result, values):
value, cas_unique = values.get(key, (default, None))
return result, value, cas_unique
self._batch_command("gets", ([key]), {}, _r)
def execute(self):
assert self._cmds, "expected some cmds to be batched"
class _ResultChannel(ResultChannel):
def receive(_self, timeout = TIMEOUT_CURRENT):
_result = super(_ResultChannel, _self).receive(timeout)
_result_func = self._result_funcs[_self.i]
if _result_func is not None:
_result = _result_func(*_result)
_self.i += 1
return _result
def __iter__(_self):
return _self.receive_n(len(self._cmds))
result_channel = _ResultChannel()
result_channel.i = 0
self._target._defer_commands(self._cmds, result_channel)
return result_channel
class MemcacheConnection(object):
log = logging.getLogger("MemcacheConnection")
_tasklet_pool = TaskletPool(worker_timeout = 2.0,
worker_timeout_relative = False)
def __init__(self, address, protocol = "text", codec = "default"):
self._address = address
self._stream = None
self._read_queue = DeferredQueue(self._tasklet_pool.defer)
self._write_queue = DeferredQueue(self._tasklet_pool.defer)
self._protocol = MemcacheProtocol.create(protocol)
self._protocol.set_codec(MemcacheCodec.create(codec))
def connect(self):
self._stream = BufferedStream(Socket.connect(self._address))
def is_connected(self):
return self._stream is not None
def _defer_commands(self, cmds, result_channel):
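        #Pipelining (descriptive summary): the writes for all batched commands
        #are deferred to the write queue; once they are flushed, the matching
        #reads are deferred to the read queue, and each command's result (or
        #its error_value on failure) is sent to result_channel in order.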
def _read_results():
protocol = self._protocol
with self._stream.get_reader() as reader:
for cmd, args, error_value in cmds:
try:
result = protocol.read(cmd, reader)
result_channel.send(result)
except TaskletExit:
raise
except:
self.log.exception("read error in defer_commands")
result_channel.send((MemcacheResult.ERROR, error_value))
        #end _read_results
def _write_commands():
protocol = self._protocol
try:
if not self.is_connected():
self.connect()
except TaskletExit:
raise
except:
self.log.exception("connect error in defer_commands")
for _, _, error_value in cmds:
result_channel.send((MemcacheResult.ERROR, error_value))
return
with self._stream.get_writer() as writer:
for cmd, args, error_value in cmds:
try:
protocol.write(cmd, writer, args)
except TaskletExit:
raise
except:
self.log.exception("write error in defer_commands")
result_channel.send((MemcacheResult.ERROR, error_value))
writer.flush()
self._read_queue.defer(_read_results)
#end _write_commands
self._write_queue.defer(_write_commands)
def _defer_command(self, cmd, args, result_channel, error_value = None):
self._defer_commands([(cmd, args, error_value)], result_channel)
def _do_command(self, cmd, args, error_value = None):
result_channel = ResultChannel()
self._defer_command(cmd, args, result_channel, error_value)
try:
return result_channel.receive()
except TimeoutError:
return MemcacheResult.TIMEOUT, error_value
def close(self):
if self.is_connected():
self._stream.close()
self._stream = None
def __setitem__(self, key, data):
self.set(key, data)
def __getitem__(self, key):
return self.get(key)
def delete(self, key, expiration = 0):
return self._do_command("delete", (key, expiration))[0]
def set(self, key, data, expiration = 0, flags = 0):
return self._do_command("set", (key, data, expiration, flags))[0]
def add(self, key, data, expiration = 0, flags = 0):
return self._do_command("add", (key, data, expiration, flags))[0]
def replace(self, key, data, expiration = 0, flags = 0):
return self._do_command("replace", (key, data, expiration, flags))[0]
def append(self, key, data, expiration = 0, flags = 0):
return self._do_command("append", (key, data, expiration, flags))[0]
def prepend(self, key, data, expiration = 0, flags = 0):
return self._do_command("prepend", (key, data, expiration, flags))[0]
def cas(self, key, data, cas_unique, expiration = 0, flags = 0):
return self._do_command("cas", (key, data, expiration, flags, cas_unique))[0]
def incr(self, key, increment):
return self._do_command("incr", (key, increment))
def decr(self, key, increment):
return self._do_command("decr", (key, increment))
def get(self, key, default = None):
_, values = self._do_command("get", ([key], ), {})
return values.get(key, default)
def getr(self, key, default = None):
result, values = self._do_command("get", ([key], ), {})
return result, values.get(key, default)
def gets(self, key, default = None):
result, values = self._do_command("gets", ([key], ), {})
value, cas_unique = values.get(key, (default, None))
return result, value, cas_unique
def get_multi(self, keys):
return self._do_command("get", (keys, ))
def gets_multi(self, keys):
return self._do_command("gets", (keys, ))
def version(self):
return self._do_command("version", ())
def batch(self):
return CommandBatch(self)
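#Minimal usage sketch (assumptions: a memcached server is reachable at the
#address shown, the (host, port) tuple is what concurrence.io.Socket.connect
#expects, and this runs inside a Concurrence dispatch loop):
def _example_connection():
    connection = MemcacheConnection(('127.0.0.1', 11211))
    print connection.set('spam', 'eggs') #-> a MemcacheResult code
    print connection.get('spam') #-> 'eggs'
    #batched commands are written in one flush; results arrive in order
    batch = connection.batch()
    batch.set('counter', '1')
    batch.incr('counter', 1)
    batch.get('counter')
    for result in batch.execute():
        print result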
class MemcacheConnectionManager(object):
_instance = None #TODO when we support multiple protocols, we need to have 1 instance per protocol
def __init__(self):
self._connections = {} #address -> connection
def get_connection(self, address, protocol):
"""gets a connection to memcached servers at given address using given protocol."""
        if address not in self._connections:
self._connections[address] = MemcacheConnection(address, protocol)
return self._connections[address]
def close_all(self):
for connection in self._connections.values():
connection.close()
self._connections = {}
@classmethod
def create(cls, type_):
if isinstance(type_, MemcacheConnectionManager):
return type_
elif type_ == "default":
if cls._instance is None:
cls._instance = MemcacheConnectionManager()
return cls._instance
else:
raise MemcacheError("connection manager: %s" % type_)
class Memcache(object):
def __init__(self, servers = None, codec = "default", behaviour = "ketama", protocol = "text", connection_manager = "default"):
self.read_timeout = 10
self.write_timeout = 10
self.connect_timeout = 2
self._protocol = MemcacheProtocol.create(protocol)
        self._protocol.set_codec(MemcacheCodec.create(codec))
self._connection_manager = MemcacheConnectionManager.create(connection_manager)
self._behaviour = MemcacheBehaviour.create(behaviour)
self._key_to_addr = self._behaviour.key_to_addr
self.set_servers(servers)
def _get_connection(self, addr):
return self._connection_manager.get_connection(addr, self._protocol)
def _get(self, cmd, key, default):
result_channel = ResultChannel()
connection = self.connection_for_key(key)
connection._defer_command(cmd, [[key]], result_channel, {})
result, values = result_channel.receive()
return result, values.get(key, default)
def _get_multi(self, cmd, keys):
#group keys by address (address->[keys]):
grouped_addrs = {}
for key in keys:
addr = self._key_to_addr(key)
grouped_addrs.setdefault(addr, []).append(key)
#n is the number of servers we need to 'get' from
n = len(grouped_addrs)
result_channel = ResultChannel()
for address, _keys in grouped_addrs.iteritems():
connection = self._get_connection(address)
connection._defer_command(cmd, [_keys], result_channel, {})
#loop over the results as they come in and aggregate the final result
values = {}
result = MemcacheResult.OK
for _result, _values in result_channel.receive_n(n):
if MemcacheResult.OK is _result:
values.update(_values)
else:
                result = _result #note: only the last non-OK result is returned
return result, values
def set_servers(self, servers = None):
if servers is not None:
self._behaviour.set_servers(servers)
def connection_for_key(self, key):
return self._get_connection(self._key_to_addr(key))
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, data):
self.set(key, data)
def delete(self, key, expiration = 0):
return self.connection_for_key(key)._do_command("delete", (key, expiration))[0]
def set(self, key, data, expiration = 0, flags = 0):
return self.connection_for_key(key)._do_command("set", (key, data, expiration, flags))[0]
def add(self, key, data, expiration = 0, flags = 0):
return self.connection_for_key(key)._do_command("add", (key, data, expiration, flags))[0]
def replace(self, key, data, expiration = 0, flags = 0):
return self.connection_for_key(key)._do_command("replace", (key, data, expiration, flags))[0]
def append(self, key, data, expiration = 0, flags = 0):
return self.connection_for_key(key)._do_command("append", (key, data, expiration, flags))[0]
def prepend(self, key, data, expiration = 0, flags = 0):
return self.connection_for_key(key)._do_command("prepend", (key, data, expiration, flags))[0]
def cas(self, key, data, cas_unique, expiration = 0, flags = 0):
return self.connection_for_key(key)._do_command("cas", (key, data, expiration, flags, cas_unique))[0]
def incr(self, key, increment):
return self.connection_for_key(key)._do_command("incr", (key, increment))
def decr(self, key, increment):
return self.connection_for_key(key)._do_command("decr", (key, increment))
def get(self, key, default = None):
return self._get("get", key, default)[1]
def getr(self, key, default = None):
return self._get("get", key, default)
def gets(self, key, default = None):
result, (value, cas_unique) = self._get("gets", key, (default, None))
return result, value, cas_unique
def get_multi(self, keys):
return self._get_multi("get", keys)
def gets_multi(self, keys):
return self._get_multi("gets", keys)
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for cond_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import cond_v2
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver
from tensorflow.python.util import compat
class CondV2Test(test.TestCase):
def _testCond(self, true_fn, false_fn, train_vals, feed_dict=None):
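    """Verifies that cond_v2 and control_flow_ops.cond produce the same
    output values and gradients w.r.t. train_vals, for both pred values."""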
if not feed_dict:
feed_dict = {}
with self.session(graph=ops.get_default_graph()) as sess:
pred = array_ops.placeholder(dtypes.bool, name="pred")
expected = control_flow_ops.cond(pred, true_fn, false_fn, name="expected")
actual = cond_v2.cond_v2(pred, true_fn, false_fn, name="actual")
expected_grad = gradients_impl.gradients(expected, train_vals)
actual_grad = gradients_impl.gradients(actual, train_vals)
sess_run_args = {pred: True}
sess_run_args.update(feed_dict)
expected_val, actual_val, expected_grad_val, actual_grad_val = sess.run(
(expected, actual, expected_grad, actual_grad), sess_run_args)
self.assertEqual(expected_val, actual_val)
self.assertEqual(expected_grad_val, actual_grad_val)
sess_run_args = {pred: False}
sess_run_args.update(feed_dict)
expected_val, actual_val, expected_grad_val, actual_grad_val = sess.run(
(expected, actual, expected_grad, actual_grad), sess_run_args)
self.assertEqual(expected_val, actual_val)
self.assertEqual(expected_grad_val, actual_grad_val)
def testBasic(self):
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return x * 2.0
def false_fn():
return y * 3.0
self._testCond(true_fn, false_fn, [x])
self._testCond(true_fn, false_fn, [x, y])
self._testCond(true_fn, false_fn, [y])
def testMultipleOutputs(self):
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(3.0, name="y")
def true_fn():
return x * y, y
def false_fn():
return x, y * 3.0
self._testCond(true_fn, false_fn, [x])
self._testCond(true_fn, false_fn, [x, y])
self._testCond(true_fn, false_fn, [y])
def testBasic2(self):
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return x * y * 2.0
def false_fn():
return 2.0
self._testCond(true_fn, false_fn, [x])
self._testCond(true_fn, false_fn, [x, y])
self._testCond(true_fn, false_fn, [y])
def testNoInputs(self):
with self.cached_session() as sess:
pred = array_ops.placeholder(dtypes.bool, name="pred")
def true_fn():
return constant_op.constant(1.0)
def false_fn():
return constant_op.constant(2.0)
out = cond_v2.cond_v2(pred, true_fn, false_fn)
self.assertEqual(sess.run(out, {pred: True}), (1.0,))
self.assertEqual(sess.run(out, {pred: False}), (2.0,))
def _createCond(self, name):
"""Helper function for testDefaultName."""
pred = constant_op.constant(True, name="pred")
x = constant_op.constant(1.0, name="x")
def true_fn():
return x
def false_fn():
return x + 1
output = cond_v2.cond_v2(pred, true_fn, false_fn, name=name)
cond_op = output.op.inputs[0].op
self.assertEqual(cond_op.type, "If")
return cond_op
def testDefaultName(self):
with ops.Graph().as_default():
cond_op = self._createCond(None)
self.assertEqual(cond_op.name, "cond")
self.assertRegexpMatches(
cond_op.get_attr("then_branch").name, r"cond_true_\d*")
self.assertRegexpMatches(
cond_op.get_attr("else_branch").name, r"cond_false_\d*")
with ops.Graph().as_default():
with ops.name_scope("foo"):
cond1_op = self._createCond("")
self.assertEqual(cond1_op.name, "foo/cond")
self.assertRegexpMatches(
cond1_op.get_attr("then_branch").name, r"foo_cond_true_\d*")
self.assertRegexpMatches(
cond1_op.get_attr("else_branch").name, r"foo_cond_false_\d*")
cond2_op = self._createCond(None)
self.assertEqual(cond2_op.name, "foo/cond_1")
self.assertRegexpMatches(
cond2_op.get_attr("then_branch").name, r"foo_cond_1_true_\d*")
self.assertRegexpMatches(
cond2_op.get_attr("else_branch").name, r"foo_cond_1_false_\d*")
def testDefunInCond(self):
self.skipTest("b/117293122")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
@function.defun
def fn():
return x * y * 2.0
return fn()
def false_fn():
return 2.0
self._testCond(true_fn, false_fn, [x])
self._testCond(true_fn, false_fn, [x, y])
self._testCond(true_fn, false_fn, [y])
def testNestedDefunInCond(self):
self.skipTest("b/117284369")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return 2.0
def false_fn():
@function.defun
def fn():
@function.defun
def nested_fn():
return x * y * 2.0
return nested_fn()
return fn()
self._testCond(true_fn, false_fn, [x])
self._testCond(true_fn, false_fn, [x, y])
self._testCond(true_fn, false_fn, [y])
def testDoubleNestedDefunInCond(self):
self.skipTest("b/117284369")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
@function.defun
def fn():
@function.defun
def nested_fn():
@function.defun
def nested_nested_fn():
return x * y * 2.0
return nested_nested_fn()
return nested_fn()
return fn()
def false_fn():
return 2.0
self._testCond(true_fn, false_fn, [x])
self._testCond(true_fn, false_fn, [x, y])
self._testCond(true_fn, false_fn, [y])
def testNestedCond(self):
def run_test(pred_value):
def build_graph():
pred = array_ops.placeholder(dtypes.bool, name="pred")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return 2.0
def false_fn():
def false_true_fn():
return x * y * 2.0
def false_false_fn():
return x * 5.0
return _cond(pred, false_true_fn, false_false_fn, "inside_false_fn")
return x, y, pred, true_fn, false_fn
with ops.Graph().as_default():
x, y, pred, true_fn, false_fn = build_graph()
self._testCond(true_fn, false_fn, [x, y], {pred: pred_value})
self._testCond(true_fn, false_fn, [x], {pred: pred_value})
self._testCond(true_fn, false_fn, [y], {pred: pred_value})
run_test(True)
run_test(False)
def testNestedCondBothBranches(self):
def run_test(pred_value):
def build_graph():
pred = array_ops.placeholder(dtypes.bool, name="pred")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return _cond(pred, lambda: x + y, lambda: x * x, name=None)
def false_fn():
return _cond(pred, lambda: x - y, lambda: y * y, name=None)
return x, y, pred, true_fn, false_fn
with ops.Graph().as_default():
x, y, pred, true_fn, false_fn = build_graph()
self._testCond(true_fn, false_fn, [x, y], {pred: pred_value})
self._testCond(true_fn, false_fn, [x], {pred: pred_value})
self._testCond(true_fn, false_fn, [y], {pred: pred_value})
run_test(True)
run_test(False)
def testDoubleNestedCond(self):
def run_test(pred1_value, pred2_value):
def build_graph():
pred1 = array_ops.placeholder(dtypes.bool, name="pred1")
pred2 = array_ops.placeholder(dtypes.bool, name="pred2")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return 2.0
def false_fn():
def false_true_fn():
def false_true_true_fn():
return x * y * 2.0
def false_true_false_fn():
return x * 10.0
return _cond(
pred1,
false_true_true_fn,
false_true_false_fn,
name="inside_false_true_fn")
def false_false_fn():
return x * 5.0
return _cond(
pred2, false_true_fn, false_false_fn, name="inside_false_fn")
return x, y, pred1, pred2, true_fn, false_fn
with ops.Graph().as_default():
x, y, pred1, pred2, true_fn, false_fn = build_graph()
self._testCond(true_fn, false_fn, [x, y], {
pred1: pred1_value,
pred2: pred2_value
})
x, y, pred1, pred2, true_fn, false_fn = build_graph()
self._testCond(true_fn, false_fn, [x], {
pred1: pred1_value,
pred2: pred2_value
})
x, y, pred1, pred2, true_fn, false_fn = build_graph()
self._testCond(true_fn, false_fn, [y], {
pred1: pred1_value,
pred2: pred2_value
})
run_test(True, True)
run_test(True, False)
run_test(False, False)
run_test(False, True)
def testGradientFromInsideDefun(self):
def build_graph():
pred_outer = array_ops.placeholder(dtypes.bool, name="pred_outer")
pred_inner = array_ops.placeholder(dtypes.bool, name="pred_inner")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return 2.0
def false_fn():
def inner_true_fn():
return x * y * 2.0
def inner_false_fn():
return x * 5.0
return cond_v2.cond_v2(
pred_inner, inner_true_fn, inner_false_fn, name="inner_cond")
cond_outer = cond_v2.cond_v2(
pred_outer, true_fn, false_fn, name="outer_cond")
# Compute grads inside a Defun.
@function.defun
def nesting_fn():
return gradients_impl.gradients(cond_outer, [x, y])
grads = nesting_fn()
return grads, pred_outer, pred_inner
with ops.Graph().as_default():
grads, pred_outer, pred_inner = build_graph()
with self.session(graph=ops.get_default_graph()) as sess:
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: True,
pred_inner: True
}), [0., 0.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: True,
pred_inner: False
}), [0., 0.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: False,
pred_inner: True
}), [4., 2.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: False,
pred_inner: False
}), [5., 0.])
def testGradientFromInsideNestedDefun(self):
def build_graph():
pred_outer = array_ops.placeholder(dtypes.bool, name="pred_outer")
pred_inner = array_ops.placeholder(dtypes.bool, name="pred_inner")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
def true_fn():
return 2.0
def false_fn():
def inner_true_fn():
return x * y * 2.0
def inner_false_fn():
return x * 5.0
return cond_v2.cond_v2(
pred_inner, inner_true_fn, inner_false_fn, name="inner_cond")
cond_outer = cond_v2.cond_v2(
pred_outer, true_fn, false_fn, name="outer_cond")
# Compute grads inside a Defun.
@function.defun
def nesting_fn():
@function.defun
def inner_nesting_fn():
return gradients_impl.gradients(cond_outer, [x, y])
return inner_nesting_fn()
grads = nesting_fn()
return grads, pred_outer, pred_inner
with ops.Graph().as_default():
grads, pred_outer, pred_inner = build_graph()
with self.session(graph=ops.get_default_graph()) as sess:
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: True,
pred_inner: True
}), [0., 0.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: True,
pred_inner: False
}), [0., 0.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: False,
pred_inner: True
}), [4., 2.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: False,
pred_inner: False
}), [5., 0.])
def testBuildCondAndGradientInsideDefun(self):
def build_graph():
pred_outer = array_ops.placeholder(dtypes.bool, name="pred_outer")
pred_inner = array_ops.placeholder(dtypes.bool, name="pred_inner")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(2.0, name="y")
# Build cond and its gradient inside a Defun.
@function.defun
def fn():
def true_fn():
return 2.0
def false_fn():
def inner_true_fn():
return x * y * 2.0
def inner_false_fn():
return x * 5.0
return cond_v2.cond_v2(
pred_inner, inner_true_fn, inner_false_fn, name="inner_cond")
cond_outer = cond_v2.cond_v2(
pred_outer, true_fn, false_fn, name="outer_cond")
return gradients_impl.gradients(cond_outer, [x, y])
grads = fn()
return grads, pred_outer, pred_inner
with ops.Graph().as_default(), self.session(
graph=ops.get_default_graph()) as sess:
grads, pred_outer, pred_inner = build_graph()
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: True,
pred_inner: True
}), [0., 0.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: True,
pred_inner: False
}), [0., 0.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: False,
pred_inner: True
}), [4., 2.])
self.assertSequenceEqual(
sess.run(grads, {
pred_outer: False,
pred_inner: False
}), [5., 0.])
def testSecondDerivative(self):
with self.cached_session() as sess:
pred = array_ops.placeholder(dtypes.bool, name="pred")
x = constant_op.constant(3.0, name="x")
def true_fn():
return math_ops.pow(x, 3)
def false_fn():
return x
cond = cond_v2.cond_v2(pred, true_fn, false_fn, name="cond")
cond_grad = gradients_impl.gradients(cond, [x])
cond_grad_grad = gradients_impl.gradients(cond_grad, [x])
# d[x^3]/dx = 3x^2
true_val = sess.run(cond_grad, {pred: True})
self.assertEqual(true_val, [27.0])
# d[x]/dx = 1
false_val = sess.run(cond_grad, {pred: False})
self.assertEqual(false_val, [1.0])
true_val = sess.run(cond_grad_grad, {pred: True})
# d2[x^3]/dx2 = 6x
self.assertEqual(true_val, [18.0])
false_val = sess.run(cond_grad_grad, {pred: False})
# d2[x]/dx2 = 0
self.assertEqual(false_val, [0.0])
def testGradientOfDeserializedCond(self):
with ops.Graph().as_default():
pred = array_ops.placeholder(dtypes.bool, name="pred")
x = constant_op.constant(3.0, name="x")
ops.add_to_collection("x", x)
def true_fn():
return math_ops.pow(x, 3)
def false_fn():
return x
ops.add_to_collection("pred", pred)
cond = cond_v2.cond_v2(pred, true_fn, false_fn, name="cond")
ops.add_to_collection("cond", cond)
meta_graph = saver.export_meta_graph()
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
saver.import_meta_graph(meta_graph)
x = ops.get_collection("x")[0]
pred = ops.get_collection("pred")[0]
cond = ops.get_collection("cond")
cond_grad = gradients_impl.gradients(cond, [x], name="cond_grad")
cond_grad_grad = gradients_impl.gradients(
cond_grad, [x], name="cond_grad_grad")
# d[x^3]/dx = 3x^2
true_val = sess.run(cond_grad, {pred: True})
self.assertEqual(true_val, [27.0])
# d[x]/dx = 1
false_val = sess.run(cond_grad, {pred: False})
self.assertEqual(false_val, [1.0])
true_val = sess.run(cond_grad_grad, {pred: True})
# d2[x^3]/dx2 = 6x
self.assertEqual(true_val, [18.0])
false_val = sess.run(cond_grad_grad, {pred: False})
# d2[x]/dx2 = 0
self.assertEqual(false_val, [0.0])
def testLowering(self):
with ops.Graph().as_default() as g:
with self.session(graph=g) as sess:
out_cond = self._createCond("cond")
run_options = config_pb2.RunOptions(output_partition_graphs=True)
run_metadata = config_pb2.RunMetadata()
sess.run(out_cond, options=run_options, run_metadata=run_metadata)
# If lowering was enabled, there should be a `Switch` node
switch_found = any(
any(node.op == "Switch" for node in graph.node)
for graph in run_metadata.partition_graphs
)
self.assertTrue(switch_found,
"A `Switch` op should exist if the graph was lowered.")
# If lowering was enabled, there should be no `If` node
if_found = any(
any(node.op == "If" for node in graph.node)
for graph in run_metadata.partition_graphs
)
self.assertFalse(if_found,
"An `If` op was found, but it should be lowered.")
def testLoweringDisabledInXLA(self):
with self.session(graph=ops.Graph()) as sess:
# Build the cond_v2 in an XLA context
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
out_cond = self._createCond("cond")
xla_context.Exit()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
run_metadata = config_pb2.RunMetadata()
sess.run(out_cond, options=run_options, run_metadata=run_metadata)
# Lowering disabled in XLA, there should be no `Switch` node
switch_found = any(
any(node.op == "Switch" for node in graph.node)
for graph in run_metadata.partition_graphs
)
self.assertFalse(
switch_found,
"A `Switch` op exists, but the graph should not be lowered.")
# Lowering disabled in XLA, there should still be an `If` node
if_found = any(
any(node.op == "If" for node in graph.node)
for graph in run_metadata.partition_graphs
)
self.assertTrue(
if_found,
"An `If` op was not found, but the graph should not be lowered.")
def testLoweringDisabledWithSingleThreadedExecutorContext(self):
with self.session(graph=ops.Graph()) as sess:
@function.defun
def _add_cond(x):
return cond_v2.cond_v2(
constant_op.constant(True, name="pred"),
lambda: x,
lambda: x + 1)
x = array_ops.placeholder(shape=None, dtype=dtypes.float32)
with context.function_executor_type("SINGLE_THREADED_EXECUTOR"):
out_cond = _add_cond(x)
# The fact that sess.run() succeeds means lowering is disabled, because
# the single threaded executor does not support cond v1 ops.
sess.run(out_cond, feed_dict={x: 1.0})
@test_util.enable_control_flow_v2
def testStructuredOutputs(self):
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(3.0, name="y")
def true_fn():
return ((x * y,), y)
def false_fn():
return ((x,), y * 3.0)
output = control_flow_ops.cond(
constant_op.constant(False), true_fn, false_fn)
self.assertEqual(self.evaluate(output[0][0]), 1.)
self.assertEqual(self.evaluate(output[1]), 9.)
@test_util.enable_control_flow_v2
def testRaisesOutputStructuresMismatch(self):
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(3.0, name="y")
def true_fn():
return x * y, y
def false_fn():
return ((x,), y * 3.0)
with self.assertRaisesRegexp(
ValueError, "Outputs of true_fn and false_fn must"
" have the same structure"):
control_flow_ops.cond(constant_op.constant(False), true_fn, false_fn)
@test_util.enable_control_flow_v2
def testCondAndTensorArray(self):
if test_util.is_gpu_available():
old_enable_tensor_array_v2 = tensor_array_ops.ENABLE_TENSOR_ARRAY_V2
# TODO(b/119689663): Enable this.
tensor_array_ops.ENABLE_TENSOR_ARRAY_V2 = False
x = math_ops.range(-5, 5)
output = tensor_array_ops.TensorArray(dtype=dtypes.int32, size=x.shape[0])
def loop_body(i, output):
def if_true():
return output.write(i, x[i]**2)
def if_false():
return output.write(i, x[i])
output = control_flow_ops.cond(x[i] > 0, if_true, if_false)
return i + 1, output
_, output = control_flow_ops.while_loop(
lambda i, arr: i < x.shape[0],
loop_body,
loop_vars=(constant_op.constant(0), output))
output_t = output.stack()
self.assertAllEqual(
self.evaluate(output_t), [-5, -4, -3, -2, -1, 0, 1, 4, 9, 16])
if test_util.is_gpu_available():
tensor_array_ops.ENABLE_TENSOR_ARRAY_V2 = old_enable_tensor_array_v2
@test_util.enable_control_flow_v2
def testCondAndTensorArrayInDefun(self):
if test_util.is_gpu_available():
old_enable_tensor_array_v2 = tensor_array_ops.ENABLE_TENSOR_ARRAY_V2
# TODO(b/119689663): Enable this.
tensor_array_ops.ENABLE_TENSOR_ARRAY_V2 = False
@function.defun
def f():
x = math_ops.range(-5, 5)
output = tensor_array_ops.TensorArray(dtype=dtypes.int32, size=x.shape[0])
def loop_body(i, output):
def if_true():
return output.write(i, x[i]**2)
def if_false():
return output.write(i, x[i])
output = control_flow_ops.cond(x[i] > 0, if_true, if_false)
return i + 1, output
_, output = control_flow_ops.while_loop(
lambda i, arr: i < x.shape[0],
loop_body,
loop_vars=(constant_op.constant(0), output))
return output.stack()
output_t = f()
self.assertAllEqual(
self.evaluate(output_t), [-5, -4, -3, -2, -1, 0, 1, 4, 9, 16])
if test_util.is_gpu_available():
tensor_array_ops.ENABLE_TENSOR_ARRAY_V2 = old_enable_tensor_array_v2
class CondV2CollectionTest(test.TestCase):
def testCollectionIntValueAccessInCond(self):
"""Read values from graph collections inside of cond_v2."""
with ops.Graph().as_default() as g:
with self.session(graph=g):
x = 2
y = 5
ops.add_to_collection("x", x)
ops.add_to_collection("y", y)
def fn():
x_const = constant_op.constant(ops.get_collection("x")[0])
y_const = constant_op.constant(ops.get_collection("y")[0])
return math_ops.add(x_const, y_const)
cnd = cond_v2.cond_v2(constant_op.constant(True), fn, fn)
self.assertEquals(cnd.eval(), 7)
def testCollectionTensorValueAccessInCond(self):
"""Read tensors from collections inside of cond_v2 & use them."""
with ops.Graph().as_default() as g:
with self.session(graph=g):
x = constant_op.constant(2)
y = constant_op.constant(5)
ops.add_to_collection("x", x)
ops.add_to_collection("y", y)
def fn():
x_read = ops.get_collection("x")[0]
y_read = ops.get_collection("y")[0]
return math_ops.add(x_read, y_read)
cnd = cond_v2.cond_v2(math_ops.less(x, y), fn, fn)
self.assertEquals(cnd.eval(), 7)
def testCollectionIntValueWriteInCond(self):
"""Make sure Int writes to collections work inside of cond_v2."""
with ops.Graph().as_default() as g:
with self.session(graph=g):
x = constant_op.constant(2)
y = constant_op.constant(5)
def true_fn():
z = math_ops.add(x, y)
ops.add_to_collection("z", 7)
          return math_ops.multiply(x, z)
        def false_fn():
          z = math_ops.add(x, y)
          return math_ops.multiply(x, z)
cnd = cond_v2.cond_v2(constant_op.constant(True), true_fn, false_fn)
self.assertEquals(cnd.eval(), 14)
read_z_collection = ops.get_collection("z")
self.assertEquals(read_z_collection, [7])
class CondV2ContainerTest(test.TestCase):
def testContainer(self):
"""Set containers outside & inside of cond_v2.
Make sure the containers are set correctly for both variable creation
(tested by variables.Variable) and for stateful ops (tested by FIFOQueue)
"""
self.skipTest("b/113048653")
with ops.Graph().as_default() as g:
with self.session(graph=g):
v0 = variables.Variable([0])
q0 = data_flow_ops.FIFOQueue(1, dtypes.float32)
def container(node):
return node.op.get_attr("container")
self.assertEqual(compat.as_bytes(""), container(v0))
self.assertEqual(compat.as_bytes(""), container(q0.queue_ref))
def true_fn():
# When this branch is created in cond below,
# the container should begin with 'l1'
v1 = variables.Variable([1])
q1 = data_flow_ops.FIFOQueue(1, dtypes.float32)
with ops.container("l2t"):
v2 = variables.Variable([2])
q2 = data_flow_ops.FIFOQueue(1, dtypes.float32)
v3 = variables.Variable([1])
q3 = data_flow_ops.FIFOQueue(1, dtypes.float32)
self.assertEqual(compat.as_bytes("l1"), container(v1))
self.assertEqual(compat.as_bytes("l1"), container(q1.queue_ref))
self.assertEqual(compat.as_bytes("l2t"), container(v2))
self.assertEqual(compat.as_bytes("l2t"), container(q2.queue_ref))
self.assertEqual(compat.as_bytes("l1"), container(v3))
self.assertEqual(compat.as_bytes("l1"), container(q3.queue_ref))
return constant_op.constant(2.0)
def false_fn():
# When this branch is created in cond below,
# the container should begin with 'l1'
v1 = variables.Variable([1])
q1 = data_flow_ops.FIFOQueue(1, dtypes.float32)
with ops.container("l2f"):
v2 = variables.Variable([2])
q2 = data_flow_ops.FIFOQueue(1, dtypes.float32)
v3 = variables.Variable([1])
q3 = data_flow_ops.FIFOQueue(1, dtypes.float32)
self.assertEqual(compat.as_bytes("l1"), container(v1))
self.assertEqual(compat.as_bytes("l1"), container(q1.queue_ref))
self.assertEqual(compat.as_bytes("l2f"), container(v2))
self.assertEqual(compat.as_bytes("l2f"), container(q2.queue_ref))
self.assertEqual(compat.as_bytes("l1"), container(v3))
self.assertEqual(compat.as_bytes("l1"), container(q3.queue_ref))
return constant_op.constant(6.0)
with ops.container("l1"):
cnd_true = cond_v2.cond_v2(
constant_op.constant(True), true_fn, false_fn)
self.assertEqual(cnd_true.eval(), 2)
cnd_false = cond_v2.cond_v2(
constant_op.constant(False), true_fn, false_fn)
self.assertEqual(cnd_false.eval(), 6)
v4 = variables.Variable([3])
q4 = data_flow_ops.FIFOQueue(1, dtypes.float32)
v5 = variables.Variable([4])
q5 = data_flow_ops.FIFOQueue(1, dtypes.float32)
self.assertEqual(compat.as_bytes("l1"), container(v4))
self.assertEqual(compat.as_bytes("l1"), container(q4.queue_ref))
self.assertEqual(compat.as_bytes(""), container(v5))
self.assertEqual(compat.as_bytes(""), container(q5.queue_ref))
class CondV2ColocationGroupAndDeviceTest(test.TestCase):
def testColocateWithBeforeCond(self):
with ops.Graph().as_default() as g:
with self.session(graph=g):
a = constant_op.constant([2.0], name="a")
b = constant_op.constant([2.0], name="b")
def fn():
c = constant_op.constant(3.0)
self.assertEqual([b"loc:@a"], c.op.colocation_groups())
return c
with ops.colocate_with(a.op):
self.assertEqual(
cond_v2.cond_v2(constant_op.constant(True), fn, fn).eval(), 3)
def fn2():
c = constant_op.constant(3.0)
self.assertEqual([b"loc:@a", b"loc:@b"], c.op.colocation_groups())
return c
with ops.colocate_with(a.op):
with ops.colocate_with(b.op):
self.assertEqual(
cond_v2.cond_v2(constant_op.constant(True), fn2, fn2).eval(), 3)
def testColocateWithInAndOutOfCond(self):
with ops.Graph().as_default() as g:
with self.session(graph=g):
a = constant_op.constant([2.0], name="a")
b = constant_op.constant([2.0], name="b")
def fn2():
with ops.colocate_with(b.op):
c = constant_op.constant(3.0)
self.assertEqual([b"loc:@a", b"loc:@b"], c.op.colocation_groups())
return c
with ops.colocate_with(a.op):
self.assertEqual(
cond_v2.cond_v2(constant_op.constant(True), fn2, fn2).eval(), 3)
d = constant_op.constant([2.0], name="d")
self.assertEqual([b"loc:@a"], d.op.colocation_groups())
def testColocateWithInCondGraphPartitioning(self):
with ops.Graph().as_default() as g:
with self.session(
graph=g,
config=config_pb2.ConfigProto(device_count={"CPU": 2})
) as sess:
with ops.device("/device:CPU:0"):
a = constant_op.constant([2.0], name="a")
with ops.device("/device:CPU:1"):
b = constant_op.constant([2.0], name="b")
def fn():
with ops.colocate_with(b.op):
c = math_ops.add(a, a, name="c")
return c
out_cond_2 = cond_v2.cond_v2(constant_op.constant(True), fn, fn)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
run_metadata = config_pb2.RunMetadata()
sess.run(out_cond_2, options=run_options, run_metadata=run_metadata)
# We expect there to be two partitions because of the
# colocate_with. We are only running the cond, which has a data
# dependency on `a` but not on `b`. So, without the colocate_with
# we would expect execution on just one device.
self.assertGreaterEqual(len(run_metadata.partition_graphs), 2)
def testDeviceBeforeCond(self):
with ops.Graph().as_default() as g:
with self.session(graph=g):
def fn():
self.assertEqual("", constant_op.constant(3.0).op.device)
return test_ops.device_placement_op()
with ops.device("/device:CPU:0"):
self.assertIn(
compat.as_bytes("CPU:0"),
self.evaluate(cond_v2.cond_v2(constant_op.constant(True),
fn, fn)))
def fn2():
self.assertEqual("", constant_op.constant(3.0).op.device)
return test_ops.device_placement_op()
if test_util.is_gpu_available():
with ops.device("/device:GPU:0"):
self.assertIn(
compat.as_bytes("GPU:0"),
self.evaluate(cond_v2.cond_v2(constant_op.constant(True),
fn2, fn2)))
else:
self.skipTest("Test requrires a GPU to check GPU device placement.")
def testDeviceInAndOutOfCond(self):
with ops.Graph().as_default() as g:
with self.session(
graph=g, config=config_pb2.ConfigProto(device_count={"CPU": 2})):
def fn2():
with ops.device("/device:CPU:1"):
c = constant_op.constant(3.0)
self.assertEqual("/device:CPU:1", c.op.device)
return c
with ops.device("/device:CPU:0"):
self.assertEqual(
cond_v2.cond_v2(constant_op.constant(True), fn2, fn2).eval(), 3)
d = constant_op.constant(4.0)
self.assertEqual("/device:CPU:0", d.op.device)
def testDeviceInCondGraphPartitioning(self):
with ops.Graph().as_default() as g:
with self.session(
graph=g,
config=config_pb2.ConfigProto(device_count={"CPU": 2})
) as sess:
def fn():
with ops.device("/device:CPU:1"):
c = math_ops.add(a, a, name="c")
return c
with ops.device("/device:CPU:0"):
a = constant_op.constant([2.0], name="a")
out_cond_2 = cond_v2.cond_v2(constant_op.constant(True), fn, fn)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
run_metadata = config_pb2.RunMetadata()
sess.run(out_cond_2, options=run_options, run_metadata=run_metadata)
self.assertGreaterEqual(len(run_metadata.partition_graphs), 2)
def _cond(pred, true_fn, false_fn, name):
if _is_old_cond():
return control_flow_ops.cond(pred, true_fn, false_fn, name=name)
else:
return cond_v2.cond_v2(pred, true_fn, false_fn, name=name)
def _is_old_cond():
return isinstance(ops.get_default_graph()._get_control_flow_context(),
control_flow_ops.CondContext)
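# A minimal usage sketch for the dispatch helper above (illustrative, not
# part of the test suite): the same call site exercises either the v1 or
# v2 cond implementation, selected by the enclosing control flow context.
def _example_cond_dispatch():
  pred = constant_op.constant(True)
  return _cond(pred,
               lambda: constant_op.constant(1),
               lambda: constant_op.constant(2),
               name="example_cond")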
if __name__ == "__main__":
test.main()
|
|
import copy
import datetime
from django.core.exceptions import FieldError
from django.db.backends import utils as backend_utils
from django.db.models import fields
from django.db.models.query_utils import Q
from django.utils import six
from django.utils.functional import cached_property
class Combinable(object):
"""
Provides the ability to combine one or two objects with
some connector. For example F('foo') + F('bar').
"""
# Arithmetic connectors
ADD = '+'
SUB = '-'
MUL = '*'
DIV = '/'
POW = '^'
# The following is a quoted % operator - it is quoted because it can be
# used in strings that also have parameter substitution.
MOD = '%%'
# Bitwise operators - note that these are generated by .bitand()
# and .bitor(), the '&' and '|' are reserved for boolean operator
# usage.
BITAND = '&'
BITOR = '|'
def _combine(self, other, connector, reversed, node=None):
if not hasattr(other, 'resolve_expression'):
# everything must be resolvable to an expression
if isinstance(other, datetime.timedelta):
other = DurationValue(other, output_field=fields.DurationField())
else:
other = Value(other)
if reversed:
return CombinedExpression(other, connector, self)
return CombinedExpression(self, connector, other)
#############
# OPERATORS #
#############
def __add__(self, other):
return self._combine(other, self.ADD, False)
def __sub__(self, other):
return self._combine(other, self.SUB, False)
def __mul__(self, other):
return self._combine(other, self.MUL, False)
def __truediv__(self, other):
return self._combine(other, self.DIV, False)
def __div__(self, other): # Python 2 compatibility
return type(self).__truediv__(self, other)
def __mod__(self, other):
return self._combine(other, self.MOD, False)
def __pow__(self, other):
return self._combine(other, self.POW, False)
def __and__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
def bitand(self, other):
return self._combine(other, self.BITAND, False)
def __or__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
def bitor(self, other):
return self._combine(other, self.BITOR, False)
def __radd__(self, other):
return self._combine(other, self.ADD, True)
def __rsub__(self, other):
return self._combine(other, self.SUB, True)
def __rmul__(self, other):
return self._combine(other, self.MUL, True)
def __rtruediv__(self, other):
return self._combine(other, self.DIV, True)
def __rdiv__(self, other): # Python 2 compatibility
return type(self).__rtruediv__(self, other)
def __rmod__(self, other):
return self._combine(other, self.MOD, True)
def __rpow__(self, other):
return self._combine(other, self.POW, True)
def __rand__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
def __ror__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
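# Illustrative sketch (not part of Django): combining two F() references
# builds a CombinedExpression tree rather than computing a value; the
# field names "price" and "tax" are hypothetical.
def _example_combinable():
    expr = F('price') + F('tax')
    assert isinstance(expr, CombinedExpression)
    assert expr.connector == Combinable.ADD
    return expr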
class BaseExpression(object):
"""
Base class for all query expressions.
"""
# aggregate specific fields
is_summary = False
_output_field = None
def __init__(self, output_field=None):
if output_field is not None:
self._output_field = output_field
def get_db_converters(self, connection):
return [self.convert_value] + self.output_field.get_db_converters(connection)
def get_source_expressions(self):
return []
def set_source_expressions(self, exprs):
assert len(exprs) == 0
def _parse_expressions(self, *expressions):
return [
arg if hasattr(arg, 'resolve_expression') else (
F(arg) if isinstance(arg, six.string_types) else Value(arg)
) for arg in expressions
]
def as_sql(self, compiler, connection):
"""
Responsible for returning a (sql, [params]) tuple to be included
in the current query.
Different backends can provide their own implementation, by
providing an `as_{vendor}` method and patching the Expression:
```
def override_as_sql(self, compiler, connection):
# custom logic
return super(Expression, self).as_sql(compiler, connection)
setattr(Expression, 'as_' + connection.vendor, override_as_sql)
```
Arguments:
* compiler: the query compiler responsible for generating the query.
Must have a compile method, returning a (sql, [params]) tuple.
Calling compiler(value) will return a quoted `value`.
* connection: the database connection used for the current query.
Returns: (sql, params)
Where `sql` is a string containing ordered sql parameters to be
replaced with the elements of the list `params`.
"""
raise NotImplementedError("Subclasses must implement as_sql()")
@cached_property
def contains_aggregate(self):
for expr in self.get_source_expressions():
if expr and expr.contains_aggregate:
return True
return False
@cached_property
def contains_column_references(self):
for expr in self.get_source_expressions():
if expr and expr.contains_column_references:
return True
return False
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
"""
Provides the chance to do any preprocessing or validation before being
added to the query.
Arguments:
* query: the backend query implementation
* allow_joins: boolean allowing or denying use of joins
in this query
* reuse: a set of reusable joins for multijoins
* summarize: a terminal aggregate clause
* for_save: whether this expression is about to be used in a save or update
Returns: an Expression to be added to the query.
"""
c = self.copy()
c.is_summary = summarize
c.set_source_expressions([
expr.resolve_expression(query, allow_joins, reuse, summarize)
for expr in c.get_source_expressions()
])
return c
def _prepare(self, field):
"""
Hook used by Lookup.get_prep_lookup() to do custom preparation.
"""
return self
@property
def field(self):
return self.output_field
@cached_property
def output_field(self):
"""
Returns the output type of this expression.
"""
if self._output_field_or_none is None:
raise FieldError("Cannot resolve expression type, unknown output_field")
return self._output_field_or_none
@cached_property
def _output_field_or_none(self):
"""
Returns the output field of this expression, or None if no output type
can be resolved. Note that the 'output_field' property will raise
FieldError if no type can be resolved, but this attribute allows for
None values.
"""
if self._output_field is None:
self._resolve_output_field()
return self._output_field
def _resolve_output_field(self):
"""
Attempts to infer the output type of the expression. If the output
fields of all source fields match then we can simply infer the same
type here. This isn't always correct, but it makes sense most of the
time.
Consider the difference between `2 + 2` and `2 / 3`. Inferring
the type here is a convenience for the common case. The user should
supply their own output_field with more complex computations.
If a source does not have an `_output_field` then we exclude it from
this check. If all sources are `None`, then an error will be thrown
higher up the stack in the `output_field` property.
"""
if self._output_field is None:
sources = self.get_source_fields()
num_sources = len(sources)
if num_sources == 0:
self._output_field = None
else:
for source in sources:
if self._output_field is None:
self._output_field = source
if source is not None and not isinstance(self._output_field, source.__class__):
raise FieldError(
"Expression contains mixed types. You must set output_field")
def convert_value(self, value, expression, connection, context):
"""
Expressions provide their own converters because users have the option
of manually specifying the output_field which may be a different type
from the one the database returns.
"""
field = self.output_field
internal_type = field.get_internal_type()
if value is None:
return value
elif internal_type == 'FloatField':
return float(value)
elif internal_type.endswith('IntegerField'):
return int(value)
elif internal_type == 'DecimalField':
return backend_utils.typecast_decimal(value)
return value
def get_lookup(self, lookup):
return self.output_field.get_lookup(lookup)
def get_transform(self, name):
return self.output_field.get_transform(name)
def relabeled_clone(self, change_map):
clone = self.copy()
clone.set_source_expressions(
[e.relabeled_clone(change_map) for e in self.get_source_expressions()])
return clone
def copy(self):
c = copy.copy(self)
c.copied = True
return c
def get_group_by_cols(self):
if not self.contains_aggregate:
return [self]
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
def get_source_fields(self):
"""
Returns the underlying field types used by this
aggregate.
"""
return [e._output_field_or_none for e in self.get_source_expressions()]
def asc(self):
return OrderBy(self)
def desc(self):
return OrderBy(self, descending=True)
def reverse_ordering(self):
return self
def flatten(self):
"""
Recursively yield this expression and all subexpressions, in
depth-first order.
"""
yield self
for expr in self.get_source_expressions():
if expr:
for inner_expr in expr.flatten():
yield inner_expr
class Expression(BaseExpression, Combinable):
"""
An expression that can be combined with other expressions.
"""
pass
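# Hedged sketch of the extension point described in BaseExpression.as_sql()
# above: a minimal custom expression only needs an output_field and an
# as_sql() returning a (sql, params) tuple. The COALESCE wrapping and the
# class name are illustrative assumptions, not Django API.
class _ExampleCoalesceToZero(Expression):
    def __init__(self, column_name):
        super(_ExampleCoalesceToZero, self).__init__(
            output_field=fields.IntegerField())
        self.column_name = column_name

    def as_sql(self, compiler, connection):
        qn = compiler.quote_name_unless_alias
        # Renders e.g. COALESCE("mycol", %s) with params [0].
        return 'COALESCE(%s, %%s)' % qn(self.column_name), [0]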
class CombinedExpression(Expression):
def __init__(self, lhs, connector, rhs, output_field=None):
super(CombinedExpression, self).__init__(output_field=output_field)
self.connector = connector
self.lhs = lhs
self.rhs = rhs
def __repr__(self):
return "<{}: {}>".format(self.__class__.__name__, self)
def __str__(self):
return "{} {} {}".format(self.lhs, self.connector, self.rhs)
def get_source_expressions(self):
return [self.lhs, self.rhs]
def set_source_expressions(self, exprs):
self.lhs, self.rhs = exprs
def as_sql(self, compiler, connection):
try:
lhs_output = self.lhs.output_field
except FieldError:
lhs_output = None
try:
rhs_output = self.rhs.output_field
except FieldError:
rhs_output = None
if (not connection.features.has_native_duration_field and
((lhs_output and lhs_output.get_internal_type() == 'DurationField') or
(rhs_output and rhs_output.get_internal_type() == 'DurationField'))):
return DurationExpression(self.lhs, self.connector, self.rhs).as_sql(compiler, connection)
if (lhs_output and rhs_output and self.connector == self.SUB and
lhs_output.get_internal_type() in {'DateField', 'DateTimeField', 'TimeField'} and
lhs_output.get_internal_type() == rhs_output.get_internal_type()):
return TemporalSubtraction(self.lhs, self.rhs).as_sql(compiler, connection)
expressions = []
expression_params = []
sql, params = compiler.compile(self.lhs)
expressions.append(sql)
expression_params.extend(params)
sql, params = compiler.compile(self.rhs)
expressions.append(sql)
expression_params.extend(params)
# order of precedence
expression_wrapper = '(%s)'
sql = connection.ops.combine_expression(self.connector, expressions)
return expression_wrapper % sql, expression_params
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = self.copy()
c.is_summary = summarize
c.lhs = c.lhs.resolve_expression(query, allow_joins, reuse, summarize, for_save)
c.rhs = c.rhs.resolve_expression(query, allow_joins, reuse, summarize, for_save)
return c
class DurationExpression(CombinedExpression):
def compile(self, side, compiler, connection):
if not isinstance(side, DurationValue):
try:
output = side.output_field
except FieldError:
pass
else:
if output.get_internal_type() == 'DurationField':
sql, params = compiler.compile(side)
return connection.ops.format_for_duration_arithmetic(sql), params
return compiler.compile(side)
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
expressions = []
expression_params = []
sql, params = self.compile(self.lhs, compiler, connection)
expressions.append(sql)
expression_params.extend(params)
sql, params = self.compile(self.rhs, compiler, connection)
expressions.append(sql)
expression_params.extend(params)
# order of precedence
expression_wrapper = '(%s)'
sql = connection.ops.combine_duration_expression(self.connector, expressions)
return expression_wrapper % sql, expression_params
class TemporalSubtraction(CombinedExpression):
def __init__(self, lhs, rhs):
super(TemporalSubtraction, self).__init__(lhs, self.SUB, rhs, output_field=fields.DurationField())
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
lhs = compiler.compile(self.lhs, connection)
rhs = compiler.compile(self.rhs, connection)
return connection.ops.subtract_temporals(self.lhs.output_field.get_internal_type(), lhs, rhs)
class F(Combinable):
"""
An object capable of resolving references to existing query objects.
"""
def __init__(self, name):
"""
Arguments:
* name: the name of the field this expression references
"""
self.name = name
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.name)
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
return query.resolve_ref(self.name, allow_joins, reuse, summarize)
def asc(self):
return OrderBy(self)
def desc(self):
return OrderBy(self, descending=True)
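# Usage sketch (hypothetical queryset with "balance" and "bonus" fields):
# F() defers resolution to resolve_expression(), so one object works in
# annotations, updates and ordering alike.
def _example_f_usage(queryset):
    return queryset.annotate(
        total=F('balance') + F('bonus'),
    ).order_by(F('total').desc())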
class Func(Expression):
"""
An SQL function call.
"""
function = None
template = '%(function)s(%(expressions)s)'
arg_joiner = ', '
arity = None # The number of arguments the function accepts.
def __init__(self, *expressions, **extra):
if self.arity is not None and len(expressions) != self.arity:
raise TypeError(
"'%s' takes exactly %s %s (%s given)" % (
self.__class__.__name__,
self.arity,
"argument" if self.arity == 1 else "arguments",
len(expressions),
)
)
output_field = extra.pop('output_field', None)
super(Func, self).__init__(output_field=output_field)
self.source_expressions = self._parse_expressions(*expressions)
self.extra = extra
def __repr__(self):
args = self.arg_joiner.join(str(arg) for arg in self.source_expressions)
extra = ', '.join(str(key) + '=' + str(val) for key, val in self.extra.items())
if extra:
return "{}({}, {})".format(self.__class__.__name__, args, extra)
return "{}({})".format(self.__class__.__name__, args)
def get_source_expressions(self):
return self.source_expressions
def set_source_expressions(self, exprs):
self.source_expressions = exprs
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = self.copy()
c.is_summary = summarize
for pos, arg in enumerate(c.source_expressions):
c.source_expressions[pos] = arg.resolve_expression(query, allow_joins, reuse, summarize, for_save)
return c
def as_sql(self, compiler, connection, function=None, template=None, arg_joiner=None, **extra_context):
connection.ops.check_expression_support(self)
sql_parts = []
params = []
for arg in self.source_expressions:
arg_sql, arg_params = compiler.compile(arg)
sql_parts.append(arg_sql)
params.extend(arg_params)
data = self.extra.copy()
data.update(**extra_context)
# Use the first supplied value in this order: the parameter to this
# method, a value supplied in __init__()'s **extra (the value in
# `data`), or the value defined on the class.
if function is not None:
data['function'] = function
else:
data.setdefault('function', self.function)
template = template or data.get('template', self.template)
arg_joiner = arg_joiner or data.get('arg_joiner', self.arg_joiner)
data['expressions'] = data['field'] = arg_joiner.join(sql_parts)
return template % data, params
def as_sqlite(self, compiler, connection):
sql, params = self.as_sql(compiler, connection)
try:
if self.output_field.get_internal_type() == 'DecimalField':
sql = 'CAST(%s AS NUMERIC)' % sql
except FieldError:
pass
return sql, params
def copy(self):
copy = super(Func, self).copy()
copy.source_expressions = self.source_expressions[:]
copy.extra = self.extra.copy()
return copy
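# Hedged sketch of the standard Func extension point: for a simple
# one-argument SQL function, setting `function` (and optionally `arity`)
# is enough; `template` and `arg_joiner` come from the class defaults above.
class _ExampleLower(Func):
    function = 'LOWER'
    arity = 1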
class Value(Expression):
"""
Represents a wrapped value as a node within an expression
"""
def __init__(self, value, output_field=None):
"""
Arguments:
* value: the value this expression represents. The value will be
added into the sql parameter list and properly quoted.
* output_field: an instance of the model field type that this
expression will return, such as IntegerField() or CharField().
"""
super(Value, self).__init__(output_field=output_field)
self.value = value
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.value)
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
val = self.value
# check _output_field to avoid triggering an exception
if self._output_field is not None:
if self.for_save:
val = self.output_field.get_db_prep_save(val, connection=connection)
else:
val = self.output_field.get_db_prep_value(val, connection=connection)
if val is None:
# cx_Oracle does not always convert None to the appropriate
# NULL type (like in case expressions using numbers), so we
# use a literal SQL NULL
return 'NULL', []
return '%s', [val]
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = super(Value, self).resolve_expression(query, allow_joins, reuse, summarize, for_save)
c.for_save = for_save
return c
def get_group_by_cols(self):
return []
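# Illustrative sketch: literals must be wrapped in Value() to join an
# expression tree, and need an explicit output_field whenever the type
# cannot be inferred from the other operands.
def _example_value():
    return Value('draft', output_field=fields.CharField(max_length=10))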
class DurationValue(Value):
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
if (connection.features.has_native_duration_field and
connection.features.driver_supports_timedelta_args):
return super(DurationValue, self).as_sql(compiler, connection)
return connection.ops.date_interval_sql(self.value)
class RawSQL(Expression):
def __init__(self, sql, params, output_field=None):
if output_field is None:
output_field = fields.Field()
self.sql, self.params = sql, params
super(RawSQL, self).__init__(output_field=output_field)
def __repr__(self):
return "{}({}, {})".format(self.__class__.__name__, self.sql, self.params)
def as_sql(self, compiler, connection):
return '(%s)' % self.sql, self.params
def get_group_by_cols(self):
return [self]
class Star(Expression):
def __repr__(self):
return "'*'"
def as_sql(self, compiler, connection):
return '*', []
class Random(Expression):
def __init__(self):
super(Random, self).__init__(output_field=fields.FloatField())
def __repr__(self):
return "Random()"
def as_sql(self, compiler, connection):
return connection.ops.random_function_sql(), []
class Col(Expression):
contains_column_references = True
def __init__(self, alias, target, output_field=None):
if output_field is None:
output_field = target
super(Col, self).__init__(output_field=output_field)
self.alias, self.target = alias, target
def __repr__(self):
return "{}({}, {})".format(
self.__class__.__name__, self.alias, self.target)
def as_sql(self, compiler, connection):
qn = compiler.quote_name_unless_alias
return "%s.%s" % (qn(self.alias), qn(self.target.column)), []
def relabeled_clone(self, relabels):
return self.__class__(relabels.get(self.alias, self.alias), self.target, self.output_field)
def get_group_by_cols(self):
return [self]
def get_db_converters(self, connection):
if self.target == self.output_field:
return self.output_field.get_db_converters(connection)
return (self.output_field.get_db_converters(connection) +
self.target.get_db_converters(connection))
class Ref(Expression):
"""
Reference to column alias of the query. For example, Ref('sum_cost') in
qs.annotate(sum_cost=Sum('cost')) query.
"""
def __init__(self, refs, source):
super(Ref, self).__init__()
self.refs, self.source = refs, source
def __repr__(self):
return "{}({}, {})".format(self.__class__.__name__, self.refs, self.source)
def get_source_expressions(self):
return [self.source]
def set_source_expressions(self, exprs):
self.source, = exprs
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
# The sub-expression `source` has already been resolved, as this is
# just a reference to the name of `source`.
return self
def relabeled_clone(self, relabels):
return self
def as_sql(self, compiler, connection):
return "%s" % connection.ops.quote_name(self.refs), []
def get_group_by_cols(self):
return [self]
class ExpressionWrapper(Expression):
"""
An expression that can wrap another expression so that it can provide
extra context to the inner expression, such as the output_field.
"""
def __init__(self, expression, output_field):
super(ExpressionWrapper, self).__init__(output_field=output_field)
self.expression = expression
def set_source_expressions(self, exprs):
self.expression = exprs[0]
def get_source_expressions(self):
return [self.expression]
def as_sql(self, compiler, connection):
return self.expression.as_sql(compiler, connection)
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.expression)
class When(Expression):
template = 'WHEN %(condition)s THEN %(result)s'
def __init__(self, condition=None, then=None, **lookups):
if lookups and condition is None:
condition, lookups = Q(**lookups), None
if condition is None or not isinstance(condition, Q) or lookups:
raise TypeError("__init__() takes either a Q object or lookups as keyword arguments")
super(When, self).__init__(output_field=None)
self.condition = condition
self.result = self._parse_expressions(then)[0]
def __str__(self):
return "WHEN %r THEN %r" % (self.condition, self.result)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_source_expressions(self):
return [self.condition, self.result]
def set_source_expressions(self, exprs):
self.condition, self.result = exprs
def get_source_fields(self):
# We're only interested in the fields of the result expressions.
return [self.result._output_field_or_none]
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = self.copy()
c.is_summary = summarize
if hasattr(c.condition, 'resolve_expression'):
c.condition = c.condition.resolve_expression(query, allow_joins, reuse, summarize, False)
c.result = c.result.resolve_expression(query, allow_joins, reuse, summarize, for_save)
return c
def as_sql(self, compiler, connection, template=None, **extra_context):
connection.ops.check_expression_support(self)
template_params = extra_context
sql_params = []
condition_sql, condition_params = compiler.compile(self.condition)
template_params['condition'] = condition_sql
sql_params.extend(condition_params)
result_sql, result_params = compiler.compile(self.result)
template_params['result'] = result_sql
sql_params.extend(result_params)
template = template or self.template
return template % template_params, sql_params
def get_group_by_cols(self):
# This is not a complete expression and cannot be used in GROUP BY.
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
class Case(Expression):
"""
An SQL searched CASE expression:
CASE
WHEN n > 0
THEN 'positive'
WHEN n < 0
THEN 'negative'
ELSE 'zero'
END
"""
template = 'CASE %(cases)s ELSE %(default)s END'
case_joiner = ' '
def __init__(self, *cases, **extra):
if not all(isinstance(case, When) for case in cases):
raise TypeError("Positional arguments must all be When objects.")
default = extra.pop('default', None)
output_field = extra.pop('output_field', None)
super(Case, self).__init__(output_field)
self.cases = list(cases)
self.default = self._parse_expressions(default)[0]
self.extra = extra
def __str__(self):
return "CASE %s, ELSE %r" % (', '.join(str(c) for c in self.cases), self.default)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_source_expressions(self):
return self.cases + [self.default]
def set_source_expressions(self, exprs):
self.cases = exprs[:-1]
self.default = exprs[-1]
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = self.copy()
c.is_summary = summarize
for pos, case in enumerate(c.cases):
c.cases[pos] = case.resolve_expression(query, allow_joins, reuse, summarize, for_save)
c.default = c.default.resolve_expression(query, allow_joins, reuse, summarize, for_save)
return c
def copy(self):
c = super(Case, self).copy()
c.cases = c.cases[:]
return c
def as_sql(self, compiler, connection, template=None, case_joiner=None, **extra_context):
connection.ops.check_expression_support(self)
if not self.cases:
return compiler.compile(self.default)
template_params = self.extra.copy()
template_params.update(extra_context)
case_parts = []
sql_params = []
for case in self.cases:
case_sql, case_params = compiler.compile(case)
case_parts.append(case_sql)
sql_params.extend(case_params)
case_joiner = case_joiner or self.case_joiner
template_params['cases'] = case_joiner.join(case_parts)
default_sql, default_params = compiler.compile(self.default)
template_params['default'] = default_sql
sql_params.extend(default_params)
template = template or template_params.get('template', self.template)
sql = template % template_params
if self._output_field_or_none is not None:
sql = connection.ops.unification_cast_sql(self.output_field) % sql
return sql, sql_params
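# Sketch mirroring the CASE docstring above; the "account_type" lookups and
# the queryset are hypothetical. Each When() maps onto a WHEN/THEN clause
# and `default` onto ELSE.
def _example_case(queryset):
    return queryset.annotate(
        discount=Case(
            When(account_type='gold', then=Value(10)),
            When(account_type='silver', then=Value(5)),
            default=Value(0),
            output_field=fields.IntegerField(),
        ),
    )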
class OrderBy(BaseExpression):
template = '%(expression)s %(ordering)s'
def __init__(self, expression, descending=False):
self.descending = descending
if not hasattr(expression, 'resolve_expression'):
raise ValueError('expression must be an expression type')
self.expression = expression
def __repr__(self):
return "{}({}, descending={})".format(
self.__class__.__name__, self.expression, self.descending)
def set_source_expressions(self, exprs):
self.expression = exprs[0]
def get_source_expressions(self):
return [self.expression]
def as_sql(self, compiler, connection, template=None, **extra_context):
connection.ops.check_expression_support(self)
expression_sql, params = compiler.compile(self.expression)
placeholders = {
'expression': expression_sql,
'ordering': 'DESC' if self.descending else 'ASC',
}
placeholders.update(extra_context)
template = template or self.template
return (template % placeholders).rstrip(), params
def get_group_by_cols(self):
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
def reverse_ordering(self):
self.descending = not self.descending
return self
def asc(self):
self.descending = False
def desc(self):
self.descending = True
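# Illustrative: reverse_ordering() flips the flag in place and returns
# self, which is what query cloning relies on.
def _example_orderby():
    ob = OrderBy(F('created'), descending=True)
    assert ob.reverse_ordering() is ob
    assert ob.descending is False
    return ob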
|
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUPermissionsFetcher
from .fetchers import NUMetadatasFetcher
from .fetchers import NUEgressAuditACLEntryTemplatesFetcher
from .fetchers import NUGlobalMetadatasFetcher
from bambou import NURESTObject
class NUEgressAuditACLTemplate(NURESTObject):
""" Represents a EgressAuditACLTemplate in the VSD
Notes:
An egress audit policy is a set of rules defining how network traffic is monitored and mirrored from a domain for Audit purposes
"""
__rest_name__ = "egressauditacltemplate"
__resource_name__ = "egressauditacltemplates"
## Constants
CONST_POLICY_STATE_DRAFT = "DRAFT"
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_PRIORITY_TYPE_TOP_AUDIT = "TOP_AUDIT"
CONST_POLICY_STATE_LIVE = "LIVE"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
def __init__(self, **kwargs):
""" Initializes a EgressAuditACLTemplate instance
Notes:
You can specify all parameters while calling this method.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> egressauditacltemplate = NUEgressAuditACLTemplate(id=u'xxxx-xxx-xxx-xxx', name=u'EgressAuditACLTemplate')
>>> egressauditacltemplate = NUEgressAuditACLTemplate(data=my_dict)
"""
super(NUEgressAuditACLTemplate, self).__init__()
# Read/Write Attributes
self._name = None
self._last_updated_by = None
self._last_updated_date = None
self._active = None
self._default_allow_ip = None
self._default_allow_non_ip = None
self._default_install_acl_implicit_rules = None
self._description = None
self._embedded_metadata = None
self._entity_scope = None
self._policy_state = None
self._creation_date = None
self._priority = None
self._priority_type = None
self._associated_live_entity_id = None
self._associated_virtual_firewall_policy_id = None
self._auto_generate_priority = None
self._owner = None
self._external_id = None
self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=False)
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="active", remote_name="active", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="default_allow_ip", remote_name="defaultAllowIP", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="default_allow_non_ip", remote_name="defaultAllowNonIP", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="default_install_acl_implicit_rules", remote_name="defaultInstallACLImplicitRules", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="policy_state", remote_name="policyState", attribute_type=str, is_required=False, is_unique=False, choices=[u'DRAFT', u'LIVE'])
self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="priority", remote_name="priority", attribute_type=int, is_required=False, is_unique=True)
self.expose_attribute(local_name="priority_type", remote_name="priorityType", attribute_type=str, is_required=False, is_unique=True, choices=[u'TOP_AUDIT'])
self.expose_attribute(local_name="associated_live_entity_id", remote_name="associatedLiveEntityID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_virtual_firewall_policy_id", remote_name="associatedVirtualFirewallPolicyID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="auto_generate_priority", remote_name="autoGeneratePriority", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
# Fetchers
self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.egress_audit_acl_entry_templates = NUEgressAuditACLEntryTemplatesFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def name(self):
""" Get name value.
Notes:
The name of the entity
"""
return self._name
@name.setter
def name(self, value):
""" Set name value.
Notes:
The name of the entity
"""
self._name = value
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def last_updated_date(self):
""" Get last_updated_date value.
Notes:
Time stamp when this object was last updated.
This attribute is named `lastUpdatedDate` in VSD API.
"""
return self._last_updated_date
@last_updated_date.setter
def last_updated_date(self, value):
""" Set last_updated_date value.
Notes:
Time stamp when this object was last updated.
This attribute is named `lastUpdatedDate` in VSD API.
"""
self._last_updated_date = value
@property
def active(self):
""" Get active value.
Notes:
If enabled, it means that this ACL or QOS entry is active
"""
return self._active
@active.setter
def active(self, value):
""" Set active value.
Notes:
If enabled, it means that this ACL or QOS entry is active
"""
self._active = value
@property
def default_allow_ip(self):
""" Get default_allow_ip value.
Notes:
If enabled, a default ACL of Allow All is added as the last entry in the list of ACL entries
This attribute is named `defaultAllowIP` in VSD API.
"""
return self._default_allow_ip
@default_allow_ip.setter
def default_allow_ip(self, value):
""" Set default_allow_ip value.
Notes:
If enabled, a default ACL of Allow All is added as the last entry in the list of ACL entries
This attribute is named `defaultAllowIP` in VSD API.
"""
self._default_allow_ip = value
@property
def default_allow_non_ip(self):
""" Get default_allow_non_ip value.
Notes:
If enabled, non ip traffic will be dropped
This attribute is named `defaultAllowNonIP` in VSD API.
"""
return self._default_allow_non_ip
@default_allow_non_ip.setter
def default_allow_non_ip(self, value):
""" Set default_allow_non_ip value.
Notes:
If enabled, non ip traffic will be dropped
This attribute is named `defaultAllowNonIP` in VSD API.
"""
self._default_allow_non_ip = value
@property
def default_install_acl_implicit_rules(self):
""" Get default_install_acl_implicit_rules value.
Notes:
If enabled, implicit rule will allow intra domain traffic by default
This attribute is named `defaultInstallACLImplicitRules` in VSD API.
"""
return self._default_install_acl_implicit_rules
@default_install_acl_implicit_rules.setter
def default_install_acl_implicit_rules(self, value):
""" Set default_install_acl_implicit_rules value.
Notes:
If enabled, implicit rule will allow intra domain traffic by default
This attribute is named `defaultInstallACLImplicitRules` in VSD API.
"""
self._default_install_acl_implicit_rules = value
@property
def description(self):
""" Get description value.
Notes:
A description of the entity
"""
return self._description
@description.setter
def description(self, value):
""" Set description value.
Notes:
A description of the entity
"""
self._description = value
@property
def embedded_metadata(self):
""" Get embedded_metadata value.
Notes:
Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum of Metadata objects is returned based on the value set in the system configuration.
This attribute is named `embeddedMetadata` in VSD API.
"""
return self._embedded_metadata
@embedded_metadata.setter
def embedded_metadata(self, value):
""" Set embedded_metadata value.
Notes:
Metadata objects associated with this entity. This will contain a list of Metadata objects if the API request is made using the special flag to enable the embedded Metadata feature. Only a maximum of Metadata objects is returned based on the value set in the system configuration.
This attribute is named `embeddedMetadata` in VSD API.
"""
self._embedded_metadata = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def policy_state(self):
""" Get policy_state value.
Notes:
None
This attribute is named `policyState` in VSD API.
"""
return self._policy_state
@policy_state.setter
def policy_state(self, value):
""" Set policy_state value.
Notes:
None
This attribute is named `policyState` in VSD API.
"""
self._policy_state = value
@property
def creation_date(self):
""" Get creation_date value.
Notes:
Time stamp when this object was created.
This attribute is named `creationDate` in VSD API.
"""
return self._creation_date
@creation_date.setter
def creation_date(self, value):
""" Set creation_date value.
Notes:
Time stamp when this object was created.
This attribute is named `creationDate` in VSD API.
"""
self._creation_date = value
@property
def priority(self):
""" Get priority value.
Notes:
The priority of the ACL entry that determines the order of entries
"""
return self._priority
@priority.setter
def priority(self, value):
""" Set priority value.
Notes:
The priority of the ACL entry that determines the order of entries
"""
self._priority = value
@property
def priority_type(self):
""" Get priority_type value.
Notes:
Possible values: TOP_AUDIT. This will be the topmost of the egress ACL stack
This attribute is named `priorityType` in VSD API.
"""
return self._priority_type
@priority_type.setter
def priority_type(self, value):
""" Set priority_type value.
Notes:
Possible values: TOP_AUDIT. This will be the topmost of the egress ACL stack
This attribute is named `priorityType` in VSD API.
"""
self._priority_type = value
@property
def associated_live_entity_id(self):
""" Get associated_live_entity_id value.
Notes:
In the draft mode, the ACL entry refers to this LiveEntity. In non-drafted mode, this is null.
This attribute is named `associatedLiveEntityID` in VSD API.
"""
return self._associated_live_entity_id
@associated_live_entity_id.setter
def associated_live_entity_id(self, value):
""" Set associated_live_entity_id value.
Notes:
In the draft mode, the ACL entry refers to this LiveEntity. In non-drafted mode, this is null.
This attribute is named `associatedLiveEntityID` in VSD API.
"""
self._associated_live_entity_id = value
@property
def associated_virtual_firewall_policy_id(self):
""" Get associated_virtual_firewall_policy_id value.
Notes:
The ID of the Virtual Firewall Policy, if this was created as part of the Virtual Firewall Policy creation
This attribute is named `associatedVirtualFirewallPolicyID` in VSD API.
"""
return self._associated_virtual_firewall_policy_id
@associated_virtual_firewall_policy_id.setter
def associated_virtual_firewall_policy_id(self, value):
""" Set associated_virtual_firewall_policy_id value.
Notes:
The ID of the Virtual Firewall Policy, if this was created as part of the Virtual Firewall Policy creation
This attribute is named `associatedVirtualFirewallPolicyID` in VSD API.
"""
self._associated_virtual_firewall_policy_id = value
@property
def auto_generate_priority(self):
""" Get auto_generate_priority value.
Notes:
This option only affects how the children ACL entry priorities of this template/policy are generated when the priority is not specified. If 'false', the priority is generated by incrementing the current highest ACL Entry priority by 100. If 'true', a random priority will be generated, which is advised when creating many entries concurrently without specifying the priority. This will cause the new child ACL entry to get a random, non-predictable, priority. Therefore it is advised to only enable this when allow rules are being created. If any type of ACL entry order is required, keep this value to 'false' and use your own defined priorities, this will make sure there is a clear set of priorities and how traffic is validated against the ACL entries.
This attribute is named `autoGeneratePriority` in VSD API.
"""
return self._auto_generate_priority
@auto_generate_priority.setter
def auto_generate_priority(self, value):
""" Set auto_generate_priority value.
Notes:
This option only affects how the children ACL entry priorities of this template/policy are generated when the priority is not specified. If 'false', the priority is generated by incrementing the current highest ACL Entry priority by 100. If 'true', a random priority will be generated, which is advised when creating many entries concurrently without specifying the priority. This will cause the new child ACL entry to get a random, non-predictable, priority. Therefore it is advised to only enable this when allow rules are being created. If any type of ACL entry order is required, keep this value to 'false' and use your own defined priorities, this will make sure there is a clear set of priorities and how traffic is validated against the ACL entries.
This attribute is named `autoGeneratePriority` in VSD API.
"""
self._auto_generate_priority = value
@property
def owner(self):
""" Get owner value.
Notes:
Identifies the user that has created this object.
"""
return self._owner
@owner.setter
def owner(self, value):
""" Set owner value.
Notes:
Identifies the user that has created this object.
"""
self._owner = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
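# Hedged usage sketch (not part of the generated file): with an
# authenticated bambou session, the template is created under a parent
# object and its children are read back through the fetchers wired up in
# __init__() above. The `domain` parent and attribute values are
# hypothetical.
def _example_create_template(domain):
    template = NUEgressAuditACLTemplate(name=u"audit-all", priority=100)
    domain.create_child(template)
    entries = template.egress_audit_acl_entry_templates.get()
    return template, entries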
|
|
# Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import Flask, jsonify, abort, request
import os
import socket
import logging
import sys
from subprocess32 import check_call, CalledProcessError, call
from werkzeug.exceptions import HTTPException, default_exceptions
from netaddr import IPAddress, IPNetwork
from pycalico.datastore import IF_PREFIX
from pycalico.datastore_errors import DataStoreError
from pycalico.datastore_datatypes import Endpoint
from pycalico.ipam import SequentialAssignment, IPAMClient
FIXED_MAC = "EE:EE:EE:EE:EE:EE"
CONTAINER_NAME = "libnetwork"
ORCHESTRATOR_ID = "docker"
# How long to wait (seconds) for IP commands to complete.
IP_CMD_TIMEOUT = 5
hostname = socket.gethostname()
client = IPAMClient()
# Return all errors as JSON. From http://flask.pocoo.org/snippets/83/
def make_json_app(import_name, **kwargs):
"""
Creates a JSON-oriented Flask app.
All error responses that you don't specifically
manage yourself will have application/json content
type, and will contain JSON like this (just an example):
{ "message": "405: Method Not Allowed" }
"""
def make_json_error(ex):
response = jsonify(message=str(ex))
response.status_code = (ex.code
if isinstance(ex, HTTPException)
else 500)
return response
app = Flask(import_name, **kwargs)
for code in default_exceptions.iterkeys():
app.error_handler_spec[None][code] = make_json_error
return app
app = make_json_app(__name__)
app.logger.addHandler(logging.StreamHandler(sys.stdout))
app.logger.setLevel(logging.INFO)
app.logger.info("Application started")
@app.route('/Plugin.Activate', methods=['POST'])
def activate():
return jsonify({"Implements": ["NetworkDriver"]})
@app.route('/NetworkDriver.CreateNetwork', methods=['POST'])
def create_network():
# force is required since the request doesn't have the correct mimetype
# If the JSON is malformed, then a BadRequest exception is raised,
# which returns an HTTP 400 response.
json_data = request.get_json(force=True)
# Create the "network" as a profile. The network ID is somewhat unwieldy
# so in future we might want to obtain a human readable name for it.
network_id = json_data["NetworkID"]
app.logger.info("Creating profile %s", network_id)
client.create_profile(network_id)
return jsonify({})
@app.route('/NetworkDriver.DeleteNetwork', methods=['POST'])
def delete_network():
json_data = request.get_json(force=True)
# Remove the network. We don't raise an error if the profile is still
# being used by endpoints. We assume libnetwork will enforce this.
# From https://github.com/docker/libnetwork/blob/master/docs/design.md
# LibNetwork will not allow the delete to proceed if there are any
# existing endpoints attached to the Network.
network_id = json_data["NetworkID"]
app.logger.info("Removing profile %s", network_id)
client.remove_profile(network_id)
return jsonify({})
@app.route('/NetworkDriver.CreateEndpoint', methods=['POST'])
def create_endpoint():
json_data = request.get_json(force=True)
ep_id = json_data["EndpointID"]
net_id = json_data["NetworkID"]
# Create a calico endpoint object which we can populate and return to
# libnetwork at the end of this method.
ep = Endpoint(hostname, "docker", CONTAINER_NAME, ep_id, "active",
FIXED_MAC)
ep.profile_ids.append(net_id)
# This method is split into three phases that have side effects.
# 1) Assigning IP addresses
# 2) Creating VETHs
# 3) Writing the endpoint to the datastore.
#
# A failure in a later phase attempts to roll back the effects of
# the earlier phases.
# First up is IP assignment. By default we assign both IPv4 and IPv6
# addresses.
# IPv4 failures may abort the request if the address couldn't be assigned.
ipv4_and_gateway(ep)
# IPv6 is currently best effort and won't abort the request.
ipv6_and_gateway(ep)
# Next, create the veth.
try:
create_veth(ep)
except CalledProcessError as e:
# Failed to create or configure the veth.
# Back out the IP assignments and the veth creation.
app.logger.exception(e)
backout_ip_assignments(ep)
remove_veth(ep)
abort(500)
# Finally, write the endpoint to the datastore.
try:
client.set_endpoint(ep)
except DataStoreError as e:
# We've failed to write the endpoint to the datastore.
# Back out the IP assignments and the veth creation.
app.logger.exception(e)
backout_ip_assignments(ep)
remove_veth(ep)
abort(500)
# Everything worked, create the JSON and return it to libnetwork.
assert len(ep.ipv4_nets) == 1
assert len(ep.ipv6_nets) <= 1
iface_json = {"ID": 0,
"Address": str(list(ep.ipv4_nets)[0]),
"MacAddress": ep.mac}
if ep.ipv6_nets:
iface_json["AddressIPv6"] = str(list(ep.ipv6_nets)[0])
return jsonify({"Interfaces": [iface_json]})
@app.route('/NetworkDriver.DeleteEndpoint', methods=['POST'])
def delete_endpoint():
json_data = request.get_json(force=True)
ep_id = json_data["EndpointID"]
app.logger.info("Removing endpoint %s", ep_id)
# Remove the endpoint from the datastore, the IPs that were assigned to
# it and the veth. Even if one fails, try to do the others.
ep = None
try:
ep = client.get_endpoint(hostname=hostname,
orchestrator_id="docker",
workload_id=CONTAINER_NAME,
endpoint_id=ep_id)
backout_ip_assignments(ep)
except (KeyError, DataStoreError) as e:
app.logger.exception(e)
app.logger.warning("Failed to unassign IPs for endpoint %s", ep_id)
if ep:
try:
client.remove_endpoint(ep)
except DataStoreError as e:
app.logger.exception(e)
app.logger.warning("Failed to remove endpoint %s from datastore",
ep_id)
# libnetwork expects us to delete the veth pair. (Note that we only need
# to delete one end).
if ep:
remove_veth(ep)
return jsonify({})
@app.route('/NetworkDriver.EndpointOperInfo', methods=['POST'])
def endpoint_oper_info():
json_data = request.get_json(force=True)
ep_id = json_data["EndpointID"]
app.logger.info("Endpoint operation info requested for %s", ep_id)
# Nothing is supported yet, just pass blank data.
return jsonify({"Value": {}})
@app.route('/NetworkDriver.Join', methods=['POST'])
def join():
json_data = request.get_json(force=True)
ep_id = json_data["EndpointID"]
app.logger.info("Joining endpoint %s", ep_id)
ep = client.get_endpoint(hostname=hostname,
orchestrator_id="docker",
workload_id=CONTAINER_NAME,
endpoint_id=ep_id)
ret_json = {
"InterfaceNames": [{
"SrcName": ep.temp_interface_name(),
"DstPrefix": IF_PREFIX
}],
"Gateway": str(ep.ipv4_gateway),
"StaticRoutes": [{
"Destination": "%s/32" % ep.ipv4_gateway,
"RouteType": 1, # 1 = CONNECTED
"NextHop": "",
"InterfaceID": 0 # 1st interface created in EndpointCreate
}]
}
if ep.ipv6_gateway:
ret_json["GatewayIPv6"] = str(ep.ipv6_gateway)
ret_json["StaticRoutes"].append({
"Destination": "%s/128" % ep.ipv6_gateway,
"RouteType": 1, # 1 = CONNECTED
"NextHop": "",
"InterfaceID": 0 # 1st interface created in EndpointCreate
})
return jsonify(ret_json)
@app.route('/NetworkDriver.Leave', methods=['POST'])
def leave():
json_data = request.get_json(force=True)
ep_id = json_data["EndpointID"]
app.logger.info("Leaving endpoint %s", ep_id)
# Noop. There's nothing to do.
return jsonify({})
def assign_ip(version):
"""
    Assign an IP address from the configured pools.
    :param version: "v4" for IPv4, "v6" for IPv6.
    :return: An IPAddress, or None if no address could be assigned.
"""
ip = None
assert version in ["v4", "v6"]
# For each configured pool, attempt to assign an IP before giving up.
for pool in client.get_ip_pools(version):
assigner = SequentialAssignment()
ip = assigner.allocate(pool)
if ip is not None:
ip = IPAddress(ip)
break
return ip
def unassign_ip(ip):
"""
    Unassign an IP address from the configured pools.
:param ip: IPAddress to unassign.
:return: True if the unassignment succeeded. False otherwise.
"""
# For each configured pool, attempt to unassign the IP before giving up.
version = "v%d" % ip.version
for pool in client.get_ip_pools(version):
if ip in pool:
if client.unassign_address(pool, ip):
return True
return False
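# A minimal sketch (never called) of how assign_ip and unassign_ip pair up:
# a caller that fails after obtaining an address should hand it back to its
# pool, which is what backout_ip_assignments does for whole endpoints.
def _example_assign_then_release():
    ip = assign_ip("v4")
    if ip is None:
        return False
    try:
        pass  # ... configure the endpoint with the address here ...
    except Exception:
        unassign_ip(ip)  # return the address to its pool on failure
        raise
    return True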
def ipv4_and_gateway(ep):
# Get the gateway before trying to assign an address. This will avoid
# needing to backout the assignment if fetching the gateway fails.
try:
next_hop = client.get_default_next_hops(hostname)[4]
except KeyError as e:
app.logger.exception(e)
abort(500)
ip = assign_ip("v4")
app.logger.info("Assigned IPv4 %s", ip)
if not ip:
app.logger.error("Failed to allocate IPv4 for endpoint %s",
ep.endpoint_id)
abort(500)
ip = IPNetwork(ip)
ep.ipv4_nets.add(ip)
ep.ipv4_gateway = next_hop
def ipv6_and_gateway(ep):
try:
next_hop6 = client.get_default_next_hops(hostname)[6]
except KeyError:
app.logger.info("Couldn't find IPv6 gateway for endpoint %s. "
"Skipping IPv6 assignment.",
ep.endpoint_id)
else:
ip6 = assign_ip("v6")
if ip6:
ip6 = IPNetwork(ip6)
ep.ipv6_gateway = next_hop6
ep.ipv6_nets.add(ip6)
else:
app.logger.info("Failed to allocate IPv6 address for endpoint %s",
ep.endpoint_id)
def backout_ip_assignments(ep):
for net in ep.ipv4_nets.union(ep.ipv6_nets):
# The unassignment is best effort. Just log if it fails.
if not unassign_ip(net.ip):
            app.logger.warning("Failed to unassign IP %s", net.ip)
def create_veth(ep):
# Create the veth
check_call(['ip', 'link',
'add', ep.name,
'type', 'veth',
'peer', 'name', ep.temp_interface_name()],
timeout=IP_CMD_TIMEOUT)
# Set the host end of the veth to 'up' so felix notices it.
check_call(['ip', 'link', 'set', ep.name, 'up'],
timeout=IP_CMD_TIMEOUT)
# Set the mac as libnetwork doesn't do this for us.
check_call(['ip', 'link', 'set',
'dev', ep.temp_interface_name(),
'address', FIXED_MAC],
timeout=IP_CMD_TIMEOUT)
def remove_veth(ep):
# The veth removal is best effort. If it fails then just log.
rc = call(['ip', 'link', 'del', ep.name], timeout=IP_CMD_TIMEOUT)
if rc != 0:
        app.logger.warning("Failed to delete veth %s", ep.name)
if __name__ == '__main__':
# Used when being invoked by the flask development server
PLUGIN_DIR = "/usr/share/docker/plugins/"
if not os.path.exists(PLUGIN_DIR):
os.makedirs(PLUGIN_DIR)
with open(os.path.join(PLUGIN_DIR, 'calico.spec'), 'w') as f:
f.write("tcp://localhost:5000")
# Turns on better error messages and reloading support.
app.debug = True
app.run()
|
|
# Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.os_config import guest_policy_pb2
from google3.cloud.graphite.mmv2.services.google.os_config import guest_policy_pb2_grpc
from typing import List
class GuestPolicy(object):
def __init__(
self,
name: str = None,
description: str = None,
create_time: str = None,
update_time: str = None,
assignment: dict = None,
packages: list = None,
package_repositories: list = None,
recipes: list = None,
etag: str = None,
project: str = None,
service_account_file: str = "",
):
channel.initialize()
self.name = name
self.description = description
self.assignment = assignment
self.packages = packages
self.package_repositories = package_repositories
self.recipes = recipes
self.project = project
        self.service_account_file = service_account_file
        # create_time, update_time and etag are output-only: they are not set
        # here and are only populated from the server response in apply().
def apply(self):
stub = guest_policy_pb2_grpc.OsconfigBetaGuestPolicyServiceStub(
channel.Channel()
)
request = guest_policy_pb2.ApplyOsconfigBetaGuestPolicyRequest()
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.description):
request.resource.description = Primitive.to_proto(self.description)
if GuestPolicyAssignment.to_proto(self.assignment):
request.resource.assignment.CopyFrom(
GuestPolicyAssignment.to_proto(self.assignment)
)
else:
request.resource.ClearField("assignment")
if GuestPolicyPackagesArray.to_proto(self.packages):
request.resource.packages.extend(
GuestPolicyPackagesArray.to_proto(self.packages)
)
if GuestPolicyPackageRepositoriesArray.to_proto(self.package_repositories):
request.resource.package_repositories.extend(
GuestPolicyPackageRepositoriesArray.to_proto(self.package_repositories)
)
if GuestPolicyRecipesArray.to_proto(self.recipes):
request.resource.recipes.extend(
GuestPolicyRecipesArray.to_proto(self.recipes)
)
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
request.service_account_file = self.service_account_file
response = stub.ApplyOsconfigBetaGuestPolicy(request)
self.name = Primitive.from_proto(response.name)
self.description = Primitive.from_proto(response.description)
self.create_time = Primitive.from_proto(response.create_time)
self.update_time = Primitive.from_proto(response.update_time)
self.assignment = GuestPolicyAssignment.from_proto(response.assignment)
self.packages = GuestPolicyPackagesArray.from_proto(response.packages)
self.package_repositories = GuestPolicyPackageRepositoriesArray.from_proto(
response.package_repositories
)
self.recipes = GuestPolicyRecipesArray.from_proto(response.recipes)
self.etag = Primitive.from_proto(response.etag)
self.project = Primitive.from_proto(response.project)
def delete(self):
stub = guest_policy_pb2_grpc.OsconfigBetaGuestPolicyServiceStub(
channel.Channel()
)
request = guest_policy_pb2.DeleteOsconfigBetaGuestPolicyRequest()
request.service_account_file = self.service_account_file
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.description):
request.resource.description = Primitive.to_proto(self.description)
if GuestPolicyAssignment.to_proto(self.assignment):
request.resource.assignment.CopyFrom(
GuestPolicyAssignment.to_proto(self.assignment)
)
else:
request.resource.ClearField("assignment")
if GuestPolicyPackagesArray.to_proto(self.packages):
request.resource.packages.extend(
GuestPolicyPackagesArray.to_proto(self.packages)
)
if GuestPolicyPackageRepositoriesArray.to_proto(self.package_repositories):
request.resource.package_repositories.extend(
GuestPolicyPackageRepositoriesArray.to_proto(self.package_repositories)
)
if GuestPolicyRecipesArray.to_proto(self.recipes):
request.resource.recipes.extend(
GuestPolicyRecipesArray.to_proto(self.recipes)
)
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
        stub.DeleteOsconfigBetaGuestPolicy(request)
@classmethod
def list(self, project, service_account_file=""):
stub = guest_policy_pb2_grpc.OsconfigBetaGuestPolicyServiceStub(
channel.Channel()
)
request = guest_policy_pb2.ListOsconfigBetaGuestPolicyRequest()
request.service_account_file = service_account_file
request.Project = project
return stub.ListOsconfigBetaGuestPolicy(request).items
def to_proto(self):
resource = guest_policy_pb2.OsconfigBetaGuestPolicy()
if Primitive.to_proto(self.name):
resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.description):
resource.description = Primitive.to_proto(self.description)
if GuestPolicyAssignment.to_proto(self.assignment):
resource.assignment.CopyFrom(
GuestPolicyAssignment.to_proto(self.assignment)
)
else:
resource.ClearField("assignment")
if GuestPolicyPackagesArray.to_proto(self.packages):
resource.packages.extend(GuestPolicyPackagesArray.to_proto(self.packages))
if GuestPolicyPackageRepositoriesArray.to_proto(self.package_repositories):
resource.package_repositories.extend(
GuestPolicyPackageRepositoriesArray.to_proto(self.package_repositories)
)
if GuestPolicyRecipesArray.to_proto(self.recipes):
resource.recipes.extend(GuestPolicyRecipesArray.to_proto(self.recipes))
if Primitive.to_proto(self.project):
resource.project = Primitive.to_proto(self.project)
return resource
class GuestPolicyAssignment(object):
def __init__(
self,
group_labels: list = None,
zones: list = None,
instances: list = None,
instance_name_prefixes: list = None,
os_types: list = None,
):
self.group_labels = group_labels
self.zones = zones
self.instances = instances
self.instance_name_prefixes = instance_name_prefixes
self.os_types = os_types
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = guest_policy_pb2.OsconfigBetaGuestPolicyAssignment()
if GuestPolicyAssignmentGroupLabelsArray.to_proto(resource.group_labels):
res.group_labels.extend(
GuestPolicyAssignmentGroupLabelsArray.to_proto(resource.group_labels)
)
if Primitive.to_proto(resource.zones):
res.zones.extend(Primitive.to_proto(resource.zones))
if Primitive.to_proto(resource.instances):
res.instances.extend(Primitive.to_proto(resource.instances))
if Primitive.to_proto(resource.instance_name_prefixes):
res.instance_name_prefixes.extend(
Primitive.to_proto(resource.instance_name_prefixes)
)
if GuestPolicyAssignmentOSTypesArray.to_proto(resource.os_types):
res.os_types.extend(
GuestPolicyAssignmentOSTypesArray.to_proto(resource.os_types)
)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return GuestPolicyAssignment(
group_labels=GuestPolicyAssignmentGroupLabelsArray.from_proto(
resource.group_labels
),
zones=Primitive.from_proto(resource.zones),
instances=Primitive.from_proto(resource.instances),
instance_name_prefixes=Primitive.from_proto(
resource.instance_name_prefixes
),
os_types=GuestPolicyAssignmentOSTypesArray.from_proto(resource.os_types),
)
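# Illustrative round trip (values are hypothetical): every nested type in this
# module follows the same to_proto/from_proto classmethod convention, so plain
# Python objects convert to protos and back symmetrically.
def _example_assignment_roundtrip():
    assignment = GuestPolicyAssignment(
        zones=["us-central1-a"],
        instance_name_prefixes=["web-"],
    )
    proto = GuestPolicyAssignment.to_proto(assignment)
    return GuestPolicyAssignment.from_proto(proto)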
class GuestPolicyAssignmentArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [GuestPolicyAssignment.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [GuestPolicyAssignment.from_proto(i) for i in resources]
class GuestPolicyAssignmentGroupLabels(object):
def __init__(self, labels: dict = None):
self.labels = labels
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = guest_policy_pb2.OsconfigBetaGuestPolicyAssignmentGroupLabels()
if Primitive.to_proto(resource.labels):
res.labels = Primitive.to_proto(resource.labels)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return GuestPolicyAssignmentGroupLabels(
labels=Primitive.from_proto(resource.labels),
)
class GuestPolicyAssignmentGroupLabelsArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [GuestPolicyAssignmentGroupLabels.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [GuestPolicyAssignmentGroupLabels.from_proto(i) for i in resources]
class GuestPolicyAssignmentOSTypes(object):
def __init__(
self,
os_short_name: str = None,
os_version: str = None,
os_architecture: str = None,
):
self.os_short_name = os_short_name
self.os_version = os_version
self.os_architecture = os_architecture
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = guest_policy_pb2.OsconfigBetaGuestPolicyAssignmentOSTypes()
if Primitive.to_proto(resource.os_short_name):
res.os_short_name = Primitive.to_proto(resource.os_short_name)
if Primitive.to_proto(resource.os_version):
res.os_version = Primitive.to_proto(resource.os_version)
if Primitive.to_proto(resource.os_architecture):
res.os_architecture = Primitive.to_proto(resource.os_architecture)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return GuestPolicyAssignmentOSTypes(
os_short_name=Primitive.from_proto(resource.os_short_name),
os_version=Primitive.from_proto(resource.os_version),
os_architecture=Primitive.from_proto(resource.os_architecture),
)
class GuestPolicyAssignmentOSTypesArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [GuestPolicyAssignmentOSTypes.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [GuestPolicyAssignmentOSTypes.from_proto(i) for i in resources]
class GuestPolicyPackages(object):
def __init__(
self, name: str = None, desired_state: str = None, manager: str = None
):
self.name = name
self.desired_state = desired_state
self.manager = manager
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = guest_policy_pb2.OsconfigBetaGuestPolicyPackages()
if Primitive.to_proto(resource.name):
res.name = Primitive.to_proto(resource.name)
if GuestPolicyPackagesDesiredStateEnum.to_proto(resource.desired_state):
res.desired_state = GuestPolicyPackagesDesiredStateEnum.to_proto(
resource.desired_state
)
if GuestPolicyPackagesManagerEnum.to_proto(resource.manager):
res.manager = GuestPolicyPackagesManagerEnum.to_proto(resource.manager)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return GuestPolicyPackages(
name=Primitive.from_proto(resource.name),
desired_state=GuestPolicyPackagesDesiredStateEnum.from_proto(
resource.desired_state
),
manager=GuestPolicyPackagesManagerEnum.from_proto(resource.manager),
)
class GuestPolicyPackagesArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [GuestPolicyPackages.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [GuestPolicyPackages.from_proto(i) for i in resources]
class GuestPolicyPackageRepositories(object):
def __init__(
self, apt: dict = None, yum: dict = None, zypper: dict = None, goo: dict = None
):
self.apt = apt
self.yum = yum
self.zypper = zypper
self.goo = goo
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = guest_policy_pb2.OsconfigBetaGuestPolicyPackageRepositories()
if GuestPolicyPackageRepositoriesApt.to_proto(resource.apt):
res.apt.CopyFrom(GuestPolicyPackageRepositoriesApt.to_proto(resource.apt))
else:
res.ClearField("apt")
if GuestPolicyPackageRepositoriesYum.to_proto(resource.yum):
res.yum.CopyFrom(GuestPolicyPackageRepositoriesYum.to_proto(resource.yum))
else:
res.ClearField("yum")
if GuestPolicyPackageRepositoriesZypper.to_proto(resource.zypper):
res.zypper.CopyFrom(
GuestPolicyPackageRepositoriesZypper.to_proto(resource.zypper)
)
else:
res.ClearField("zypper")
if GuestPolicyPackageRepositoriesGoo.to_proto(resource.goo):
res.goo.CopyFrom(GuestPolicyPackageRepositoriesGoo.to_proto(resource.goo))
else:
res.ClearField("goo")
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return GuestPolicyPackageRepositories(
apt=GuestPolicyPackageRepositoriesApt.from_proto(resource.apt),
yum=GuestPolicyPackageRepositoriesYum.from_proto(resource.yum),
zypper=GuestPolicyPackageRepositoriesZypper.from_proto(resource.zypper),
goo=GuestPolicyPackageRepositoriesGoo.from_proto(resource.goo),
)
class GuestPolicyPackageRepositoriesArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [GuestPolicyPackageRepositories.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [GuestPolicyPackageRepositories.from_proto(i) for i in resources]
class GuestPolicyPackageRepositoriesApt(object):
def __init__(
self,
archive_type: str = None,
uri: str = None,
distribution: str = None,
components: list = None,
gpg_key: str = None,
):
self.archive_type = archive_type
self.uri = uri
self.distribution = distribution
self.components = components
self.gpg_key = gpg_key
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = guest_policy_pb2.OsconfigBetaGuestPolicyPackageRepositoriesApt()
if GuestPolicyPackageRepositoriesAptArchiveTypeEnum.to_proto(
resource.archive_type
):
res.archive_type = GuestPolicyPackageRepositoriesAptArchiveTypeEnum.to_proto(
resource.archive_type
)
if Primitive.to_proto(resource.uri):
res.uri = Primitive.to_proto(resource.uri)
if Primitive.to_proto(resource.distribution):
res.distribution = Primitive.to_proto(resource.distribution)
if Primitive.to_proto(resource.components):
res.components.extend(Primitive.to_proto(resource.components))
if Primitive.to_proto(resource.gpg_key):
res.gpg_key = Primitive.to_proto(resource.gpg_key)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return GuestPolicyPackageRepositoriesApt(
archive_type=GuestPolicyPackageRepositoriesAptArchiveTypeEnum.from_proto(
resource.archive_type
),
uri=Primitive.from_proto(resource.uri),
distribution=Primitive.from_proto(resource.distribution),
components=Primitive.from_proto(resource.components),
gpg_key=Primitive.from_proto(resource.gpg_key),
)
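# Illustrative only: an apt repository entry as this module models it. The
# URI, distribution, components and key URL are made up; "DEB" is assumed to
# be a valid archive_type value for the underlying proto enum.
def _example_apt_repository():
    return GuestPolicyPackageRepositories(
        apt=GuestPolicyPackageRepositoriesApt(
            archive_type="DEB",
            uri="https://packages.example.com/apt",
            distribution="stable",
            components=["main"],
            gpg_key="https://packages.example.com/key.gpg",
        )
    )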
class GuestPolicyPackageRepositoriesAptArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [GuestPolicyPackageRepositoriesApt.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [GuestPolicyPackageRepositoriesApt.from_proto(i) for i in resources]
class GuestPolicyPackageRepositoriesYum(object):
def __init__(
self,
id: str = None,
display_name: str = None,
base_url: str = None,
gpg_keys: list = None,
):
self.id = id
self.display_name = display_name
self.base_url = base_url
self.gpg_keys = gpg_keys
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = guest_policy_pb2.OsconfigBetaGuestPolicyPackageRepositoriesYum()
if Primitive.to_proto(resource.id):
res.id = Primitive.to_proto(resource.id)
if Primitive.to_proto(resource.display_name):
res.display_name = Primitive.to_proto(resource.display_name)
if Primitive.to_proto(resource.base_url):
res.base_url = Primitive.to_proto(resource.base_url)
if Primitive.to_proto(resource.gpg_keys):
res.gpg_keys.extend(Primitive.to_proto(resource.gpg_keys))
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return GuestPolicyPackageRepositoriesYum(
id=Primitive.from_proto(resource.id),
display_name=Primitive.from_proto(resource.display_name),
base_url=Primitive.from_proto(resource.base_url),
gpg_keys=Primitive.from_proto(resource.gpg_keys),
)
class GuestPolicyPackageRepositoriesYumArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [GuestPolicyPackageRepositoriesYum.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [GuestPolicyPackageRepositoriesYum.from_proto(i) for i in resources]
class GuestPolicyPackageRepositoriesZypper(object):
def __init__(
self,
id: str = None,
display_name: str = None,
base_url: str = None,
gpg_keys: list = None,
):
self.id = id
self.display_name = display_name
self.base_url = base_url
self.gpg_keys = gpg_keys
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = guest_policy_pb2.OsconfigBetaGuestPolicyPackageRepositoriesZypper()
if Primitive.to_proto(resource.id):
res.id = Primitive.to_proto(resource.id)
if Primitive.to_proto(resource.display_name):
res.display_name = Primitive.to_proto(resource.display_name)
if Primitive.to_proto(resource.base_url):
res.base_url = Primitive.to_proto(resource.base_url)
if Primitive.to_proto(resource.gpg_keys):
res.gpg_keys.extend(Primitive.to_proto(resource.gpg_keys))
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return GuestPolicyPackageRepositoriesZypper(
id=Primitive.from_proto(resource.id),
display_name=Primitive.from_proto(resource.display_name),
base_url=Primitive.from_proto(resource.base_url),
gpg_keys=Primitive.from_proto(resource.gpg_keys),
)
class GuestPolicyPackageRepositoriesZypperArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [GuestPolicyPackageRepositoriesZypper.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [GuestPolicyPackageRepositoriesZypper.from_proto(i) for i in resources]
class GuestPolicyPackageRepositoriesGoo(object):
def __init__(self, name: str = None, url: str = None):
self.name = name
self.url = url
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = guest_policy_pb2.OsconfigBetaGuestPolicyPackageRepositoriesGoo()
if Primitive.to_proto(resource.name):
res.name = Primitive.to_proto(resource.name)
if Primitive.to_proto(resource.url):
res.url = Primitive.to_proto(resource.url)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return GuestPolicyPackageRepositoriesGoo(
name=Primitive.from_proto(resource.name),
url=Primitive.from_proto(resource.url),
)
class GuestPolicyPackageRepositoriesGooArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [GuestPolicyPackageRepositoriesGoo.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [GuestPolicyPackageRepositoriesGoo.from_proto(i) for i in resources]
class GuestPolicyRecipes(object):
def __init__(
self,
name: str = None,
version: str = None,
artifacts: list = None,
install_steps: list = None,
update_steps: list = None,
desired_state: str = None,
):
self.name = name
self.version = version
self.artifacts = artifacts
self.install_steps = install_steps
self.update_steps = update_steps
self.desired_state = desired_state
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = guest_policy_pb2.OsconfigBetaGuestPolicyRecipes()
if Primitive.to_proto(resource.name):
res.name = Primitive.to_proto(resource.name)
if Primitive.to_proto(resource.version):
res.version = Primitive.to_proto(resource.version)
if GuestPolicyRecipesArtifactsArray.to_proto(resource.artifacts):
res.artifacts.extend(
GuestPolicyRecipesArtifactsArray.to_proto(resource.artifacts)
)
if GuestPolicyRecipesInstallStepsArray.to_proto(resource.install_steps):
res.install_steps.extend(
GuestPolicyRecipesInstallStepsArray.to_proto(resource.install_steps)
)
if GuestPolicyRecipesUpdateStepsArray.to_proto(resource.update_steps):
res.update_steps.extend(
GuestPolicyRecipesUpdateStepsArray.to_proto(resource.update_steps)
)
if GuestPolicyRecipesDesiredStateEnum.to_proto(resource.desired_state):
res.desired_state = GuestPolicyRecipesDesiredStateEnum.to_proto(
resource.desired_state
)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return GuestPolicyRecipes(
name=Primitive.from_proto(resource.name),
version=Primitive.from_proto(resource.version),
artifacts=GuestPolicyRecipesArtifactsArray.from_proto(resource.artifacts),
install_steps=GuestPolicyRecipesInstallStepsArray.from_proto(
resource.install_steps
),
update_steps=GuestPolicyRecipesUpdateStepsArray.from_proto(
resource.update_steps
),
desired_state=GuestPolicyRecipesDesiredStateEnum.from_proto(
resource.desired_state
),
)
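# Illustrative only: a minimal recipe with one remote artifact and one script
# install step. All values are hypothetical; "SHELL" and "INSTALLED" are
# assumed to be valid values of the corresponding proto enums.
def _example_recipe():
    return GuestPolicyRecipes(
        name="install-agent",
        version="1.0.0",
        artifacts=[GuestPolicyRecipesArtifacts(
            id="agent-tarball",
            remote=GuestPolicyRecipesArtifactsRemote(
                uri="https://example.com/agent.tar.gz",
                checksum="0123abcd",
            ),
        )],
        install_steps=[GuestPolicyRecipesInstallSteps(
            script_run=GuestPolicyRecipesInstallStepsScriptRun(
                script="#!/bin/sh\necho install",
                interpreter="SHELL",
            ),
        )],
        desired_state="INSTALLED",
    )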
class GuestPolicyRecipesArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [GuestPolicyRecipes.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [GuestPolicyRecipes.from_proto(i) for i in resources]
class GuestPolicyRecipesArtifacts(object):
def __init__(
self,
id: str = None,
remote: dict = None,
gcs: dict = None,
allow_insecure: bool = None,
):
self.id = id
self.remote = remote
self.gcs = gcs
self.allow_insecure = allow_insecure
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = guest_policy_pb2.OsconfigBetaGuestPolicyRecipesArtifacts()
if Primitive.to_proto(resource.id):
res.id = Primitive.to_proto(resource.id)
if GuestPolicyRecipesArtifactsRemote.to_proto(resource.remote):
res.remote.CopyFrom(
GuestPolicyRecipesArtifactsRemote.to_proto(resource.remote)
)
else:
res.ClearField("remote")
if GuestPolicyRecipesArtifactsGcs.to_proto(resource.gcs):
res.gcs.CopyFrom(GuestPolicyRecipesArtifactsGcs.to_proto(resource.gcs))
else:
res.ClearField("gcs")
if Primitive.to_proto(resource.allow_insecure):
res.allow_insecure = Primitive.to_proto(resource.allow_insecure)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return GuestPolicyRecipesArtifacts(
id=Primitive.from_proto(resource.id),
remote=GuestPolicyRecipesArtifactsRemote.from_proto(resource.remote),
gcs=GuestPolicyRecipesArtifactsGcs.from_proto(resource.gcs),
allow_insecure=Primitive.from_proto(resource.allow_insecure),
)
class GuestPolicyRecipesArtifactsArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [GuestPolicyRecipesArtifacts.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [GuestPolicyRecipesArtifacts.from_proto(i) for i in resources]
class GuestPolicyRecipesArtifactsRemote(object):
def __init__(self, uri: str = None, checksum: str = None):
self.uri = uri
self.checksum = checksum
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = guest_policy_pb2.OsconfigBetaGuestPolicyRecipesArtifactsRemote()
if Primitive.to_proto(resource.uri):
res.uri = Primitive.to_proto(resource.uri)
if Primitive.to_proto(resource.checksum):
res.checksum = Primitive.to_proto(resource.checksum)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return GuestPolicyRecipesArtifactsRemote(
uri=Primitive.from_proto(resource.uri),
checksum=Primitive.from_proto(resource.checksum),
)
class GuestPolicyRecipesArtifactsRemoteArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [GuestPolicyRecipesArtifactsRemote.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [GuestPolicyRecipesArtifactsRemote.from_proto(i) for i in resources]
class GuestPolicyRecipesArtifactsGcs(object):
def __init__(self, bucket: str = None, object: str = None, generation: int = None):
self.bucket = bucket
self.object = object
self.generation = generation
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = guest_policy_pb2.OsconfigBetaGuestPolicyRecipesArtifactsGcs()
if Primitive.to_proto(resource.bucket):
res.bucket = Primitive.to_proto(resource.bucket)
if Primitive.to_proto(resource.object):
res.object = Primitive.to_proto(resource.object)
if Primitive.to_proto(resource.generation):
res.generation = Primitive.to_proto(resource.generation)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return GuestPolicyRecipesArtifactsGcs(
bucket=Primitive.from_proto(resource.bucket),
object=Primitive.from_proto(resource.object),
generation=Primitive.from_proto(resource.generation),
)
class GuestPolicyRecipesArtifactsGcsArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [GuestPolicyRecipesArtifactsGcs.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [GuestPolicyRecipesArtifactsGcs.from_proto(i) for i in resources]
class GuestPolicyRecipesInstallSteps(object):
def __init__(
self,
file_copy: dict = None,
archive_extraction: dict = None,
msi_installation: dict = None,
dpkg_installation: dict = None,
rpm_installation: dict = None,
file_exec: dict = None,
script_run: dict = None,
):
self.file_copy = file_copy
self.archive_extraction = archive_extraction
self.msi_installation = msi_installation
self.dpkg_installation = dpkg_installation
self.rpm_installation = rpm_installation
self.file_exec = file_exec
self.script_run = script_run
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = guest_policy_pb2.OsconfigBetaGuestPolicyRecipesInstallSteps()
if GuestPolicyRecipesInstallStepsFileCopy.to_proto(resource.file_copy):
res.file_copy.CopyFrom(
GuestPolicyRecipesInstallStepsFileCopy.to_proto(resource.file_copy)
)
else:
res.ClearField("file_copy")
if GuestPolicyRecipesInstallStepsArchiveExtraction.to_proto(
resource.archive_extraction
):
res.archive_extraction.CopyFrom(
GuestPolicyRecipesInstallStepsArchiveExtraction.to_proto(
resource.archive_extraction
)
)
else:
res.ClearField("archive_extraction")
if GuestPolicyRecipesInstallStepsMsiInstallation.to_proto(
resource.msi_installation
):
res.msi_installation.CopyFrom(
GuestPolicyRecipesInstallStepsMsiInstallation.to_proto(
resource.msi_installation
)
)
else:
res.ClearField("msi_installation")
if GuestPolicyRecipesInstallStepsDpkgInstallation.to_proto(
resource.dpkg_installation
):
res.dpkg_installation.CopyFrom(
GuestPolicyRecipesInstallStepsDpkgInstallation.to_proto(
resource.dpkg_installation
)
)
else:
res.ClearField("dpkg_installation")
if GuestPolicyRecipesInstallStepsRpmInstallation.to_proto(
resource.rpm_installation
):
res.rpm_installation.CopyFrom(
GuestPolicyRecipesInstallStepsRpmInstallation.to_proto(
resource.rpm_installation
)
)
else:
res.ClearField("rpm_installation")
if GuestPolicyRecipesInstallStepsFileExec.to_proto(resource.file_exec):
res.file_exec.CopyFrom(
GuestPolicyRecipesInstallStepsFileExec.to_proto(resource.file_exec)
)
else:
res.ClearField("file_exec")
if GuestPolicyRecipesInstallStepsScriptRun.to_proto(resource.script_run):
res.script_run.CopyFrom(
GuestPolicyRecipesInstallStepsScriptRun.to_proto(resource.script_run)
)
else:
res.ClearField("script_run")
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return GuestPolicyRecipesInstallSteps(
file_copy=GuestPolicyRecipesInstallStepsFileCopy.from_proto(
resource.file_copy
),
archive_extraction=GuestPolicyRecipesInstallStepsArchiveExtraction.from_proto(
resource.archive_extraction
),
msi_installation=GuestPolicyRecipesInstallStepsMsiInstallation.from_proto(
resource.msi_installation
),
dpkg_installation=GuestPolicyRecipesInstallStepsDpkgInstallation.from_proto(
resource.dpkg_installation
),
rpm_installation=GuestPolicyRecipesInstallStepsRpmInstallation.from_proto(
resource.rpm_installation
),
file_exec=GuestPolicyRecipesInstallStepsFileExec.from_proto(
resource.file_exec
),
script_run=GuestPolicyRecipesInstallStepsScriptRun.from_proto(
resource.script_run
),
)
class GuestPolicyRecipesInstallStepsArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [GuestPolicyRecipesInstallSteps.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [GuestPolicyRecipesInstallSteps.from_proto(i) for i in resources]
class GuestPolicyRecipesInstallStepsFileCopy(object):
def __init__(
self,
artifact_id: str = None,
destination: str = None,
overwrite: bool = None,
permissions: str = None,
):
self.artifact_id = artifact_id
self.destination = destination
self.overwrite = overwrite
self.permissions = permissions
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = guest_policy_pb2.OsconfigBetaGuestPolicyRecipesInstallStepsFileCopy()
if Primitive.to_proto(resource.artifact_id):
res.artifact_id = Primitive.to_proto(resource.artifact_id)
if Primitive.to_proto(resource.destination):
res.destination = Primitive.to_proto(resource.destination)
if Primitive.to_proto(resource.overwrite):
res.overwrite = Primitive.to_proto(resource.overwrite)
if Primitive.to_proto(resource.permissions):
res.permissions = Primitive.to_proto(resource.permissions)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return GuestPolicyRecipesInstallStepsFileCopy(
artifact_id=Primitive.from_proto(resource.artifact_id),
destination=Primitive.from_proto(resource.destination),
overwrite=Primitive.from_proto(resource.overwrite),
permissions=Primitive.from_proto(resource.permissions),
)
class GuestPolicyRecipesInstallStepsFileCopyArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [GuestPolicyRecipesInstallStepsFileCopy.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [GuestPolicyRecipesInstallStepsFileCopy.from_proto(i) for i in resources]
class GuestPolicyRecipesInstallStepsArchiveExtraction(object):
def __init__(
self, artifact_id: str = None, destination: str = None, type: str = None
):
self.artifact_id = artifact_id
self.destination = destination
self.type = type
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
guest_policy_pb2.OsconfigBetaGuestPolicyRecipesInstallStepsArchiveExtraction()
)
if Primitive.to_proto(resource.artifact_id):
res.artifact_id = Primitive.to_proto(resource.artifact_id)
if Primitive.to_proto(resource.destination):
res.destination = Primitive.to_proto(resource.destination)
if GuestPolicyRecipesInstallStepsArchiveExtractionTypeEnum.to_proto(
resource.type
):
res.type = GuestPolicyRecipesInstallStepsArchiveExtractionTypeEnum.to_proto(
resource.type
)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return GuestPolicyRecipesInstallStepsArchiveExtraction(
artifact_id=Primitive.from_proto(resource.artifact_id),
destination=Primitive.from_proto(resource.destination),
type=GuestPolicyRecipesInstallStepsArchiveExtractionTypeEnum.from_proto(
resource.type
),
)
class GuestPolicyRecipesInstallStepsArchiveExtractionArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
GuestPolicyRecipesInstallStepsArchiveExtraction.to_proto(i)
for i in resources
]
@classmethod
def from_proto(self, resources):
return [
GuestPolicyRecipesInstallStepsArchiveExtraction.from_proto(i)
for i in resources
]
class GuestPolicyRecipesInstallStepsMsiInstallation(object):
def __init__(
self,
artifact_id: str = None,
flags: list = None,
allowed_exit_codes: list = None,
):
self.artifact_id = artifact_id
self.flags = flags
self.allowed_exit_codes = allowed_exit_codes
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
guest_policy_pb2.OsconfigBetaGuestPolicyRecipesInstallStepsMsiInstallation()
)
if Primitive.to_proto(resource.artifact_id):
res.artifact_id = Primitive.to_proto(resource.artifact_id)
if Primitive.to_proto(resource.flags):
res.flags.extend(Primitive.to_proto(resource.flags))
if int64Array.to_proto(resource.allowed_exit_codes):
res.allowed_exit_codes.extend(
int64Array.to_proto(resource.allowed_exit_codes)
)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return GuestPolicyRecipesInstallStepsMsiInstallation(
artifact_id=Primitive.from_proto(resource.artifact_id),
flags=Primitive.from_proto(resource.flags),
allowed_exit_codes=int64Array.from_proto(resource.allowed_exit_codes),
)
class GuestPolicyRecipesInstallStepsMsiInstallationArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
GuestPolicyRecipesInstallStepsMsiInstallation.to_proto(i) for i in resources
]
@classmethod
def from_proto(self, resources):
return [
GuestPolicyRecipesInstallStepsMsiInstallation.from_proto(i)
for i in resources
]
class GuestPolicyRecipesInstallStepsDpkgInstallation(object):
def __init__(self, artifact_id: str = None):
self.artifact_id = artifact_id
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
guest_policy_pb2.OsconfigBetaGuestPolicyRecipesInstallStepsDpkgInstallation()
)
if Primitive.to_proto(resource.artifact_id):
res.artifact_id = Primitive.to_proto(resource.artifact_id)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return GuestPolicyRecipesInstallStepsDpkgInstallation(
artifact_id=Primitive.from_proto(resource.artifact_id),
)
class GuestPolicyRecipesInstallStepsDpkgInstallationArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
GuestPolicyRecipesInstallStepsDpkgInstallation.to_proto(i)
for i in resources
]
@classmethod
def from_proto(self, resources):
return [
GuestPolicyRecipesInstallStepsDpkgInstallation.from_proto(i)
for i in resources
]
class GuestPolicyRecipesInstallStepsRpmInstallation(object):
def __init__(self, artifact_id: str = None):
self.artifact_id = artifact_id
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
guest_policy_pb2.OsconfigBetaGuestPolicyRecipesInstallStepsRpmInstallation()
)
if Primitive.to_proto(resource.artifact_id):
res.artifact_id = Primitive.to_proto(resource.artifact_id)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return GuestPolicyRecipesInstallStepsRpmInstallation(
artifact_id=Primitive.from_proto(resource.artifact_id),
)
class GuestPolicyRecipesInstallStepsRpmInstallationArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
GuestPolicyRecipesInstallStepsRpmInstallation.to_proto(i) for i in resources
]
@classmethod
def from_proto(self, resources):
return [
GuestPolicyRecipesInstallStepsRpmInstallation.from_proto(i)
for i in resources
]
class GuestPolicyRecipesInstallStepsFileExec(object):
def __init__(
self,
artifact_id: str = None,
local_path: str = None,
args: list = None,
allowed_exit_codes: list = None,
):
self.artifact_id = artifact_id
self.local_path = local_path
self.args = args
self.allowed_exit_codes = allowed_exit_codes
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = guest_policy_pb2.OsconfigBetaGuestPolicyRecipesInstallStepsFileExec()
if Primitive.to_proto(resource.artifact_id):
res.artifact_id = Primitive.to_proto(resource.artifact_id)
if Primitive.to_proto(resource.local_path):
res.local_path = Primitive.to_proto(resource.local_path)
if Primitive.to_proto(resource.args):
res.args.extend(Primitive.to_proto(resource.args))
if int64Array.to_proto(resource.allowed_exit_codes):
res.allowed_exit_codes.extend(
int64Array.to_proto(resource.allowed_exit_codes)
)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return GuestPolicyRecipesInstallStepsFileExec(
artifact_id=Primitive.from_proto(resource.artifact_id),
local_path=Primitive.from_proto(resource.local_path),
args=Primitive.from_proto(resource.args),
allowed_exit_codes=int64Array.from_proto(resource.allowed_exit_codes),
)
class GuestPolicyRecipesInstallStepsFileExecArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [GuestPolicyRecipesInstallStepsFileExec.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [GuestPolicyRecipesInstallStepsFileExec.from_proto(i) for i in resources]
class GuestPolicyRecipesInstallStepsScriptRun(object):
def __init__(
self,
script: str = None,
allowed_exit_codes: list = None,
interpreter: str = None,
):
self.script = script
self.allowed_exit_codes = allowed_exit_codes
self.interpreter = interpreter
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = guest_policy_pb2.OsconfigBetaGuestPolicyRecipesInstallStepsScriptRun()
if Primitive.to_proto(resource.script):
res.script = Primitive.to_proto(resource.script)
if int64Array.to_proto(resource.allowed_exit_codes):
res.allowed_exit_codes.extend(
int64Array.to_proto(resource.allowed_exit_codes)
)
if GuestPolicyRecipesInstallStepsScriptRunInterpreterEnum.to_proto(
resource.interpreter
):
res.interpreter = GuestPolicyRecipesInstallStepsScriptRunInterpreterEnum.to_proto(
resource.interpreter
)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return GuestPolicyRecipesInstallStepsScriptRun(
script=Primitive.from_proto(resource.script),
allowed_exit_codes=int64Array.from_proto(resource.allowed_exit_codes),
interpreter=GuestPolicyRecipesInstallStepsScriptRunInterpreterEnum.from_proto(
resource.interpreter
),
)
class GuestPolicyRecipesInstallStepsScriptRunArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [GuestPolicyRecipesInstallStepsScriptRun.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [
GuestPolicyRecipesInstallStepsScriptRun.from_proto(i) for i in resources
]
class GuestPolicyRecipesUpdateSteps(object):
def __init__(
self,
file_copy: dict = None,
archive_extraction: dict = None,
msi_installation: dict = None,
dpkg_installation: dict = None,
rpm_installation: dict = None,
file_exec: dict = None,
script_run: dict = None,
):
self.file_copy = file_copy
self.archive_extraction = archive_extraction
self.msi_installation = msi_installation
self.dpkg_installation = dpkg_installation
self.rpm_installation = rpm_installation
self.file_exec = file_exec
self.script_run = script_run
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = guest_policy_pb2.OsconfigBetaGuestPolicyRecipesUpdateSteps()
if GuestPolicyRecipesUpdateStepsFileCopy.to_proto(resource.file_copy):
res.file_copy.CopyFrom(
GuestPolicyRecipesUpdateStepsFileCopy.to_proto(resource.file_copy)
)
else:
res.ClearField("file_copy")
if GuestPolicyRecipesUpdateStepsArchiveExtraction.to_proto(
resource.archive_extraction
):
res.archive_extraction.CopyFrom(
GuestPolicyRecipesUpdateStepsArchiveExtraction.to_proto(
resource.archive_extraction
)
)
else:
res.ClearField("archive_extraction")
if GuestPolicyRecipesUpdateStepsMsiInstallation.to_proto(
resource.msi_installation
):
res.msi_installation.CopyFrom(
GuestPolicyRecipesUpdateStepsMsiInstallation.to_proto(
resource.msi_installation
)
)
else:
res.ClearField("msi_installation")
if GuestPolicyRecipesUpdateStepsDpkgInstallation.to_proto(
resource.dpkg_installation
):
res.dpkg_installation.CopyFrom(
GuestPolicyRecipesUpdateStepsDpkgInstallation.to_proto(
resource.dpkg_installation
)
)
else:
res.ClearField("dpkg_installation")
if GuestPolicyRecipesUpdateStepsRpmInstallation.to_proto(
resource.rpm_installation
):
res.rpm_installation.CopyFrom(
GuestPolicyRecipesUpdateStepsRpmInstallation.to_proto(
resource.rpm_installation
)
)
else:
res.ClearField("rpm_installation")
if GuestPolicyRecipesUpdateStepsFileExec.to_proto(resource.file_exec):
res.file_exec.CopyFrom(
GuestPolicyRecipesUpdateStepsFileExec.to_proto(resource.file_exec)
)
else:
res.ClearField("file_exec")
if GuestPolicyRecipesUpdateStepsScriptRun.to_proto(resource.script_run):
res.script_run.CopyFrom(
GuestPolicyRecipesUpdateStepsScriptRun.to_proto(resource.script_run)
)
else:
res.ClearField("script_run")
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return GuestPolicyRecipesUpdateSteps(
file_copy=GuestPolicyRecipesUpdateStepsFileCopy.from_proto(
resource.file_copy
),
archive_extraction=GuestPolicyRecipesUpdateStepsArchiveExtraction.from_proto(
resource.archive_extraction
),
msi_installation=GuestPolicyRecipesUpdateStepsMsiInstallation.from_proto(
resource.msi_installation
),
dpkg_installation=GuestPolicyRecipesUpdateStepsDpkgInstallation.from_proto(
resource.dpkg_installation
),
rpm_installation=GuestPolicyRecipesUpdateStepsRpmInstallation.from_proto(
resource.rpm_installation
),
file_exec=GuestPolicyRecipesUpdateStepsFileExec.from_proto(
resource.file_exec
),
script_run=GuestPolicyRecipesUpdateStepsScriptRun.from_proto(
resource.script_run
),
)
class GuestPolicyRecipesUpdateStepsArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [GuestPolicyRecipesUpdateSteps.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [GuestPolicyRecipesUpdateSteps.from_proto(i) for i in resources]
class GuestPolicyRecipesUpdateStepsFileCopy(object):
def __init__(
self,
artifact_id: str = None,
destination: str = None,
overwrite: bool = None,
permissions: str = None,
):
self.artifact_id = artifact_id
self.destination = destination
self.overwrite = overwrite
self.permissions = permissions
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = guest_policy_pb2.OsconfigBetaGuestPolicyRecipesUpdateStepsFileCopy()
if Primitive.to_proto(resource.artifact_id):
res.artifact_id = Primitive.to_proto(resource.artifact_id)
if Primitive.to_proto(resource.destination):
res.destination = Primitive.to_proto(resource.destination)
if Primitive.to_proto(resource.overwrite):
res.overwrite = Primitive.to_proto(resource.overwrite)
if Primitive.to_proto(resource.permissions):
res.permissions = Primitive.to_proto(resource.permissions)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return GuestPolicyRecipesUpdateStepsFileCopy(
artifact_id=Primitive.from_proto(resource.artifact_id),
destination=Primitive.from_proto(resource.destination),
overwrite=Primitive.from_proto(resource.overwrite),
permissions=Primitive.from_proto(resource.permissions),
)
class GuestPolicyRecipesUpdateStepsFileCopyArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [GuestPolicyRecipesUpdateStepsFileCopy.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [GuestPolicyRecipesUpdateStepsFileCopy.from_proto(i) for i in resources]
class GuestPolicyRecipesUpdateStepsArchiveExtraction(object):
def __init__(
self, artifact_id: str = None, destination: str = None, type: str = None
):
self.artifact_id = artifact_id
self.destination = destination
self.type = type
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
guest_policy_pb2.OsconfigBetaGuestPolicyRecipesUpdateStepsArchiveExtraction()
)
if Primitive.to_proto(resource.artifact_id):
res.artifact_id = Primitive.to_proto(resource.artifact_id)
if Primitive.to_proto(resource.destination):
res.destination = Primitive.to_proto(resource.destination)
if GuestPolicyRecipesUpdateStepsArchiveExtractionTypeEnum.to_proto(
resource.type
):
res.type = GuestPolicyRecipesUpdateStepsArchiveExtractionTypeEnum.to_proto(
resource.type
)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return GuestPolicyRecipesUpdateStepsArchiveExtraction(
artifact_id=Primitive.from_proto(resource.artifact_id),
destination=Primitive.from_proto(resource.destination),
type=GuestPolicyRecipesUpdateStepsArchiveExtractionTypeEnum.from_proto(
resource.type
),
)
class GuestPolicyRecipesUpdateStepsArchiveExtractionArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
GuestPolicyRecipesUpdateStepsArchiveExtraction.to_proto(i)
for i in resources
]
@classmethod
def from_proto(self, resources):
return [
GuestPolicyRecipesUpdateStepsArchiveExtraction.from_proto(i)
for i in resources
]
class GuestPolicyRecipesUpdateStepsMsiInstallation(object):
def __init__(
self,
artifact_id: str = None,
flags: list = None,
allowed_exit_codes: list = None,
):
self.artifact_id = artifact_id
self.flags = flags
self.allowed_exit_codes = allowed_exit_codes
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
guest_policy_pb2.OsconfigBetaGuestPolicyRecipesUpdateStepsMsiInstallation()
)
if Primitive.to_proto(resource.artifact_id):
res.artifact_id = Primitive.to_proto(resource.artifact_id)
if Primitive.to_proto(resource.flags):
res.flags.extend(Primitive.to_proto(resource.flags))
if int64Array.to_proto(resource.allowed_exit_codes):
res.allowed_exit_codes.extend(
int64Array.to_proto(resource.allowed_exit_codes)
)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return GuestPolicyRecipesUpdateStepsMsiInstallation(
artifact_id=Primitive.from_proto(resource.artifact_id),
flags=Primitive.from_proto(resource.flags),
allowed_exit_codes=int64Array.from_proto(resource.allowed_exit_codes),
)
class GuestPolicyRecipesUpdateStepsMsiInstallationArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
GuestPolicyRecipesUpdateStepsMsiInstallation.to_proto(i) for i in resources
]
@classmethod
def from_proto(self, resources):
return [
GuestPolicyRecipesUpdateStepsMsiInstallation.from_proto(i)
for i in resources
]
class GuestPolicyRecipesUpdateStepsDpkgInstallation(object):
def __init__(self, artifact_id: str = None):
self.artifact_id = artifact_id
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
guest_policy_pb2.OsconfigBetaGuestPolicyRecipesUpdateStepsDpkgInstallation()
)
if Primitive.to_proto(resource.artifact_id):
res.artifact_id = Primitive.to_proto(resource.artifact_id)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return GuestPolicyRecipesUpdateStepsDpkgInstallation(
artifact_id=Primitive.from_proto(resource.artifact_id),
)
class GuestPolicyRecipesUpdateStepsDpkgInstallationArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
GuestPolicyRecipesUpdateStepsDpkgInstallation.to_proto(i) for i in resources
]
@classmethod
def from_proto(self, resources):
return [
GuestPolicyRecipesUpdateStepsDpkgInstallation.from_proto(i)
for i in resources
]
class GuestPolicyRecipesUpdateStepsRpmInstallation(object):
def __init__(self, artifact_id: str = None):
self.artifact_id = artifact_id
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
guest_policy_pb2.OsconfigBetaGuestPolicyRecipesUpdateStepsRpmInstallation()
)
if Primitive.to_proto(resource.artifact_id):
res.artifact_id = Primitive.to_proto(resource.artifact_id)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return GuestPolicyRecipesUpdateStepsRpmInstallation(
artifact_id=Primitive.from_proto(resource.artifact_id),
)
class GuestPolicyRecipesUpdateStepsRpmInstallationArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
GuestPolicyRecipesUpdateStepsRpmInstallation.to_proto(i) for i in resources
]
@classmethod
def from_proto(self, resources):
return [
GuestPolicyRecipesUpdateStepsRpmInstallation.from_proto(i)
for i in resources
]
class GuestPolicyRecipesUpdateStepsFileExec(object):
def __init__(
self,
artifact_id: str = None,
local_path: str = None,
args: list = None,
allowed_exit_codes: list = None,
):
self.artifact_id = artifact_id
self.local_path = local_path
self.args = args
self.allowed_exit_codes = allowed_exit_codes
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = guest_policy_pb2.OsconfigBetaGuestPolicyRecipesUpdateStepsFileExec()
if Primitive.to_proto(resource.artifact_id):
res.artifact_id = Primitive.to_proto(resource.artifact_id)
if Primitive.to_proto(resource.local_path):
res.local_path = Primitive.to_proto(resource.local_path)
if Primitive.to_proto(resource.args):
res.args.extend(Primitive.to_proto(resource.args))
if int64Array.to_proto(resource.allowed_exit_codes):
res.allowed_exit_codes.extend(
int64Array.to_proto(resource.allowed_exit_codes)
)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return GuestPolicyRecipesUpdateStepsFileExec(
artifact_id=Primitive.from_proto(resource.artifact_id),
local_path=Primitive.from_proto(resource.local_path),
args=Primitive.from_proto(resource.args),
allowed_exit_codes=int64Array.from_proto(resource.allowed_exit_codes),
)
class GuestPolicyRecipesUpdateStepsFileExecArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [GuestPolicyRecipesUpdateStepsFileExec.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [GuestPolicyRecipesUpdateStepsFileExec.from_proto(i) for i in resources]
class GuestPolicyRecipesUpdateStepsScriptRun(object):
def __init__(
self,
script: str = None,
allowed_exit_codes: list = None,
interpreter: str = None,
):
self.script = script
self.allowed_exit_codes = allowed_exit_codes
self.interpreter = interpreter
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = guest_policy_pb2.OsconfigBetaGuestPolicyRecipesUpdateStepsScriptRun()
if Primitive.to_proto(resource.script):
res.script = Primitive.to_proto(resource.script)
if int64Array.to_proto(resource.allowed_exit_codes):
res.allowed_exit_codes.extend(
int64Array.to_proto(resource.allowed_exit_codes)
)
if GuestPolicyRecipesUpdateStepsScriptRunInterpreterEnum.to_proto(
resource.interpreter
):
res.interpreter = GuestPolicyRecipesUpdateStepsScriptRunInterpreterEnum.to_proto(
resource.interpreter
)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return GuestPolicyRecipesUpdateStepsScriptRun(
script=Primitive.from_proto(resource.script),
allowed_exit_codes=int64Array.from_proto(resource.allowed_exit_codes),
interpreter=GuestPolicyRecipesUpdateStepsScriptRunInterpreterEnum.from_proto(
resource.interpreter
),
)
class GuestPolicyRecipesUpdateStepsScriptRunArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [GuestPolicyRecipesUpdateStepsScriptRun.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [GuestPolicyRecipesUpdateStepsScriptRun.from_proto(i) for i in resources]
class GuestPolicyPackagesDesiredStateEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return guest_policy_pb2.OsconfigBetaGuestPolicyPackagesDesiredStateEnum.Value(
"OsconfigBetaGuestPolicyPackagesDesiredStateEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return guest_policy_pb2.OsconfigBetaGuestPolicyPackagesDesiredStateEnum.Name(
resource
)[len("OsconfigBetaGuestPolicyPackagesDesiredStateEnum") :]
class GuestPolicyPackagesManagerEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return guest_policy_pb2.OsconfigBetaGuestPolicyPackagesManagerEnum.Value(
"OsconfigBetaGuestPolicyPackagesManagerEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return guest_policy_pb2.OsconfigBetaGuestPolicyPackagesManagerEnum.Name(
resource
)[len("OsconfigBetaGuestPolicyPackagesManagerEnum") :]
class GuestPolicyPackageRepositoriesAptArchiveTypeEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return guest_policy_pb2.OsconfigBetaGuestPolicyPackageRepositoriesAptArchiveTypeEnum.Value(
"OsconfigBetaGuestPolicyPackageRepositoriesAptArchiveTypeEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return guest_policy_pb2.OsconfigBetaGuestPolicyPackageRepositoriesAptArchiveTypeEnum.Name(
resource
)[
len("OsconfigBetaGuestPolicyPackageRepositoriesAptArchiveTypeEnum") :
]
class GuestPolicyRecipesInstallStepsArchiveExtractionTypeEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return guest_policy_pb2.OsconfigBetaGuestPolicyRecipesInstallStepsArchiveExtractionTypeEnum.Value(
"OsconfigBetaGuestPolicyRecipesInstallStepsArchiveExtractionTypeEnum%s"
% resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return guest_policy_pb2.OsconfigBetaGuestPolicyRecipesInstallStepsArchiveExtractionTypeEnum.Name(
resource
)[
len("OsconfigBetaGuestPolicyRecipesInstallStepsArchiveExtractionTypeEnum") :
]
class GuestPolicyRecipesInstallStepsScriptRunInterpreterEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return guest_policy_pb2.OsconfigBetaGuestPolicyRecipesInstallStepsScriptRunInterpreterEnum.Value(
"OsconfigBetaGuestPolicyRecipesInstallStepsScriptRunInterpreterEnum%s"
% resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return guest_policy_pb2.OsconfigBetaGuestPolicyRecipesInstallStepsScriptRunInterpreterEnum.Name(
resource
)[
len("OsconfigBetaGuestPolicyRecipesInstallStepsScriptRunInterpreterEnum") :
]
class GuestPolicyRecipesUpdateStepsArchiveExtractionTypeEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return guest_policy_pb2.OsconfigBetaGuestPolicyRecipesUpdateStepsArchiveExtractionTypeEnum.Value(
"OsconfigBetaGuestPolicyRecipesUpdateStepsArchiveExtractionTypeEnum%s"
% resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return guest_policy_pb2.OsconfigBetaGuestPolicyRecipesUpdateStepsArchiveExtractionTypeEnum.Name(
resource
)[
len("OsconfigBetaGuestPolicyRecipesUpdateStepsArchiveExtractionTypeEnum") :
]
class GuestPolicyRecipesUpdateStepsScriptRunInterpreterEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return guest_policy_pb2.OsconfigBetaGuestPolicyRecipesUpdateStepsScriptRunInterpreterEnum.Value(
"OsconfigBetaGuestPolicyRecipesUpdateStepsScriptRunInterpreterEnum%s"
% resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return guest_policy_pb2.OsconfigBetaGuestPolicyRecipesUpdateStepsScriptRunInterpreterEnum.Name(
resource
)[
len("OsconfigBetaGuestPolicyRecipesUpdateStepsScriptRunInterpreterEnum") :
]
class GuestPolicyRecipesDesiredStateEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return guest_policy_pb2.OsconfigBetaGuestPolicyRecipesDesiredStateEnum.Value(
"OsconfigBetaGuestPolicyRecipesDesiredStateEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return guest_policy_pb2.OsconfigBetaGuestPolicyRecipesDesiredStateEnum.Name(
resource
)[len("OsconfigBetaGuestPolicyRecipesDesiredStateEnum") :]
class Primitive(object):
@classmethod
def to_proto(self, s):
if not s:
return ""
return s
@classmethod
def from_proto(self, s):
return s
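# Illustrative round-trip through the enum wrappers above (a hypothetical
# sketch, kept as a comment because the valid enum values depend on the
# generated guest_policy_pb2 module): to_proto() prefixes the short enum name
# with the proto type name before looking up its numeric value, and
# from_proto() strips that prefix again, so the two calls are inverses.
#
#   value = GuestPolicyPackagesDesiredStateEnum.to_proto("INSTALLED")
#   name = GuestPolicyPackagesDesiredStateEnum.from_proto(value)
#   assert name == "INSTALLED"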
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Core conversion logic, serves as main point of access."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import imp
import unittest
import gast
from tensorflow.python.autograph import operators
from tensorflow.python.autograph import utils
from tensorflow.python.autograph.converters import arg_defaults
from tensorflow.python.autograph.converters import asserts
from tensorflow.python.autograph.converters import break_statements
from tensorflow.python.autograph.converters import builtin_functions
from tensorflow.python.autograph.converters import call_trees
from tensorflow.python.autograph.converters import conditional_expressions
from tensorflow.python.autograph.converters import continue_statements
from tensorflow.python.autograph.converters import control_flow
from tensorflow.python.autograph.converters import directives
from tensorflow.python.autograph.converters import error_handlers
from tensorflow.python.autograph.converters import function_scopes
from tensorflow.python.autograph.converters import lists
from tensorflow.python.autograph.converters import logical_expressions
from tensorflow.python.autograph.converters import return_statements
from tensorflow.python.autograph.converters import side_effect_guards
from tensorflow.python.autograph.converters import slices
from tensorflow.python.autograph.core import config
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.core import errors as ag_errors
from tensorflow.python.autograph.core import function_wrapping
from tensorflow.python.autograph.core import naming
from tensorflow.python.autograph.core import unsupported_features_checker
from tensorflow.python.autograph.lang import special_functions
from tensorflow.python.autograph.pyct import ast_util
from tensorflow.python.autograph.pyct import compiler
from tensorflow.python.autograph.pyct import errors
from tensorflow.python.autograph.pyct import inspect_utils
from tensorflow.python.autograph.pyct import origin_info
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import pretty_printer
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.utils import ag_logging as logging
from tensorflow.python.util import tf_inspect
# TODO(mdan): Might we not need any renaming at all?
def is_whitelisted_for_graph(o):
"""Check whether an entity is whitelisted for use in graph mode.
Examples of whitelisted entities include all members of the tensorflow
package.
Args:
o: A Python entity.
Returns:
Boolean
"""
# TODO(b/120224672): Fix this.
if isinstance(o, functools.partial):
# tf_inspect.getmodule(functools.partial(...)) otherwise returns None since
# functools.partial objects do not have a __module__ attribute.
m = functools
else:
m = tf_inspect.getmodule(o)
if hasattr(m, '__name__'):
# Builtins typically have unnamed modules.
for prefix, in config.DEFAULT_UNCOMPILED_MODULES:
if m.__name__.startswith(prefix):
logging.log(2, 'Whitelisted: %s: name starts with "%s"', o, prefix)
return True
# Temporary -- whitelist tensorboard modules.
# TODO(b/122731813): Remove.
if m.__name__ == 'tensorboard' or '.tensorboard' in m.__name__:
logging.log(2, 'Whitelisted: %s: name contains "tensorboard"', o)
return True
if hasattr(o, 'autograph_info__') or hasattr(o, '__ag_compiled'):
logging.log(2, 'Whitelisted: %s: already converted', o)
return True
if hasattr(o, '__call__'):
# Callable objects: whitelisted if their __call__ method is.
# The type check avoids infinite recursion around the __call__ method
# of function objects.
if (type(o) != type(o.__call__)) and is_whitelisted_for_graph(o.__call__): # pylint: disable=unidiomatic-typecheck
logging.log(2, 'Whitelisted: %s: object __call__ whitelisted', o)
return True
owner_class = None
if tf_inspect.ismethod(o):
# Methods of whitelisted classes are also whitelisted, even if they are
# bound via user subclasses.
#
# For example, suppose `tf.Foo` has a method called `bar`, and `baz` is
# defined as below. `tf.Foo` is whitelisted. Then `baz.bar` is also
# whitelisted.
#
# class Custom(tf.Foo):
# pass
#
# baz = Custom()
#
# For the example above, if `Custom` did overload `bar`, then it would no
# longer be whitelisted.
owner_class = inspect_utils.getmethodclass(o)
if owner_class is not None:
if issubclass(owner_class, unittest.TestCase):
logging.log(2, 'Whitelisted: %s: method of TestCase subclass', o)
return True
owner_class = inspect_utils.getdefiningclass(o, owner_class)
if is_whitelisted_for_graph(owner_class):
logging.log(2, 'Whitelisted: %s: owner is whitelisted %s', o,
owner_class)
return True
if inspect_utils.isnamedtuple(o):
# Due to the way they're constructed, namedtuple types cannot be converted
# because they don't expose source code. But we assume they are safe for
# graph mode since they are just containers.
if tf_inspect.isclass(o) and len(o.__bases__) > 1:
logging.warn(
'Entity {} looks like a namedtuple subclass. Its constructor will'
' not be converted by AutoGraph, but if it has any custom methods,'
' those will be.'.format(o), 1)
logging.log(2, 'Whitelisted: %s: named tuple', o)
return True
logging.log(2, 'Not whitelisted: %s: default rule', o)
return False
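# Hypothetical usage sketch (not part of the original module). Under the
# default config, entities from uncompiled modules such as TensorFlow itself
# are expected to be whitelisted, while plain user functions are not:
#
#   import tensorflow as tf
#   assert is_whitelisted_for_graph(tf.reduce_sum)      # tensorflow module
#   assert not is_whitelisted_for_graph(lambda x: x)    # user-defined code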
def entity_to_graph(o, program_ctx, arg_values, arg_types):
"""Compile a Python entity into equivalent TensorFlow.
The function will also recursively compile all the entities that `o`
references, updating `dependency_cache`.
This function is reentrant, and relies on dependency_cache to avoid
generating duplicate code.
Args:
o: A Python entity.
program_ctx: A ProgramContext object.
arg_values: A dict containing value hints for symbols like function
parameters.
arg_types: A dict containing type hints for symbols like function
parameters.
Returns:
A tuple (ast, new_name, namespace):
* ast: An AST representing an entity with interface equivalent to `o`,
        but which when executed creates a TF graph.
* new_name: The symbol name under which the new entity can be found.
* namespace: A dict mapping all symbols visible to the converted entity,
keyed by their symbol name.
Raises:
ValueError: if the entity type is not supported.
"""
logging.log(1, 'Converting %s', o)
if tf_inspect.isclass(o):
nodes, name, ns = class_to_graph(o, program_ctx)
elif tf_inspect.isfunction(o):
nodes, name, ns = function_to_graph(o, program_ctx, arg_values, arg_types)
elif tf_inspect.ismethod(o):
nodes, name, ns = function_to_graph(o, program_ctx, arg_values, arg_types)
# TODO(mdan,yashkatariya): Remove when object conversion is implemented.
elif hasattr(o, '__class__'):
raise NotImplementedError(
'Object conversion is not yet supported. If you are '
'trying to convert code that uses an existing object, '
'try including the creation of that object in the '
'conversion. For example, instead of converting the method '
'of a class, try converting the entire class instead. '
'See https://github.com/tensorflow/tensorflow/blob/master/tensorflow/'
'python/autograph/README.md#using-the-functional-api '
'for more information.')
else:
raise ValueError(
'Entity "%s" has unsupported type "%s". Only functions and classes are '
'supported for now.' % (o, type(o)))
  # TODO(mdan): This is temporary. It should be created using a converter.
# TODO(mdan): The attribute should be added with a helper, not directly.
# The helper can ensure there are no collisions.
template = '''
entity.autograph_info__ = {}
'''
nodes.extend(templates.replace(template, entity=name))
if logging.has_verbosity(2):
logging.log(2, 'Compiled output of %s:\n\n%s\n', o,
compiler.ast_to_source(nodes))
if logging.has_verbosity(4):
for n in nodes:
logging.log(4, 'Compiled AST of %s:\n\n%s\n\n', o,
pretty_printer.fmt(n, color=False))
return nodes, name, ns
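# Hypothetical call sketch for entity_to_graph (the ProgramContext
# construction is version-specific and elided here):
#
#   program_ctx = converter.ProgramContext(...)  # options, autograph module
#   nodes, new_name, ns = entity_to_graph(my_fn, program_ctx, {}, {})
#   print(compiler.ast_to_source(nodes))  # inspect the generated code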
def class_to_graph(c, program_ctx):
"""Specialization of `entity_to_graph` for classes."""
# TODO(mdan): Revisit this altogether. Not sure we still need it.
converted_members = {}
method_filter = lambda m: tf_inspect.isfunction(m) or tf_inspect.ismethod(m)
members = tf_inspect.getmembers(c, predicate=method_filter)
if not members:
raise ValueError('Cannot convert %s: it has no member methods.' % c)
class_namespace = {}
for _, m in members:
# Only convert the members that are directly defined by the class.
if inspect_utils.getdefiningclass(m, c) is not c:
continue
nodes, _, namespace = function_to_graph(
m,
program_ctx=program_ctx,
arg_values={},
arg_types={'self': (c.__name__, c)},
do_rename=False)
if class_namespace is None:
class_namespace = namespace
else:
class_namespace.update(namespace)
converted_members[m] = nodes[0]
namer = naming.Namer(class_namespace)
class_name = namer.class_name(c.__name__)
  # Process any base classes: if the superclass is of a whitelisted type, an
# absolute import line is generated.
output_nodes = []
renames = {}
base_names = []
for base in c.__bases__:
if isinstance(object, base):
base_names.append('object')
continue
if is_whitelisted_for_graph(base):
alias = namer.new_symbol(base.__name__, ())
output_nodes.append(
gast.ImportFrom(
module=base.__module__,
names=[gast.alias(name=base.__name__, asname=alias)],
level=0))
else:
raise NotImplementedError(
'Conversion of classes that do not directly extend classes from'
' whitelisted modules is temporarily suspended. If this breaks'
' existing code please notify the AutoGraph team immediately.')
base_names.append(alias)
renames[qual_names.QN(base.__name__)] = qual_names.QN(alias)
# Generate the definition of the converted class.
bases = [gast.Name(n, gast.Load(), None) for n in base_names]
class_def = gast.ClassDef(
class_name,
bases=bases,
keywords=[],
body=list(converted_members.values()),
decorator_list=[])
# Make a final pass to replace references to the class or its base classes.
# Most commonly, this occurs when making super().__init__() calls.
# TODO(mdan): Making direct references to superclass' superclass will fail.
class_def = qual_names.resolve(class_def)
renames[qual_names.QN(c.__name__)] = qual_names.QN(class_name)
class_def = ast_util.rename_symbols(class_def, renames)
output_nodes.append(class_def)
return output_nodes, class_name, class_namespace
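# Illustrative output shape for class_to_graph (hypothetical example; the
# module path and generated names below are made up): converting
# `class Custom(tf.Foo)` where `tf.Foo` is whitelisted yields roughly
#
#   from tensorflow.foo_module import Foo as foo_alias
#   class TfCustom(foo_alias):
#     ...converted methods...
#
# with references to `Custom` and `Foo` inside the body renamed to the new
# symbols via ast_util.rename_symbols.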
def _add_reserved_symbol(namespace, name, entity):
if name not in namespace:
namespace[name] = entity
elif namespace[name] != entity:
raise ValueError('The name "%s" is reserved and may not be used.' % name)
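# Reservation semantics, illustrated (comment-only sketch): registering the
# same entity twice under a reserved name is a no-op, while rebinding the
# name to a different entity raises ValueError.
#
#   ns = {}
#   _add_reserved_symbol(ns, 'ag__', ag_internal)  # binds the name
#   _add_reserved_symbol(ns, 'ag__', ag_internal)  # ok: same entity
#   _add_reserved_symbol(ns, 'ag__', object())     # ValueError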
ag_internal = None
# TODO(mdan): Move into core or replace with an actual importable module.
def _add_self_references(namespace, autograph_module):
"""Adds namespace references to the module that exposes the api itself."""
global ag_internal
if ag_internal is None:
# Craft a module that exposes parts of the external API as well as certain
# internal modules.
ag_internal = imp.new_module('autograph')
ag_internal.__dict__.update(autograph_module.__dict__)
ag_internal.ConversionOptions = converter.ConversionOptions
ag_internal.Feature = converter.Feature
ag_internal.utils = utils
ag_internal.function_scope = function_wrapping.function_scope
ag_internal.rewrite_graph_construction_error = (
ag_errors.rewrite_graph_construction_error)
# TODO(mdan): Add safeguards against name clashes.
# We don't want to create a submodule because we want the operators to be
# accessible as ag__.<operator>
ag_internal.__dict__.update(special_functions.__dict__)
ag_internal.__dict__.update(operators.__dict__)
_add_reserved_symbol(namespace, 'ag__', ag_internal)
def function_to_graph(f, program_ctx, arg_values, arg_types, do_rename=True):
"""Specialization of `entity_to_graph` for callable functions."""
node, source = parser.parse_entity(f)
logging.log(3, 'Source code of %s:\n\n%s\n', f, source)
node = node.body[0]
# In general, the output of inspect.getsource is inexact for lambdas because
# it uses regex matching to adjust the exact location around the line number
# that CPython records. Then, the entire containing line is returned, which
# we may have trouble disambiguating. For example:
# x, y = lambda: 1, lambda: 2
if f.__name__ == '<lambda>':
nodes = ast_util.find_matching_definitions(node, f)
if len(nodes) != 1:
raise ValueError(
'Unable to identify source code of lambda function {}. It was'
' defined on this line: {}, which must contain a single lambda with'
' matching signature. To avoid ambiguity, define each lambda'
' in a separate expression.'.format(f, source))
node, = nodes
# TODO(znado): Place inside standard_analysis.
origin_info.resolve(node, source, f)
namespace = inspect_utils.getnamespace(f)
_add_self_references(namespace, program_ctx.autograph_module)
namer = naming.Namer(namespace)
entity_info = transformer.EntityInfo(
source_code=source,
source_file='<fragment>',
namespace=namespace,
arg_values=arg_values,
arg_types=arg_types)
context = converter.EntityContext(namer, entity_info, program_ctx)
try:
node = node_to_graph(node, context)
except (ValueError, AttributeError, KeyError, NotImplementedError) as e:
logging.error(1, 'Error converting %s', f, exc_info=True)
raise errors.InternalError('conversion', e)
# TODO(mdan): Catch and rethrow syntax errors.
if isinstance(node, gast.Lambda):
new_name = namer.new_symbol('tf__lambda', ())
node = gast.Assign(
targets=[gast.Name(new_name, gast.Store(), None)], value=node)
elif do_rename:
# TODO(mdan): This somewhat duplicates the renaming logic in call_trees.py
new_name = namer.function_name(f.__name__)
node.name = new_name
else:
new_name = f.__name__
assert node.name == new_name
return [node], new_name, namespace
def node_to_graph(node, context):
"""Convert Python code to equivalent TF graph mode code.
Args:
node: AST, the code to convert.
context: converter.EntityContext
Returns:
A tuple (node, deps):
* node: A Python ast node, representing the converted code.
* deps: A set of strings, the fully qualified names of entity
dependencies that this node has.
"""
# TODO(mdan): Insert list_comprehensions somewhere.
unsupported_features_checker.verify(node)
node = converter.standard_analysis(node, context, is_initial=True)
# Past this point, line numbers are no longer accurate so we ignore the
# source.
# TODO(mdan): Is it feasible to reconstruct intermediate source code?
context.info.source_code = None
node = converter.apply_(node, context, arg_defaults)
node = converter.apply_(node, context, directives)
node = converter.apply_(node, context, break_statements)
if context.program.options.uses(converter.Feature.ASSERT_STATEMENTS):
node = converter.apply_(node, context, asserts)
  # Note: sequencing continue canonicalization before loop canonicalization
  # avoids having to deal with the extra loop-increment operation that loop
  # canonicalization creates.
node = converter.apply_(node, context, continue_statements)
node = converter.apply_(node, context, return_statements)
if context.program.options.uses(converter.Feature.LISTS):
node = converter.apply_(node, context, lists)
node = converter.apply_(node, context, slices)
if context.program.options.uses(converter.Feature.BUILTIN_FUNCTIONS):
node = converter.apply_(node, context, builtin_functions)
node = converter.apply_(node, context, call_trees)
node = converter.apply_(node, context, control_flow)
node = converter.apply_(node, context, conditional_expressions)
if context.program.options.uses(converter.Feature.LOGICAL_EXPRESSIONS):
node = converter.apply_(node, context, logical_expressions)
if context.program.options.uses(converter.Feature.AUTO_CONTROL_DEPS):
node = converter.apply_(node, context, side_effect_guards)
# TODO(mdan): If function scopes ever does more, the toggle will need moving.
if context.program.options.uses(converter.Feature.NAME_SCOPES):
node = converter.apply_(node, context, function_scopes)
if context.program.options.uses(converter.Feature.ERROR_REWRITING):
node = converter.apply_(node, context, error_handlers)
return node
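# Hypothetical sketch of how the Feature toggles gate the optional passes
# above (this assumes ConversionOptions accepts an optional_features
# argument, matching the options.uses() queries made in node_to_graph):
#
#   opts = converter.ConversionOptions(
#       optional_features={converter.Feature.LISTS,
#                          converter.Feature.ASSERT_STATEMENTS})
#   # With these options, the lists/slices and asserts converters run in
#   # addition to the always-on passes (directives, control_flow, etc.).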
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import warnings
import numpy as np
from math import pi
import unittest
import os
from monty.os.path import which
from pymatgen.analysis.local_env import ValenceIonicRadiusEvaluator, \
VoronoiNN, JmolNN, MinimumDistanceNN, OpenBabelNN, CovalentBondNN, \
MinimumOKeeffeNN, MinimumVIRENN, \
get_neighbors_of_site_with_index, site_is_of_motif_type, \
NearNeighbors, LocalStructOrderParams, BrunnerNN_reciprocal, \
BrunnerNN_real, BrunnerNN_relative, EconNN, CrystalNN, CutOffDictNN, \
Critic2NN, solid_angle
from pymatgen import Element, Molecule, Structure, Lattice
from pymatgen.util.testing import PymatgenTest
try:
from openbabel import openbabel as ob
from openbabel import pybel as pb
except ImportError:
pb = None
ob = None
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
class ValenceIonicRadiusEvaluatorTest(PymatgenTest):
def setUp(self):
"""
        Set up MgO rocksalt structure for testing Vacancy
"""
mgo_latt = [[4.212, 0, 0], [0, 4.212, 0], [0, 0, 4.212]]
mgo_specie = ["Mg"] * 4 + ["O"] * 4
mgo_frac_cord = [[0, 0, 0], [0.5, 0.5, 0], [0.5, 0, 0.5], [0, 0.5, 0.5],
[0.5, 0, 0], [0, 0.5, 0], [0, 0, 0.5], [0.5, 0.5, 0.5]]
self._mgo_uc = Structure(mgo_latt, mgo_specie, mgo_frac_cord, True,
True)
self._mgo_valrad_evaluator = ValenceIonicRadiusEvaluator(self._mgo_uc)
def test_valences_ionic_structure(self):
valence_dict = self._mgo_valrad_evaluator.valences
for val in list(valence_dict.values()):
self.assertTrue(val in {2, -2})
def test_radii_ionic_structure(self):
radii_dict = self._mgo_valrad_evaluator.radii
for rad in list(radii_dict.values()):
self.assertTrue(rad in {0.86, 1.26})
def tearDown(self):
del self._mgo_uc
del self._mgo_valrad_evaluator
class VoronoiNNTest(PymatgenTest):
def setUp(self):
self.s = self.get_structure('LiFePO4')
self.nn = VoronoiNN(targets=[Element("O")])
self.s_sic = self.get_structure('Si')
self.s_sic["Si"] = {'Si': 0.5, 'C': 0.5}
self.nn_sic = VoronoiNN()
def test_get_voronoi_polyhedra(self):
self.assertEqual(len(self.nn.get_voronoi_polyhedra(self.s, 0).items()), 8)
def test_get_cn(self):
self.assertAlmostEqual(self.nn.get_cn(
self.s, 0, use_weights=True), 5.809265748999465, 7)
self.assertAlmostEqual(self.nn_sic.get_cn(
self.s_sic, 0, use_weights=True), 4.5381161643940668, 7)
def test_get_coordinated_sites(self):
self.assertEqual(len(self.nn.get_nn(self.s, 0)), 8)
def test_volume(self):
self.nn.targets = None
volume = 0
for n in range(len(self.s)):
for nn in self.nn.get_voronoi_polyhedra(self.s, n).values():
volume += nn['volume']
self.assertAlmostEqual(self.s.volume, volume)
def test_solid_angle(self):
self.nn.targets = None
for n in range(len(self.s)):
angle = 0
for nn in self.nn.get_voronoi_polyhedra(self.s, n).values():
angle += nn['solid_angle']
self.assertAlmostEqual(4 * np.pi, angle)
self.assertEqual(solid_angle([0, 0, 0], [[1, 0, 0], [-1, 0, 0], [0, 1, 0]]), pi)
def test_nn_shell(self):
        # First, make a simple cubic (SC) lattice to keep the math simple
s = Structure([[1, 0, 0], [0, 1, 0], [0, 0, 1]], ['Cu'], [[0, 0, 0]])
# Get the 1NN shell
self.nn.targets = None
nns = self.nn.get_nn_shell_info(s, 0, 1)
self.assertEqual(6, len(nns))
# Test the 2nd NN shell
nns = self.nn.get_nn_shell_info(s, 0, 2)
self.assertEqual(18, len(nns))
self.assertArrayAlmostEqual([1] * 6,
[x['weight'] for x in nns if
max(np.abs(x['image'])) == 2])
self.assertArrayAlmostEqual([2] * 12,
[x['weight'] for x in nns if
max(np.abs(x['image'])) == 1])
# Test the 3rd NN shell
nns = self.nn.get_nn_shell_info(s, 0, 3)
for nn in nns:
# Check that the coordinates were set correctly
self.assertArrayAlmostEqual(nn['site'].frac_coords, nn['image'])
# Test with a structure that has unequal faces
cscl = Structure(Lattice([[4.209, 0, 0], [0, 4.209, 0], [0, 0, 4.209]]),
["Cl1-", "Cs1+"], [[2.1045, 2.1045, 2.1045], [0, 0, 0]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.nn.weight = 'area'
nns = self.nn.get_nn_shell_info(cscl, 0, 1)
self.assertEqual(14, len(nns))
self.assertEqual(6, np.isclose([x['weight'] for x in nns],
0.125 / 0.32476).sum()) # Square faces
self.assertEqual(8, np.isclose([x['weight'] for x in nns], 1).sum())
nns = self.nn.get_nn_shell_info(cscl, 0, 2)
        # Weight of getting back onto the original site
# Square-square hop: 6*5 options times (0.125/0.32476)^2 weight each
# Hex-hex hop: 8*7 options times 1 weight each
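        # Numerically: 6*5 * (0.125/0.32476)**2 + 8*7 * 1 ~ 4.444 + 56 = 60.444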
self.assertAlmostEqual(60.4444,
np.sum([x['weight'] for x in nns if x['site_index'] == 0]),
places=3)
def test_adj_neighbors(self):
# Make a simple cubic structure
s = Structure([[1, 0, 0], [0, 1, 0], [0, 0, 1]], ['Cu'], [[0, 0, 0]])
# Compute the NNs with adjacency
self.nn.targets = None
neighbors = self.nn.get_voronoi_polyhedra(s, 0)
# Each neighbor has 4 adjacent neighbors, all orthogonal
for nn_key, nn_info in neighbors.items():
self.assertEqual(4, len(nn_info['adj_neighbors']))
for adj_key in nn_info['adj_neighbors']:
self.assertEqual(0, np.dot(nn_info['normal'], neighbors[adj_key]['normal']))
def test_all_at_once(self):
# Get all of the sites for LiFePO4
all_sites = self.nn.get_all_voronoi_polyhedra(self.s)
# Make sure they are the same as the single-atom ones
for i, site in enumerate(all_sites):
# Compute the tessellation using only one site
by_one = self.nn.get_voronoi_polyhedra(self.s, i)
            # Match the coordinates of the neighbors, since site matching does not seem to work
all_coords = np.sort([x['site'].coords for x in site.values()], axis=0)
by_one_coords = np.sort([x['site'].coords for x in by_one.values()], axis=0)
self.assertArrayAlmostEqual(all_coords, by_one_coords)
# Test the nn_info operation
all_nn_info = self.nn.get_all_nn_info(self.s)
for i, info in enumerate(all_nn_info):
# Compute using the by-one method
by_one = self.nn.get_nn_info(self.s, i)
# Get the weights
all_weights = sorted([x['weight'] for x in info])
by_one_weights = sorted([x['weight'] for x in by_one])
self.assertArrayAlmostEqual(all_weights, by_one_weights)
def test_Cs2O(self):
"""A problematic structure in the Materials Project"""
strc = Structure([[4.358219, 0.192833, 6.406960], [2.114414, 3.815824, 6.406960],
[0.311360, 0.192833, 7.742498]],
['O', 'Cs', 'Cs'],
[[0, 0, 0], [0.264318, 0.264318, 0.264318], [0.735682, 0.735682, 0.735682]],
coords_are_cartesian=False)
# Compute the voronoi tessellation
result = VoronoiNN().get_all_voronoi_polyhedra(strc)
self.assertEqual(3, len(result))
def test_filtered(self):
nn = VoronoiNN(weight='area')
# Make a bcc crystal
bcc = Structure([[1, 0, 0], [0, 1, 0], [0, 0, 1]], ['Cu', 'Cu'],
[[0, 0, 0], [0.5, 0.5, 0.5]], coords_are_cartesian=False)
# Compute the weight of the little face
big_face_area = np.sqrt(3) * 3 / 2 * (2 / 4 / 4)
small_face_area = 0.125
little_weight = small_face_area / big_face_area
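        # Numerically big_face_area ~ 0.3248, so little_weight ~ 0.125/0.3248 ~ 0.385:
        # the bcc Voronoi cell has 8 large hexagonal faces and 6 small square faces.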
# Run one test where you get the small neighbors
nn.tol = little_weight * 0.99
nns = nn.get_nn_info(bcc, 0)
self.assertEqual(14, len(nns))
# Run a second test where we screen out little faces
nn.tol = little_weight * 1.01
nns = nn.get_nn_info(bcc, 0)
self.assertEqual(8, len(nns))
# Make sure it works for the `get_all` operation
all_nns = nn.get_all_nn_info(bcc * [2, 2, 2])
self.assertEqual([8, ] * 16, [len(x) for x in all_nns])
def tearDown(self):
del self.s
del self.nn
class JmolNNTest(PymatgenTest):
def setUp(self):
self.jmol = JmolNN()
self.jmol_update = JmolNN(el_radius_updates={"Li": 1})
def test_get_nn(self):
s = self.get_structure('LiFePO4')
# Test the default near-neighbor finder.
nsites_checked = 0
for site_idx, site in enumerate(s):
if site.specie == Element("Li"):
self.assertEqual(self.jmol.get_cn(s, site_idx), 0)
nsites_checked += 1
elif site.specie == Element("Fe"):
self.assertEqual(self.jmol.get_cn(s, site_idx), 6)
nsites_checked += 1
elif site.specie == Element("P"):
self.assertEqual(self.jmol.get_cn(s, site_idx), 4)
nsites_checked += 1
self.assertEqual(nsites_checked, 12)
# Test a user override that would cause Li to show up as 6-coordinated
self.assertEqual(self.jmol_update.get_cn(s, 0), 6)
# Verify get_nn function works
self.assertEqual(len(self.jmol_update.get_nn(s, 0)), 6)
def tearDown(self):
del self.jmol
del self.jmol_update
class OpenBabelNNTest(PymatgenTest):
def setUp(self):
self.benzene = Molecule.from_file(os.path.join(test_dir, "benzene.xyz"))
self.acetylene = Molecule.from_file(os.path.join(test_dir, "acetylene.xyz"))
@unittest.skipIf((not (ob and pb)) or (not which("babel")),
"OpenBabel not installed.")
def test_nn_orders(self):
strat = OpenBabelNN()
acetylene = strat.get_nn_info(self.acetylene, 0)
self.assertEqual(acetylene[0]["weight"], 3)
self.assertEqual(acetylene[1]["weight"], 1)
# Currently, benzene bonds register either as double or single,
# not aromatic
# Instead of searching for aromatic bonds, we check that bonds are
# detected in the same way from both sides
self.assertEqual(strat.get_nn_info(self.benzene, 0)[0]["weight"],
strat.get_nn_info(self.benzene, 1)[0]["weight"])
@unittest.skipIf((not (ob and pb)) or (not which("babel")),
"OpenBabel not installed.")
def test_nn_length(self):
strat = OpenBabelNN(order=False)
benzene_bonds = strat.get_nn_info(self.benzene, 0)
c_bonds = [b for b in benzene_bonds if str(b["site"].specie) == "C"]
h_bonds = [b for b in benzene_bonds if str(b["site"].specie) == "H"]
self.assertAlmostEqual(c_bonds[0]["weight"], 1.41, 2)
self.assertAlmostEqual(h_bonds[0]["weight"], 1.02, 2)
self.assertAlmostEqual(strat.get_nn_info(self.acetylene, 0)[0]["weight"],
1.19,
2)
def tearDown(self):
del self.benzene
del self.acetylene
class CovalentBondNNTest(PymatgenTest):
def setUp(self):
self.benzene = Molecule.from_file(os.path.join(test_dir, "benzene.xyz"))
self.acetylene = Molecule.from_file(os.path.join(test_dir, "acetylene.xyz"))
def test_nn_orders(self):
strat = CovalentBondNN()
acetylene = strat.get_nn_info(self.acetylene, 0)
self.assertEqual(acetylene[0]["weight"], 3)
self.assertEqual(acetylene[1]["weight"], 1)
benzene = strat.get_nn_info(self.benzene, 0)
self.assertAlmostEqual(benzene[0]["weight"], 1.6596, places=4)
def test_nn_length(self):
strat = CovalentBondNN(order=False)
benzene_bonds = strat.get_nn_info(self.benzene, 0)
c_bonds = [b for b in benzene_bonds if str(b["site"].specie) == "C"]
h_bonds = [b for b in benzene_bonds if str(b["site"].specie) == "H"]
self.assertAlmostEqual(c_bonds[0]["weight"], 1.41, 2)
self.assertAlmostEqual(h_bonds[0]["weight"], 1.02, 2)
acetylene = strat.get_nn_info(self.acetylene, 0)
self.assertAlmostEqual(acetylene[0]["weight"], 1.19, places=2)
def test_bonded_structure(self):
strat = CovalentBondNN()
benzene = strat.get_bonded_structure(self.benzene)
self.assertEqual(len(benzene.find_rings()), 1)
acetylene = strat.get_bonded_structure(self.acetylene)
self.assertEqual(len(acetylene.graph.nodes), 4)
def tearDown(self):
del self.benzene
del self.acetylene
class MiniDistNNTest(PymatgenTest):
def setUp(self):
self.diamond = Structure(
Lattice([[2.189, 0, 1.264], [0.73, 2.064, 1.264],
[0, 0, 2.528]]), ["C0+", "C0+"], [[2.554, 1.806, 4.423],
[0.365, 0.258, 0.632]],
validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=True,
site_properties=None)
self.nacl = Structure(
Lattice([[3.485, 0, 2.012], [1.162, 3.286, 2.012],
[0, 0, 4.025]]), ["Na1+", "Cl1-"], [[0, 0, 0],
[2.324, 1.643, 4.025]],
validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=True,
site_properties=None)
self.cscl = Structure(
Lattice([[4.209, 0, 0], [0, 4.209, 0], [0, 0, 4.209]]),
["Cl1-", "Cs1+"], [[2.105, 2.105, 2.105], [0, 0, 0]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.mos2 = Structure(
Lattice([[3.19, 0, 0], [-1.595, 2.763, 0], [0, 0, 17.44]]),
['Mo', 'S', 'S'], [[-1e-06, 1.842, 3.72], [1.595, 0.92, 5.29],
[1.595, 0.92, 2.155]], coords_are_cartesian=True)
self.lifepo4 = self.get_structure("LiFePO4")
self.lifepo4.add_oxidation_state_by_guess()
def test_all_nn_classes(self):
self.assertEqual(MinimumDistanceNN(cutoff=5, get_all_sites=True).get_cn(
self.cscl, 0), 14)
self.assertEqual(MinimumDistanceNN().get_cn(self.diamond, 0), 4)
self.assertEqual(MinimumDistanceNN().get_cn(self.nacl, 0), 6)
self.assertEqual(MinimumDistanceNN().get_cn(self.lifepo4, 0), 6)
self.assertEqual(MinimumDistanceNN(tol=0.01).get_cn(self.cscl, 0), 8)
self.assertEqual(MinimumDistanceNN(tol=0.1).get_cn(self.mos2, 0), 6)
for image in MinimumDistanceNN(tol=0.1).get_nn_images(self.mos2, 0):
self.assertTrue(image in [(0, 0, 0), (0, 1, 0), (-1, 0, 0),
(0, 0, 0), (0, 1, 0), (-1, 0, 0)])
okeeffe = MinimumOKeeffeNN(tol=0.01)
self.assertEqual(okeeffe.get_cn(self.diamond, 0), 4)
self.assertEqual(okeeffe.get_cn(self.nacl, 0), 6)
self.assertEqual(okeeffe.get_cn(self.cscl, 0), 8)
self.assertEqual(okeeffe.get_cn(self.lifepo4, 0), 2)
virenn = MinimumVIRENN(tol=0.01)
self.assertEqual(virenn.get_cn(self.diamond, 0), 4)
self.assertEqual(virenn.get_cn(self.nacl, 0), 6)
self.assertEqual(virenn.get_cn(self.cscl, 0), 8)
self.assertEqual(virenn.get_cn(self.lifepo4, 0), 2)
brunner_recip = BrunnerNN_reciprocal(tol=0.01)
self.assertEqual(brunner_recip.get_cn(self.diamond, 0), 4)
self.assertEqual(brunner_recip.get_cn(self.nacl, 0), 6)
self.assertEqual(brunner_recip.get_cn(self.cscl, 0), 14)
self.assertEqual(brunner_recip.get_cn(self.lifepo4, 0), 6)
brunner_rel = BrunnerNN_relative(tol=0.01)
self.assertEqual(brunner_rel.get_cn(self.diamond, 0), 4)
self.assertEqual(brunner_rel.get_cn(self.nacl, 0), 6)
self.assertEqual(brunner_rel.get_cn(self.cscl, 0), 14)
self.assertEqual(brunner_rel.get_cn(self.lifepo4, 0), 6)
brunner_real = BrunnerNN_real(tol=0.01)
self.assertEqual(brunner_real.get_cn(self.diamond, 0), 4)
self.assertEqual(brunner_real.get_cn(self.nacl, 0), 6)
self.assertEqual(brunner_real.get_cn(self.cscl, 0), 14)
self.assertEqual(brunner_real.get_cn(self.lifepo4, 0), 30)
econn = EconNN()
self.assertEqual(econn.get_cn(self.diamond, 0), 4)
self.assertEqual(econn.get_cn(self.nacl, 0), 6)
self.assertEqual(econn.get_cn(self.cscl, 0), 14)
self.assertEqual(econn.get_cn(self.lifepo4, 0), 6)
voroinn = VoronoiNN(tol=0.5)
self.assertEqual(voroinn.get_cn(self.diamond, 0), 4)
self.assertEqual(voroinn.get_cn(self.nacl, 0), 6)
self.assertEqual(voroinn.get_cn(self.cscl, 0), 8)
self.assertEqual(voroinn.get_cn(self.lifepo4, 0), 6)
crystalnn = CrystalNN()
self.assertEqual(crystalnn.get_cn(self.diamond, 0), 4)
self.assertEqual(crystalnn.get_cn(self.nacl, 0), 6)
self.assertEqual(crystalnn.get_cn(self.cscl, 0), 8)
self.assertEqual(crystalnn.get_cn(self.lifepo4, 0), 6)
def test_get_local_order_params(self):
nn = MinimumDistanceNN()
ops = nn.get_local_order_parameters(self.diamond, 0)
self.assertAlmostEqual(ops['tetrahedral'], 0.9999934389036574)
ops = nn.get_local_order_parameters(self.nacl, 0)
self.assertAlmostEqual(ops['octahedral'], 0.9999995266669)
class MotifIdentificationTest(PymatgenTest):
def setUp(self):
self.silicon = Structure(
Lattice.cubic(5.47),
["Si", "Si", "Si", "Si", "Si", "Si", "Si", "Si"],
[[0.000000, 0.000000, 0.500000],
[0.750000, 0.750000, 0.750000],
[0.000000, 0.500000, 1.000000],
[0.750000, 0.250000, 0.250000],
[0.500000, 0.000000, 1.000000],
[0.250000, 0.750000, 0.250000],
[0.500000, 0.500000, 0.500000],
[0.250000, 0.250000, 0.750000]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=False, site_properties=None)
self.diamond = Structure(
Lattice([[2.189, 0, 1.264], [0.73, 2.064, 1.264],
[0, 0, 2.528]]), ["C0+", "C0+"], [[2.554, 1.806, 4.423],
[0.365, 0.258, 0.632]],
validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=True,
site_properties=None)
self.nacl = Structure(
Lattice([[3.485, 0, 2.012], [1.162, 3.286, 2.012],
[0, 0, 4.025]]), ["Na1+", "Cl1-"], [[0, 0, 0],
[2.324, 1.643, 4.025]],
validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=True,
site_properties=None)
self.cscl = Structure(
Lattice([[4.209, 0, 0], [0, 4.209, 0], [0, 0, 4.209]]),
["Cl1-", "Cs1+"], [[2.105, 2.105, 2.105], [0, 0, 0]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.square_pyramid = Structure(
Lattice([[100, 0, 0], [0, 100, 0], [0, 0, 100]]),
["C", "C", "C", "C", "C", "C"], [
[0, 0, 0], [1, 0, 0], [-1, 0, 0], [0, 1, 0], [0, -1, 0],
[0, 0, 1]], validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.trigonal_bipyramid = Structure(
Lattice([[100, 0, 0], [0, 100, 0], [0, 0, 100]]),
["P", "Cl", "Cl", "Cl", "Cl", "Cl"], [
[0, 0, 0], [0, 0, 2.14], [0, 2.02, 0], [1.74937, -1.01, 0],
[-1.74937, -1.01, 0], [0, 0, -2.14]], validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=True,
site_properties=None)
def test_site_is_of_motif_type(self):
for i in range(self.diamond.num_sites):
self.assertEqual(site_is_of_motif_type(
self.diamond, i), "tetrahedral")
for i in range(self.nacl.num_sites):
self.assertEqual(site_is_of_motif_type(
self.nacl, i), "octahedral")
for i in range(self.cscl.num_sites):
self.assertEqual(site_is_of_motif_type(
self.cscl, i), "bcc")
self.assertEqual(site_is_of_motif_type(
self.square_pyramid, 0), "square pyramidal")
for i in range(1, self.square_pyramid.num_sites):
self.assertEqual(site_is_of_motif_type(
self.square_pyramid, i), "unrecognized")
self.assertEqual(site_is_of_motif_type(
self.trigonal_bipyramid, 0), "trigonal bipyramidal")
for i in range(1, self.trigonal_bipyramid.num_sites):
self.assertEqual(site_is_of_motif_type(
self.trigonal_bipyramid, i), "unrecognized")
def test_get_neighbors_of_site_with_index(self):
self.assertEqual(len(get_neighbors_of_site_with_index(
self.diamond, 0)), 4)
self.assertEqual(len(get_neighbors_of_site_with_index(
self.nacl, 0)), 6)
self.assertEqual(len(get_neighbors_of_site_with_index(
self.cscl, 0)), 8)
self.assertEqual(len(get_neighbors_of_site_with_index(
self.diamond, 0, delta=0.01)), 4)
self.assertEqual(len(get_neighbors_of_site_with_index(
self.diamond, 0, cutoff=6)), 4)
self.assertEqual(len(get_neighbors_of_site_with_index(
self.diamond, 0, approach="voronoi")), 4)
self.assertEqual(len(get_neighbors_of_site_with_index(
self.diamond, 0, approach="min_OKeeffe")), 4)
self.assertEqual(len(get_neighbors_of_site_with_index(
self.diamond, 0, approach="min_VIRE")), 4)
def tearDown(self):
del self.silicon
del self.diamond
del self.nacl
del self.cscl
class NearNeighborTest(PymatgenTest):
def setUp(self):
self.diamond = Structure(
Lattice([[2.189, 0, 1.264], [0.73, 2.064, 1.264],
[0, 0, 2.528]]), ["C0+", "C0+"], [[2.554, 1.806, 4.423],
[0.365, 0.258, 0.632]],
validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=True,
site_properties=None)
def set_nn_info(self):
        # Check conformance. This implicitly assumes that all NearNeighbors
        # subclasses will correctly identify bonds in diamond; if one cannot,
        # there are probably bigger problems.
subclasses = NearNeighbors.__subclasses__()
for subclass in subclasses:
# Critic2NN has external dependency, is tested separately
if 'Critic2' not in str(subclass):
nn_info = subclass().get_nn_info(self.diamond, 0)
self.assertEqual(nn_info[0]['site_index'], 1)
self.assertEqual(nn_info[0]['image'][0], 1)
def tearDown(self):
del self.diamond
class LocalStructOrderParamsTest(PymatgenTest):
def setUp(self):
self.single_bond = Structure(
Lattice.cubic(10),
["H", "H", "H"], [[1, 0, 0], [0, 0, 0], [6, 0, 0]],
validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=True,
site_properties=None)
self.linear = Structure(
Lattice.cubic(10),
["H", "H", "H"], [[1, 0, 0], [0, 0, 0], [2, 0, 0]],
validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=True,
site_properties=None)
self.bent45 = Structure(
Lattice.cubic(10), ["H", "H", "H"],
[[0, 0, 0], [0.707, 0.707, 0], [0.707, 0, 0]],
validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=True,
site_properties=None)
self.cubic = Structure(
Lattice.cubic(1),
["H"], [[0, 0, 0]], validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=False,
site_properties=None)
self.bcc = Structure(
Lattice.cubic(1),
["H", "H"], [[0, 0, 0], [0.5, 0.5, 0.5]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=False, site_properties=None)
self.fcc = Structure(
Lattice.cubic(1), ["H", "H", "H", "H"],
[[0, 0, 0], [0, 0.5, 0.5], [0.5, 0, 0.5], [0.5, 0.5, 0]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=False, site_properties=None)
self.hcp = Structure(
Lattice.hexagonal(1, 1.633),
["H", "H"],
[[0.3333, 0.6667, 0.25], [0.6667, 0.3333, 0.75]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=False, site_properties=None)
self.diamond = Structure(
Lattice.cubic(1), ["H", "H", "H", "H", "H", "H", "H", "H"],
[[0, 0, 0.5], [0.75, 0.75, 0.75], [0, 0.5, 0], [0.75, 0.25, 0.25],
[0.5, 0, 0], [0.25, 0.75, 0.25], [0.5, 0.5, 0.5],
[0.25, 0.25, 0.75]], validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=False, site_properties=None)
self.trigonal_off_plane = Structure(
Lattice.cubic(100),
["H", "H", "H", "H"],
[[0.50, 0.50, 0.50], [0.25, 0.75, 0.25],
[0.25, 0.25, 0.75], [0.75, 0.25, 0.25]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.regular_triangle = Structure(
Lattice.cubic(30), ["H", "H", "H", "H"],
[[15, 15.28867, 15.65], [14.5, 15, 15], [15.5, 15, 15],
[15, 15.866, 15]], validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.trigonal_planar = Structure(
Lattice.cubic(30), ["H", "H", "H", "H"],
[[15, 15.28867, 15], [14.5, 15, 15], [15.5, 15, 15],
[15, 15.866, 15]], validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.square_planar = Structure(
Lattice.cubic(30), ["H", "H", "H", "H", "H"],
[[15, 15, 15], [14.75, 14.75, 15], [14.75, 15.25, 15],
[15.25, 14.75, 15], [15.25, 15.25, 15]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.square = Structure(
Lattice.cubic(30), ["H", "H", "H", "H", "H"],
[[15, 15, 15.707], [14.75, 14.75, 15], [14.75, 15.25, 15],
[15.25, 14.75, 15], [15.25, 15.25, 15]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.T_shape = Structure(
Lattice.cubic(30), ["H", "H", "H", "H"],
[[15, 15, 15], [15, 15, 15.5], [15, 15.5, 15],
[15, 14.5, 15]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.square_pyramid = Structure(
Lattice.cubic(30), ["H", "H", "H", "H", "H", "H"],
[[15, 15, 15], [15, 15, 15.3535], [14.75, 14.75, 15],
[14.75, 15.25, 15], [15.25, 14.75, 15], [15.25, 15.25, 15]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.pentagonal_planar = Structure(
Lattice.cubic(30), ["Xe", "F", "F", "F", "F", "F"],
[[0, -1.6237, 0], [1.17969, 0, 0], [-1.17969, 0, 0],
[1.90877, -2.24389, 0], [-1.90877, -2.24389, 0], [0, -3.6307, 0]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.pentagonal_pyramid = Structure(
Lattice.cubic(30), ["Xe", "F", "F", "F", "F", "F", "F"],
[[0, -1.6237, 0], [0, -1.6237, 1.17969], [1.17969, 0, 0],
[-1.17969, 0, 0], [1.90877, -2.24389, 0],
[-1.90877, -2.24389, 0], [0, -3.6307, 0]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.pentagonal_bipyramid = Structure(
Lattice.cubic(30),
["Xe", "F", "F", "F", "F", "F", "F", "F"],
[[0, -1.6237, 0], [0, -1.6237, -1.17969],
[0, -1.6237, 1.17969], [1.17969, 0, 0],
[-1.17969, 0, 0], [1.90877, -2.24389, 0],
[-1.90877, -2.24389, 0], [0, -3.6307, 0]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.hexagonal_planar = Structure(
Lattice.cubic(30),
["H", "C", "C", "C", "C", "C", "C"],
[[0, 0, 0], [0.71, 1.2298, 0],
[-0.71, 1.2298, 0], [0.71, -1.2298, 0], [-0.71, -1.2298, 0],
[1.4199, 0, 0], [-1.4199, 0, 0]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.hexagonal_pyramid = Structure(
Lattice.cubic(30),
["H", "Li", "C", "C", "C", "C", "C", "C"],
[[0, 0, 0], [0, 0, 1.675], [0.71, 1.2298, 0],
[-0.71, 1.2298, 0], [0.71, -1.2298, 0], [-0.71, -1.2298, 0],
[1.4199, 0, 0], [-1.4199, 0, 0]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.hexagonal_bipyramid = Structure(
Lattice.cubic(30),
["H", "Li", "Li", "C", "C", "C", "C", "C", "C"],
[[0, 0, 0], [0, 0, 1.675], [0, 0, -1.675],
[0.71, 1.2298, 0], [-0.71, 1.2298, 0],
[0.71, -1.2298, 0], [-0.71, -1.2298, 0],
[1.4199, 0, 0], [-1.4199, 0, 0]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.trigonal_pyramid = Structure(
Lattice.cubic(30),
["P", "Cl", "Cl", "Cl", "Cl"],
[[0, 0, 0], [0, 0, 2.14], [0, 2.02, 0],
[1.74937, -1.01, 0], [-1.74937, -1.01, 0]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.trigonal_bipyramidal = Structure(
Lattice.cubic(30), ["P", "Cl", "Cl", "Cl", "Cl", "Cl"],
[[0, 0, 0], [0, 0, 2.14], [0, 2.02, 0],
[1.74937, -1.01, 0], [-1.74937, -1.01, 0], [0, 0, -2.14]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.cuboctahedron = Structure(
Lattice.cubic(30),
["H", "H", "H", "H", "H", "H", "H", "H", "H", "H", "H", "H", "H"],
[[15, 15, 15], [15, 14.5, 14.5], [15, 14.5, 15.5],
[15, 15.5, 14.5], [15, 15.5, 15.5],
[14.5, 15, 14.5], [14.5, 15, 15.5], [15.5, 15, 14.5], [15.5, 15, 15.5],
[14.5, 14.5, 15], [14.5, 15.5, 15], [15.5, 14.5, 15], [15.5, 15.5, 15]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.see_saw_rect = Structure(
Lattice.cubic(30),
["H", "H", "H", "H", "H"],
[[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, -1.0, 0.0],
[0.0, 0.0, -1.0], [-1.0, 0.0, 0.0]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.sq_face_capped_trig_pris = Structure(
Lattice.cubic(30),
["H", "H", "H", "H", "H", "H", "H", "H"],
[[0, 0, 0], [-0.6546536707079771, -0.37796447300922725, 0.6546536707079771],
[0.6546536707079771, -0.37796447300922725, 0.6546536707079771],
[0.0, 0.7559289460184545, 0.6546536707079771],
[-0.6546536707079771, -0.37796447300922725, -0.6546536707079771],
[0.6546536707079771, -0.37796447300922725, -0.6546536707079771],
[0.0, 0.7559289460184545, -0.6546536707079771], [0.0, -1.0, 0.0]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
def test_init(self):
self.assertIsNotNone(
LocalStructOrderParams(["cn"], parameters=None, cutoff=0.99))
parameters = [{'norm': 2}]
lostops = LocalStructOrderParams(["cn"], parameters=parameters)
tmp = lostops.get_parameters(0)
parameters[0]['norm'] = 3
self.assertEqual(tmp, lostops.get_parameters(0))
def test_get_order_parameters(self):
# Set up everything.
op_types = ["cn", "bent", "bent", "tet", "oct", "bcc", "q2", "q4",
"q6", "reg_tri", "sq", "sq_pyr_legacy", "tri_bipyr", "sgl_bd",
"tri_plan", "sq_plan", "pent_plan", "sq_pyr", "tri_pyr",
"pent_pyr", "hex_pyr", "pent_bipyr", "hex_bipyr", "T", "cuboct",
"see_saw_rect", "hex_plan_max", "tet_max", "oct_max", "tri_plan_max", "sq_plan_max",
"pent_plan_max", "cuboct_max", "tet_max", "sq_face_cap_trig_pris"]
op_params = [None for i in range(len(op_types))]
op_params[1] = {'TA': 1, 'IGW_TA': 1. / 0.0667}
op_params[2] = {'TA': 45. / 180, 'IGW_TA': 1. / 0.0667}
op_params[33] = {'TA': 0.6081734479693927, 'IGW_TA': 18.33, "fac_AA": 1.5, "exp_cos_AA": 2}
ops_044 = LocalStructOrderParams(op_types, parameters=op_params, cutoff=0.44)
ops_071 = LocalStructOrderParams(op_types, parameters=op_params, cutoff=0.71)
ops_087 = LocalStructOrderParams(op_types, parameters=op_params, cutoff=0.87)
ops_099 = LocalStructOrderParams(op_types, parameters=op_params, cutoff=0.99)
ops_101 = LocalStructOrderParams(op_types, parameters=op_params, cutoff=1.01)
ops_501 = LocalStructOrderParams(op_types, parameters=op_params, cutoff=5.01)
ops_voro = LocalStructOrderParams(op_types, parameters=op_params)
# Single bond.
op_vals = ops_101.get_order_parameters(self.single_bond, 0)
self.assertAlmostEqual(int(op_vals[13] * 1000), 1000)
op_vals = ops_501.get_order_parameters(self.single_bond, 0)
self.assertAlmostEqual(int(op_vals[13] * 1000), 799)
op_vals = ops_101.get_order_parameters(self.linear, 0)
self.assertAlmostEqual(int(op_vals[13] * 1000), 0)
# Linear motif.
op_vals = ops_101.get_order_parameters(self.linear, 0)
self.assertAlmostEqual(int(op_vals[1] * 1000), 1000)
# 45 degrees-bent motif.
op_vals = ops_101.get_order_parameters(self.bent45, 0)
self.assertAlmostEqual(int(op_vals[2] * 1000), 1000)
# T-shape motif.
op_vals = ops_101.get_order_parameters(
self.T_shape, 0, indices_neighs=[1, 2, 3])
self.assertAlmostEqual(int(op_vals[23] * 1000), 1000)
# Cubic structure.
op_vals = ops_099.get_order_parameters(self.cubic, 0)
self.assertAlmostEqual(op_vals[0], 0.0)
self.assertIsNone(op_vals[3])
self.assertIsNone(op_vals[4])
self.assertIsNone(op_vals[5])
self.assertIsNone(op_vals[6])
self.assertIsNone(op_vals[7])
self.assertIsNone(op_vals[8])
op_vals = ops_101.get_order_parameters(self.cubic, 0)
self.assertAlmostEqual(op_vals[0], 6.0)
self.assertAlmostEqual(int(op_vals[3] * 1000), 23)
self.assertAlmostEqual(int(op_vals[4] * 1000), 1000)
self.assertAlmostEqual(int(op_vals[5] * 1000), 333)
self.assertAlmostEqual(int(op_vals[6] * 1000), 0)
self.assertAlmostEqual(int(op_vals[7] * 1000), 763)
self.assertAlmostEqual(int(op_vals[8] * 1000), 353)
self.assertAlmostEqual(int(op_vals[28] * 1000), 1000)
# Bcc structure.
op_vals = ops_087.get_order_parameters(self.bcc, 0)
self.assertAlmostEqual(op_vals[0], 8.0)
self.assertAlmostEqual(int(op_vals[3] * 1000), 200)
self.assertAlmostEqual(int(op_vals[4] * 1000), 145)
self.assertAlmostEqual(int(op_vals[5] * 1000 + 0.5), 1000)
self.assertAlmostEqual(int(op_vals[6] * 1000), 0)
self.assertAlmostEqual(int(op_vals[7] * 1000), 509)
self.assertAlmostEqual(int(op_vals[8] * 1000), 628)
# Fcc structure.
op_vals = ops_071.get_order_parameters(self.fcc, 0)
self.assertAlmostEqual(op_vals[0], 12.0)
self.assertAlmostEqual(int(op_vals[3] * 1000), 36)
self.assertAlmostEqual(int(op_vals[4] * 1000), 78)
self.assertAlmostEqual(int(op_vals[5] * 1000), -2)
self.assertAlmostEqual(int(op_vals[6] * 1000), 0)
self.assertAlmostEqual(int(op_vals[7] * 1000), 190)
self.assertAlmostEqual(int(op_vals[8] * 1000), 574)
# Hcp structure.
op_vals = ops_101.get_order_parameters(self.hcp, 0)
self.assertAlmostEqual(op_vals[0], 12.0)
self.assertAlmostEqual(int(op_vals[3] * 1000), 33)
self.assertAlmostEqual(int(op_vals[4] * 1000), 82)
# self.assertAlmostEqual(int(op_vals[5] * 1000), -26)
self.assertAlmostEqual(int(op_vals[6] * 1000), 0)
self.assertAlmostEqual(int(op_vals[7] * 1000), 97)
self.assertAlmostEqual(int(op_vals[8] * 1000), 484)
# Diamond structure.
op_vals = ops_044.get_order_parameters(self.diamond, 0)
self.assertAlmostEqual(op_vals[0], 4.0)
self.assertAlmostEqual(int(op_vals[3] * 1000), 1000)
self.assertAlmostEqual(int(op_vals[4] * 1000), 37)
self.assertAlmostEqual(op_vals[5], 0.75)
self.assertAlmostEqual(int(op_vals[6] * 1000), 0)
self.assertAlmostEqual(int(op_vals[7] * 1000), 509)
self.assertAlmostEqual(int(op_vals[8] * 1000), 628)
self.assertAlmostEqual(int(op_vals[27] * 1000), 1000)
# Trigonal off-plane molecule.
op_vals = ops_044.get_order_parameters(self.trigonal_off_plane, 0)
self.assertAlmostEqual(op_vals[0], 3.0)
self.assertAlmostEqual(int(op_vals[3] * 1000), 1000)
self.assertAlmostEqual(int(op_vals[33] * 1000), 1000)
# Trigonal-planar motif.
op_vals = ops_101.get_order_parameters(self.trigonal_planar, 0)
self.assertEqual(int(op_vals[0] + 0.5), 3)
self.assertAlmostEqual(int(op_vals[14] * 1000 + 0.5), 1000)
self.assertAlmostEqual(int(op_vals[29] * 1000 + 0.5), 1000)
# Regular triangle motif.
op_vals = ops_101.get_order_parameters(self.regular_triangle, 0)
self.assertAlmostEqual(int(op_vals[9] * 1000), 999)
# Square-planar motif.
op_vals = ops_101.get_order_parameters(self.square_planar, 0)
self.assertAlmostEqual(int(op_vals[15] * 1000 + 0.5), 1000)
self.assertAlmostEqual(int(op_vals[30] * 1000 + 0.5), 1000)
# Square motif.
op_vals = ops_101.get_order_parameters(self.square, 0)
self.assertAlmostEqual(int(op_vals[10] * 1000), 1000)
# Pentagonal planar.
op_vals = ops_101.get_order_parameters(
self.pentagonal_planar.sites, 0, indices_neighs=[1, 2, 3, 4, 5])
self.assertAlmostEqual(int(op_vals[12] * 1000 + 0.5), 126)
self.assertAlmostEqual(int(op_vals[16] * 1000 + 0.5), 1000)
self.assertAlmostEqual(int(op_vals[31] * 1000 + 0.5), 1000)
# Trigonal pyramid motif.
op_vals = ops_101.get_order_parameters(
self.trigonal_pyramid, 0, indices_neighs=[1, 2, 3, 4])
self.assertAlmostEqual(int(op_vals[18] * 1000 + 0.5), 1000)
# Square pyramid motif.
op_vals = ops_101.get_order_parameters(self.square_pyramid, 0)
self.assertAlmostEqual(int(op_vals[11] * 1000 + 0.5), 1000)
self.assertAlmostEqual(int(op_vals[12] * 1000 + 0.5), 667)
self.assertAlmostEqual(int(op_vals[17] * 1000 + 0.5), 1000)
# Pentagonal pyramid motif.
op_vals = ops_101.get_order_parameters(
self.pentagonal_pyramid, 0, indices_neighs=[1, 2, 3, 4, 5, 6])
self.assertAlmostEqual(int(op_vals[19] * 1000 + 0.5), 1000)
# Hexagonal pyramid motif.
op_vals = ops_101.get_order_parameters(
self.hexagonal_pyramid, 0, indices_neighs=[1, 2, 3, 4, 5, 6, 7])
self.assertAlmostEqual(int(op_vals[20] * 1000 + 0.5), 1000)
# Trigonal bipyramidal.
op_vals = ops_101.get_order_parameters(
self.trigonal_bipyramidal.sites, 0, indices_neighs=[1, 2, 3, 4, 5])
self.assertAlmostEqual(int(op_vals[12] * 1000 + 0.5), 1000)
# Pentagonal bipyramidal.
op_vals = ops_101.get_order_parameters(
self.pentagonal_bipyramid.sites, 0,
indices_neighs=[1, 2, 3, 4, 5, 6, 7])
self.assertAlmostEqual(int(op_vals[21] * 1000 + 0.5), 1000)
# Hexagonal bipyramid motif.
op_vals = ops_101.get_order_parameters(
self.hexagonal_bipyramid, 0, indices_neighs=[1, 2, 3, 4, 5, 6, 7, 8])
self.assertAlmostEqual(int(op_vals[22] * 1000 + 0.5), 1000)
# Cuboctahedral motif.
op_vals = ops_101.get_order_parameters(
self.cuboctahedron, 0, indices_neighs=[i for i in range(1, 13)])
self.assertAlmostEqual(int(op_vals[24] * 1000 + 0.5), 1000)
self.assertAlmostEqual(int(op_vals[32] * 1000 + 0.5), 1000)
# See-saw motif.
op_vals = ops_101.get_order_parameters(
self.see_saw_rect, 0, indices_neighs=[i for i in range(1, 5)])
self.assertAlmostEqual(int(op_vals[25] * 1000 + 0.5), 1000)
# Hexagonal planar motif.
op_vals = ops_101.get_order_parameters(
self.hexagonal_planar, 0, indices_neighs=[1, 2, 3, 4, 5, 6])
self.assertAlmostEqual(int(op_vals[26] * 1000 + 0.5), 1000)
# Square face capped trigonal prism.
op_vals = ops_101.get_order_parameters(
self.sq_face_capped_trig_pris, 0,
indices_neighs=[i for i in range(1, 8)])
self.assertAlmostEqual(int(op_vals[34] * 1000 + 0.5), 1000)
# Test providing explicit neighbor lists.
op_vals = ops_101.get_order_parameters(self.bcc, 0, indices_neighs=[1])
self.assertIsNotNone(op_vals[0])
self.assertIsNone(op_vals[3])
with self.assertRaises(ValueError):
ops_101.get_order_parameters(self.bcc, 0, indices_neighs=[2])
def tearDown(self):
del self.single_bond
del self.linear
del self.bent45
del self.cubic
del self.fcc
del self.bcc
del self.hcp
del self.diamond
del self.regular_triangle
del self.square
del self.square_pyramid
del self.trigonal_off_plane
del self.trigonal_pyramid
del self.trigonal_planar
del self.square_planar
del self.pentagonal_pyramid
del self.hexagonal_pyramid
del self.pentagonal_bipyramid
del self.T_shape
del self.cuboctahedron
del self.see_saw_rect
class CrystalNNTest(PymatgenTest):
def setUp(self):
self.lifepo4 = self.get_structure('LiFePO4')
self.lifepo4.add_oxidation_state_by_guess()
self.he_bcc = self.get_structure('He_BCC')
self.he_bcc.add_oxidation_state_by_guess()
self.prev_warnings = warnings.filters
warnings.simplefilter("ignore")
def tearDown(self):
warnings.filters = self.prev_warnings
def test_sanity(self):
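        # CrystalNN requires the weighted_cn flag and the use_weights argument
        # to agree; mixing an unweighted instance with use_weights=True (or
        # vice versa) raises ValueError.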
with self.assertRaises(ValueError):
cnn = CrystalNN()
cnn.get_cn(self.lifepo4, 0, use_weights=True)
with self.assertRaises(ValueError):
cnn = CrystalNN(weighted_cn=True)
cnn.get_cn(self.lifepo4, 0, use_weights=False)
def test_discrete_cn(self):
cnn = CrystalNN()
cn_array = []
expected_array = [6, 6, 6, 6, 6, 6, 6, 6, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4]
for idx, _ in enumerate(self.lifepo4):
cn_array.append(cnn.get_cn(self.lifepo4, idx))
self.assertSequenceEqual(cn_array, expected_array)
def test_weighted_cn(self):
cnn = CrystalNN(weighted_cn=True)
cn_array = []
expected_array = [5.863, 5.8716, 5.863, 5.8716, 5.7182, 5.7182, 5.719,
5.7181, 3.991, 3.991, 3.991, 3.9907, 3.5997, 3.525,
3.4133, 3.4714, 3.4727, 3.4133, 3.525, 3.5997,
3.5997, 3.525, 3.4122, 3.4738, 3.4728, 3.4109,
3.5259, 3.5997]
for idx, _ in enumerate(self.lifepo4):
cn_array.append(cnn.get_cn(self.lifepo4, idx, use_weights=True))
self.assertArrayAlmostEqual(expected_array, cn_array, 2)
def test_weighted_cn_no_oxid(self):
cnn = CrystalNN(weighted_cn=True)
cn_array = []
expected_array = [5.8962, 5.8996, 5.8962, 5.8996, 5.7195, 5.7195,
5.7202, 5.7194, 4.0012, 4.0012, 4.0012, 4.0009,
3.3897, 3.2589, 3.1218, 3.1914, 3.1914, 3.1218,
3.2589, 3.3897, 3.3897, 3.2589, 3.1207, 3.1924,
3.1915, 3.1207, 3.2598, 3.3897]
s = self.lifepo4.copy()
s.remove_oxidation_states()
for idx, _ in enumerate(s):
cn_array.append(cnn.get_cn(s, idx, use_weights=True))
self.assertArrayAlmostEqual(expected_array, cn_array, 2)
def test_fixed_length(self):
cnn = CrystalNN(fingerprint_length=30)
nndata = cnn.get_nn_data(self.lifepo4, 0)
self.assertEqual(len(nndata.cn_weights), 30)
self.assertEqual(len(nndata.cn_nninfo), 30)
def test_cation_anion(self):
cnn = CrystalNN(weighted_cn=True, cation_anion=True)
self.assertAlmostEqual(cnn.get_cn(self.lifepo4, 0, use_weights=True),
5.8630, 2)
def test_x_diff_weight(self):
cnn = CrystalNN(weighted_cn=True, x_diff_weight=0)
self.assertAlmostEqual(cnn.get_cn(self.lifepo4, 0, use_weights=True),
5.8630, 2)
def test_noble_gas_material(self):
cnn = CrystalNN()
self.assertEqual(cnn.get_cn(self.he_bcc, 0, use_weights=False), 0)
cnn = CrystalNN(distance_cutoffs=(1.25, 5))
self.assertEqual(cnn.get_cn(self.he_bcc, 0, use_weights=False), 8)
def test_shifted_sites(self):
cnn = CrystalNN()
sites = [[0., 0.2, 0.2], [0, 0, 0]]
struct = Structure([7, 0, 0, 0, 7, 0, 0, 0, 7], ['I'] * len(sites), sites)
bonded_struct = cnn.get_bonded_structure(struct)
sites_shifted = [[1., 0.2, 0.2], [0, 0, 0]]
struct_shifted = Structure([7, 0, 0, 0, 7, 0, 0, 0, 7], ['I'] * len(sites_shifted),
sites_shifted)
bonded_struct_shifted = cnn.get_bonded_structure(struct_shifted)
self.assertEqual(len(bonded_struct.get_connected_sites(0)),
len(bonded_struct_shifted.get_connected_sites(0)))
class CutOffDictNNTest(PymatgenTest):
def setUp(self):
self.diamond = Structure(
Lattice([[2.189, 0, 1.264], [0.73, 2.064, 1.264], [0, 0, 2.528]]),
["C", "C"], [[2.554, 1.806, 4.423], [0.365, 0.258, 0.632]],
coords_are_cartesian=True
)
self.prev_warnings = warnings.filters
warnings.simplefilter("ignore")
def tearDown(self):
warnings.filters = self.prev_warnings
def test_cn(self):
nn = CutOffDictNN({('C', 'C'): 2})
self.assertEqual(nn.get_cn(self.diamond, 0), 4)
nn_null = CutOffDictNN()
self.assertEqual(nn_null.get_cn(self.diamond, 0), 0)
def test_from_preset(self):
nn = CutOffDictNN.from_preset("vesta_2019")
self.assertEqual(nn.get_cn(self.diamond, 0), 4)
# test error thrown on unknown preset
self.assertRaises(ValueError, CutOffDictNN.from_preset, "test")
@unittest.skipIf(not which('critic2'), "critic2 executable not present")
class Critic2NNTest(PymatgenTest):
def setUp(self):
self.diamond = Structure(
Lattice([[2.189, 0, 1.264], [0.73, 2.064, 1.264], [0, 0, 2.528]]),
["C", "C"], [[2.554, 1.806, 4.423], [0.365, 0.258, 0.632]],
coords_are_cartesian=True
)
self.prev_warnings = warnings.filters
warnings.simplefilter("ignore")
def tearDown(self):
warnings.filters = self.prev_warnings
def test_cn(self):
nn = Critic2NN()
# self.assertEqual(nn.get_cn(self.diamond, 0), 4)
if __name__ == '__main__':
unittest.main()
|
|
"""Test EQ3 Max! Thermostats."""
from datetime import timedelta
from maxcube.cube import MaxCube
from maxcube.device import (
MAX_DEVICE_MODE_AUTOMATIC,
MAX_DEVICE_MODE_BOOST,
MAX_DEVICE_MODE_MANUAL,
MAX_DEVICE_MODE_VACATION,
)
from maxcube.thermostat import MaxThermostat
from maxcube.wallthermostat import MaxWallThermostat
import pytest
from homeassistant.components.climate.const import (
ATTR_CURRENT_TEMPERATURE,
ATTR_HVAC_ACTION,
ATTR_HVAC_MODE,
ATTR_HVAC_MODES,
ATTR_MAX_TEMP,
ATTR_MIN_TEMP,
ATTR_PRESET_MODE,
ATTR_PRESET_MODES,
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
CURRENT_HVAC_OFF,
DOMAIN as CLIMATE_DOMAIN,
HVAC_MODE_AUTO,
HVAC_MODE_DRY,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_BOOST,
PRESET_COMFORT,
PRESET_ECO,
PRESET_NONE,
SERVICE_SET_HVAC_MODE,
SERVICE_SET_PRESET_MODE,
SERVICE_SET_TEMPERATURE,
)
from homeassistant.components.maxcube.climate import (
MAX_TEMPERATURE,
MIN_TEMPERATURE,
OFF_TEMPERATURE,
ON_TEMPERATURE,
PRESET_ON,
SUPPORT_FLAGS,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_FRIENDLY_NAME,
ATTR_SUPPORTED_FEATURES,
ATTR_TEMPERATURE,
)
from homeassistant.helpers import entity_registry as er
from homeassistant.util import utcnow
from tests.common import async_fire_time_changed
ENTITY_ID = "climate.testroom_testthermostat"
WALL_ENTITY_ID = "climate.testroom_testwallthermostat"
VALVE_POSITION = "valve_position"
async def test_setup_thermostat(hass, cube: MaxCube):
"""Test a successful setup of a thermostat device."""
entity_registry = er.async_get(hass)
assert entity_registry.async_is_registered(ENTITY_ID)
entity = entity_registry.async_get(ENTITY_ID)
assert entity.unique_id == "AABBCCDD01"
state = hass.states.get(ENTITY_ID)
assert state.state == HVAC_MODE_AUTO
assert state.attributes.get(ATTR_FRIENDLY_NAME) == "TestRoom TestThermostat"
assert state.attributes.get(ATTR_HVAC_ACTION) == CURRENT_HVAC_HEAT
assert state.attributes.get(ATTR_HVAC_MODES) == [
HVAC_MODE_OFF,
HVAC_MODE_AUTO,
HVAC_MODE_HEAT,
]
assert state.attributes.get(ATTR_PRESET_MODES) == [
PRESET_NONE,
PRESET_BOOST,
PRESET_COMFORT,
PRESET_ECO,
PRESET_AWAY,
PRESET_ON,
]
assert state.attributes.get(ATTR_PRESET_MODE) == PRESET_NONE
assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == SUPPORT_FLAGS
assert state.attributes.get(ATTR_MAX_TEMP) == MAX_TEMPERATURE
assert state.attributes.get(ATTR_MIN_TEMP) == 5.0
assert state.attributes.get(ATTR_CURRENT_TEMPERATURE) == 19.0
assert state.attributes.get(ATTR_TEMPERATURE) == 20.5
assert state.attributes.get(VALVE_POSITION) == 25
async def test_setup_wallthermostat(hass, cube: MaxCube):
"""Test a successful setup of a wall thermostat device."""
entity_registry = er.async_get(hass)
assert entity_registry.async_is_registered(WALL_ENTITY_ID)
entity = entity_registry.async_get(WALL_ENTITY_ID)
assert entity.unique_id == "AABBCCDD02"
state = hass.states.get(WALL_ENTITY_ID)
assert state.state == HVAC_MODE_OFF
assert state.attributes.get(ATTR_FRIENDLY_NAME) == "TestRoom TestWallThermostat"
assert state.attributes.get(ATTR_HVAC_ACTION) == CURRENT_HVAC_HEAT
assert state.attributes.get(ATTR_PRESET_MODE) == PRESET_NONE
assert state.attributes.get(ATTR_MAX_TEMP) == 29.0
assert state.attributes.get(ATTR_MIN_TEMP) == 5.0
assert state.attributes.get(ATTR_CURRENT_TEMPERATURE) == 19.0
assert state.attributes.get(ATTR_TEMPERATURE) is None
async def test_thermostat_set_hvac_mode_off(
hass, cube: MaxCube, thermostat: MaxThermostat
):
"""Turn off thermostat."""
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_HVAC_MODE: HVAC_MODE_OFF},
blocking=True,
)
cube.set_temperature_mode.assert_called_once_with(
thermostat, OFF_TEMPERATURE, MAX_DEVICE_MODE_MANUAL
)
thermostat.mode = MAX_DEVICE_MODE_MANUAL
thermostat.target_temperature = OFF_TEMPERATURE
thermostat.valve_position = 0
async_fire_time_changed(hass, utcnow() + timedelta(minutes=5))
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert state.state == HVAC_MODE_OFF
assert state.attributes.get(ATTR_TEMPERATURE) is None
assert state.attributes.get(ATTR_HVAC_ACTION) == CURRENT_HVAC_OFF
assert state.attributes.get(VALVE_POSITION) == 0
wall_state = hass.states.get(WALL_ENTITY_ID)
assert wall_state.attributes.get(ATTR_HVAC_ACTION) == CURRENT_HVAC_OFF
async def test_thermostat_set_hvac_mode_heat(
hass, cube: MaxCube, thermostat: MaxThermostat
):
"""Set hvac mode to heat."""
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_HVAC_MODE: HVAC_MODE_HEAT},
blocking=True,
)
cube.set_temperature_mode.assert_called_once_with(
thermostat, 20.5, MAX_DEVICE_MODE_MANUAL
)
thermostat.mode = MAX_DEVICE_MODE_MANUAL
async_fire_time_changed(hass, utcnow() + timedelta(minutes=5))
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert state.state == HVAC_MODE_HEAT
async def test_thermostat_set_invalid_hvac_mode(
hass, cube: MaxCube, thermostat: MaxThermostat
):
"""Set hvac mode to heat."""
with pytest.raises(ValueError):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_HVAC_MODE: HVAC_MODE_DRY},
blocking=True,
)
cube.set_temperature_mode.assert_not_called()
async def test_thermostat_set_temperature(
hass, cube: MaxCube, thermostat: MaxThermostat
):
"""Set hvac mode to heat."""
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_TEMPERATURE: 10.0},
blocking=True,
)
cube.set_temperature_mode.assert_called_once_with(thermostat, 10.0, None)
thermostat.target_temperature = 10.0
thermostat.valve_position = 0
async_fire_time_changed(hass, utcnow() + timedelta(minutes=5))
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert state.state == HVAC_MODE_AUTO
assert state.attributes.get(ATTR_TEMPERATURE) == 10.0
assert state.attributes.get(ATTR_HVAC_ACTION) == CURRENT_HVAC_IDLE
async def test_thermostat_set_no_temperature(
hass, cube: MaxCube, thermostat: MaxThermostat
):
"""Set hvac mode to heat."""
with pytest.raises(ValueError):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_TEMPERATURE,
{
ATTR_ENTITY_ID: ENTITY_ID,
ATTR_TARGET_TEMP_HIGH: 29.0,
ATTR_TARGET_TEMP_LOW: 10.0,
},
blocking=True,
)
cube.set_temperature_mode.assert_not_called()
async def test_thermostat_set_preset_on(hass, cube: MaxCube, thermostat: MaxThermostat):
"""Set preset mode to on."""
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_PRESET_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_PRESET_MODE: PRESET_ON},
blocking=True,
)
cube.set_temperature_mode.assert_called_once_with(
thermostat, ON_TEMPERATURE, MAX_DEVICE_MODE_MANUAL
)
thermostat.mode = MAX_DEVICE_MODE_MANUAL
thermostat.target_temperature = ON_TEMPERATURE
async_fire_time_changed(hass, utcnow() + timedelta(minutes=5))
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert state.state == HVAC_MODE_HEAT
assert state.attributes.get(ATTR_TEMPERATURE) is None
assert state.attributes.get(ATTR_PRESET_MODE) == PRESET_ON
async def test_thermostat_set_preset_comfort(
hass, cube: MaxCube, thermostat: MaxThermostat
):
"""Set preset mode to comfort."""
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_PRESET_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_PRESET_MODE: PRESET_COMFORT},
blocking=True,
)
cube.set_temperature_mode.assert_called_once_with(
thermostat, thermostat.comfort_temperature, MAX_DEVICE_MODE_MANUAL
)
thermostat.mode = MAX_DEVICE_MODE_MANUAL
thermostat.target_temperature = thermostat.comfort_temperature
async_fire_time_changed(hass, utcnow() + timedelta(minutes=5))
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert state.state == HVAC_MODE_HEAT
assert state.attributes.get(ATTR_TEMPERATURE) == thermostat.comfort_temperature
assert state.attributes.get(ATTR_PRESET_MODE) == PRESET_COMFORT
async def test_thermostat_set_preset_eco(
hass, cube: MaxCube, thermostat: MaxThermostat
):
"""Set preset mode to eco."""
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_PRESET_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_PRESET_MODE: PRESET_ECO},
blocking=True,
)
cube.set_temperature_mode.assert_called_once_with(
thermostat, thermostat.eco_temperature, MAX_DEVICE_MODE_MANUAL
)
thermostat.mode = MAX_DEVICE_MODE_MANUAL
thermostat.target_temperature = thermostat.eco_temperature
async_fire_time_changed(hass, utcnow() + timedelta(minutes=5))
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert state.state == HVAC_MODE_HEAT
assert state.attributes.get(ATTR_TEMPERATURE) == thermostat.eco_temperature
assert state.attributes.get(ATTR_PRESET_MODE) == PRESET_ECO
async def test_thermostat_set_preset_away(
hass, cube: MaxCube, thermostat: MaxThermostat
):
"""Set preset mode to away."""
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_PRESET_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_PRESET_MODE: PRESET_AWAY},
blocking=True,
)
cube.set_temperature_mode.assert_called_once_with(
thermostat, None, MAX_DEVICE_MODE_VACATION
)
thermostat.mode = MAX_DEVICE_MODE_VACATION
thermostat.target_temperature = thermostat.eco_temperature
async_fire_time_changed(hass, utcnow() + timedelta(minutes=5))
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert state.state == HVAC_MODE_HEAT
assert state.attributes.get(ATTR_TEMPERATURE) == thermostat.eco_temperature
assert state.attributes.get(ATTR_PRESET_MODE) == PRESET_AWAY
async def test_thermostat_set_preset_boost(
hass, cube: MaxCube, thermostat: MaxThermostat
):
"""Set preset mode to boost."""
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_PRESET_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_PRESET_MODE: PRESET_BOOST},
blocking=True,
)
cube.set_temperature_mode.assert_called_once_with(
thermostat, None, MAX_DEVICE_MODE_BOOST
)
thermostat.mode = MAX_DEVICE_MODE_BOOST
thermostat.target_temperature = thermostat.eco_temperature
async_fire_time_changed(hass, utcnow() + timedelta(minutes=5))
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert state.state == HVAC_MODE_AUTO
assert state.attributes.get(ATTR_TEMPERATURE) == thermostat.eco_temperature
assert state.attributes.get(ATTR_PRESET_MODE) == PRESET_BOOST
async def test_thermostat_set_preset_none(
hass, cube: MaxCube, thermostat: MaxThermostat
):
"""Set preset mode to boost."""
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_PRESET_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_PRESET_MODE: PRESET_NONE},
blocking=True,
)
cube.set_temperature_mode.assert_called_once_with(
thermostat, None, MAX_DEVICE_MODE_AUTOMATIC
)
async def test_thermostat_set_invalid_preset(
hass, cube: MaxCube, thermostat: MaxThermostat
):
"""Set hvac mode to heat."""
with pytest.raises(ValueError):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_PRESET_MODE,
{ATTR_ENTITY_ID: ENTITY_ID, ATTR_PRESET_MODE: "invalid"},
blocking=True,
)
cube.set_temperature_mode.assert_not_called()
async def test_wallthermostat_set_hvac_mode_heat(
hass, cube: MaxCube, wallthermostat: MaxWallThermostat
):
"""Set wall thermostat hvac mode to heat."""
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: WALL_ENTITY_ID, ATTR_HVAC_MODE: HVAC_MODE_HEAT},
blocking=True,
)
cube.set_temperature_mode.assert_called_once_with(
wallthermostat, MIN_TEMPERATURE, MAX_DEVICE_MODE_MANUAL
)
wallthermostat.target_temperature = MIN_TEMPERATURE
async_fire_time_changed(hass, utcnow() + timedelta(minutes=5))
await hass.async_block_till_done()
state = hass.states.get(WALL_ENTITY_ID)
assert state.state == HVAC_MODE_HEAT
assert state.attributes.get(ATTR_TEMPERATURE) == MIN_TEMPERATURE
async def test_wallthermostat_set_hvac_mode_auto(
hass, cube: MaxCube, wallthermostat: MaxWallThermostat
):
"""Set wall thermostat hvac mode to auto."""
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: WALL_ENTITY_ID, ATTR_HVAC_MODE: HVAC_MODE_AUTO},
blocking=True,
)
cube.set_temperature_mode.assert_called_once_with(
wallthermostat, None, MAX_DEVICE_MODE_AUTOMATIC
)
wallthermostat.mode = MAX_DEVICE_MODE_AUTOMATIC
wallthermostat.target_temperature = 23.0
async_fire_time_changed(hass, utcnow() + timedelta(minutes=5))
await hass.async_block_till_done()
state = hass.states.get(WALL_ENTITY_ID)
assert state.state == HVAC_MODE_AUTO
assert state.attributes.get(ATTR_TEMPERATURE) == 23.0
|
|
import re
import sys
import json
import locale
from tornado.options import options
from dateutil import tz
class Filters():
'''
Checks whether the passed-in value is considered useful; returns None otherwise.
Returns None for the following values:
None
''
'null'
'undefined'
{}
'''
@staticmethod
def val(val):
if val is None:
return None
if val == 'null':
return None
if val == 'undefined':
return None
if val == 0:
return val
if isinstance(val, basestring) and len(val) == 0:
return None
if isinstance(val, dict) and len(val) == 0:
return None
return val
@staticmethod
def version():
try:
return options.version
except:
return ''
@staticmethod
def str(val):
if not val:
return ''
#TODO: sensibly handle:
# dicts => json
# dates => pretty
# numbers => add commas
return str(val)
'''
Checks for various styles of true.
matches on True, 'true', 'on'
'''
@staticmethod
def is_true(val):
if not val:
return False
if isinstance(val, basestring):
if val == 'True' or val == 'true' or val == 'on':
return True
return False
if val == True:
return True
return False
@staticmethod
def strip_html(data):
if not data:
return
p = re.compile(r'<[^<]*?/?>')
return p.sub('', data)
@staticmethod
def long_timestamp(dt_str, tz="America/New_York"):
utc_dt = Filters._convert_utc_to_local(dt_str, tz)
if utc_dt:
return utc_dt.strftime("%A, %d. %B %Y %I:%M %p")
else:
return dt_str
@staticmethod
def short_timestamp(dt_str, tz="America/New_York"):
tz_dt = Filters._convert_utc_to_local(dt_str, tz)
return tz_dt.strftime("%m/%d/%Y %I:%M %p")
@staticmethod
def short_date(dt_str, tz="America/New_York"):
tz_dt = Filters._convert_utc_to_local(dt_str, tz)
return tz_dt.strftime("%m/%d/%Y")
@staticmethod
def ellipsis(data, limit, append='...'):
return (data[:limit] + append) if len(data) > limit else data
'''
filter to translate a dict to json
'''
@staticmethod
def to_json(data):
return json.dumps(data, skipkeys=True)
@staticmethod
def idize(val):
return (re.sub(r'[^0-9a-zA-Z]', '_', val)).lower()
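# Illustrative: Filters.idize('Hello World!') -> 'hello_world_'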
@staticmethod
def _convert_utc_to_local(utc_dt, timezone):
try:
from_zone = tz.gettz('UTC')
to_zone = tz.gettz(timezone)
utc_dt = utc_dt.replace(tzinfo=from_zone)
return utc_dt.astimezone(to_zone)
except Exception:
print sys.exc_info()
return None
@staticmethod
def url_pretty(val):
if not val:
return
url = re.sub(r'[^0-9a-zA-Z]', '_', Filters.str(val))
url = re.sub('_+', '_', url)
#max 32 chars.
if len(url) > 32:
url = url[0:32]
return url
@staticmethod
def add_commas(val, as_data_type='int', the_locale=locale.LC_ALL):
locale.setlocale(the_locale, "")
if as_data_type == 'int':
return locale.format('%d', int(val), True)
elif as_data_type == 'float':
return locale.format('%f', float(val), True)
else:
return val
@staticmethod
def get_time_string(val):
if val == "N/A":
return val
parts = val.split("/")
isPM = parts[0].find('am') == -1
if not isPM:
parts[0] = parts[0].replace("am", "")
# Strip the trailing 'c' from the second part regardless of am/pm so the
# colon insertion below sees only digits; 'c' is re-appended at the end.
parts[1] = parts[1].replace("c", "")
if len(parts[0]) >= 3:
if len(parts[0]) == 4:
parts[0] = parts[0][0:2] + ":" + parts[0][2:]
else:
parts[0] = parts[0][:1] + ":" + parts[0][1:]
if len(parts[1]) >= 3:
if len(parts[1]) == 4:
parts[1] = parts[1][0:2] + ":" + parts[1][2:]
else:
parts[1] = parts[1][:1] + ":" + parts[1][1:]
if isPM:
time = parts[0] + "/" + parts[1] + "c"
else:
time = parts[0] + "am/" + parts[1] + "c"
return time
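# Illustrative examples (assuming the input format "<eastern>/<central>c"):
# get_time_string("830/730c") -> "8:30/7:30c"
# get_time_string("1130am/1030c") -> "11:30am/10:30c"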
@staticmethod
def pluralize(noun):
pl = Pluralizer()
return pl.plural(noun)
'''
Does a get on the dict. Works with a dot-separated key path and never
throws an exception; returns default if the key path doesn't resolve.
Will also reach into lists via integer keys.
example:
{
'key1' : {
'subkey' : [{'subsubkey1':9},{}]
}
}
Filters.dict_get(d, 'key1.subkey.0.subsubkey1') => 9
'''
@staticmethod
def dict_get(dict, key, default=None):
#Surround this with try in case key is None or not a string or something
try:
keys = key.split(".")
except:
return default
tmp = dict
for k in keys:
try:
tmp = tmp[k]
except TypeError:
#Issue may be that we have something like '0'. Try converting to a number
try:
tmp = tmp[int(k)]
except:
#Either couldn't convert or went out of bounds on list
return default
except:
#Exception other than TypeError probably missing key, so default
return default
return tmp
class Pluralizer():
#
# (pattern, search, replace) regex english plural rules tuple
#
rule_tuple = (
('[ml]ouse$', '([ml])ouse$', '\\1ice'),
('child$', 'child$', 'children'),
('booth$', 'booth$', 'booths'),
('foot$', 'foot$', 'feet'),
('ooth$', 'ooth$', 'eeth'),
('l[eo]af$', 'l([eo])af$', 'l\\1aves'),
('sis$', 'sis$', 'ses'),
('man$', 'man$', 'men'),
('ife$', 'ife$', 'ives'),
('eau$', 'eau$', 'eaux'),
('lf$', 'lf$', 'lves'),
('[sxz]$', '$', 'es'),
('[^aeioudgkprt]h$', '$', 'es'),
('(qu|[^aeiou])y$', 'y$', 'ies'),
('$', '$', 's')
)
def regex_rules(self, rules=rule_tuple):
for line in rules:
pattern, search, replace = line
yield lambda word: re.search(pattern, word) and re.sub(search, replace, word)
def plural(self, noun):
for rule in self.regex_rules():
result = rule(noun)
if result:
return result
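# Illustrative applications of the rules above (first matching rule wins):
# Pluralizer().plural('mouse') -> 'mice'
# Pluralizer().plural('leaf') -> 'leaves'
# Pluralizer().plural('box') -> 'boxes'
# NB: the lambdas in regex_rules close over the loop variables; this is safe
# only because plural() calls each rule before advancing the generator.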
class Cycler():
cycle_registry = {}
@staticmethod
def uuid():
import uuid
return uuid.uuid1()
@staticmethod
def cycle(values, name='default'):
if name in Cycler.cycle_registry:
try:
return Cycler.cycle_registry[name].next()
except StopIteration:
Cycler.cycle_registry[name] = iter(values)
return Cycler.cycle_registry[name].next()
else:
Cycler.cycle_registry[name] = iter(values)
return Cycler.cycle_registry[name].next()
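# Illustrative usage (Python 2 iterator protocol, hence .next()):
# Cycler.cycle(['odd', 'even'], name='rows') -> 'odd'
# Cycler.cycle(['odd', 'even'], name='rows') -> 'even'
# Cycler.cycle(['odd', 'even'], name='rows') -> 'odd' again, via StopIteration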
|
|
#
# Copyright John Reid 2013, 2014
#
"""
A python package for non-parametric sequence models.
"""
import logging
_logger = logging.getLogger(__name__)
import numpy.random
import math
import seqan.traverse
import seqan
from copy import copy
from collections import defaultdict
# Types we use to create strings and indexes
Value = seqan.DNA
uniformovervalues = 1. / Value.valueSize # uniform distribution over all the values
String = seqan.StringDNA
StringSet = seqan.StringDNASet
ESA = seqan.IndexStringDNASetESA
def quote(s):
"""Wrap the string in quotes."""
return '"%s"' % s
def prefixfor(it):
"""The prefix for a prefix tree iterator is the reverse of
its representative."""
return str(it.representative)[::-1]
def curtail(string, length=40):
"""If the string is longer than length characters, shorten it and replace last
characters with ellipses."""
if len(string) > length:
return '{0}...'.format(string[:length-3])
else:
return string
def make_prefix_index(seqs):
"Make an index out of the reverse of the sequences."
sequences = StringSet()
for seq in seqs:
_logger.debug('Building prefix index from: %s', curtail(seq))
sequences.appendValue(String(seq[::-1]))
return ESA(sequences)
def count_prefixes(prefixindex, prefix_counts=None, i=None):
"""Recursive function that counts how many times each
prefix occurs in the prefixindex.
Complexity: O(n log(n)) where n is the length of the text
"""
from itertools import imap
# Use a dictionary if no counts object provided
if prefix_counts is None:
prefix_counts = dict()
# Use a root topdown iterator if none provided
if i is None:
i = prefixindex.topdown()
# Double-check all occurrences match
assert [i.representative] * i.numOccurrences == \
[prefixindex.text[occ.i1][occ.i2:occ.i2+i.repLength]
for occ in i.occurrences]
# Count how many occurrences match the whole string
prefix_count = i.numOccurrences
copyi = i.copy()
# Alternative calculation for prefix counts
# alt_calculation = sum(imap(
# lambda occ: occ.i2 + i.repLength == len(prefixindex.text[occ.i1]),
# i.occurrences))
# Recurse
if i.goDown():
while True:
# Any occurrences in children do not match the whole string
prefix_count -= i.numOccurrences
# Recurse
count_prefixes(prefixindex, prefix_counts, i.copy())
if not i.goRight():
break
# Update counts if any occurrences matched the whole string
if prefix_count:
prefix_counts[copyi] = prefix_count
_logger.debug('Have %3d prefixes of: "%s"',
prefix_count, str(i.representative)[::-1])
# Check our prefix count against alternative calculation
# assert prefix_count == alt_calculation
# Return counts
return prefix_counts
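# Example (a sketch of intended usage):
# prefix_index = make_prefix_index(['ACGT', 'ACG'])
# counts = count_prefixes(prefix_index)
# Each entry maps a prefix-tree iterator to how many of the input sequences
# start with the prefix that iterator represents.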
def count_contexts(prefixindex):
"""Count all the number of times each base is emitted in each context.
Complexity: O(n log(n)) (I think)
"""
context_counts = numpy.zeros((2 * len(prefixindex), Value.valueSize), dtype=int)
def countcontextsforprefix(prefix_i, count):
# context is all but last symbol, reversed
prefix = str(prefix_i.representative)[::-1]
u = prefix[:-1]
#x = prefix_i.representative[prefix_i.repLength-1]
x = prefix_i.representative[0]
#_logger.debug('prefix = "%s"', prefix)
#_logger.debug('u = %s', u)
#_logger.debug('x = %s%s', ' ' * len(u), x)
assert prefix == u + str(x)
#_logger.debug(u[::-1])
#_logger.debug(str(prefix_i.representative)[1:])
assert u[::-1] == str(prefix_i.representative)[1:]
u_i = prefixindex.topdown()
# Check that we can descend the prefix tree to the correct context
if not u_i.goDown(u[::-1]):
raise ValueError('Could not descend context')
if count:
context_counts[u_i.value.id][x.ordValue] += count
seqan.findsuffixes(prefixindex.topdown(), countcontextsforprefix)
return context_counts
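# The returned array is indexed by prefix-tree vertex id then base ordinal:
# context_counts[u_id][x_ord] counts how often base x was emitted after
# context u across all input sequences.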
def cactomodelfromseqs(seqs):
"""Build a Cacto model from a given set of sequences."""
return CactoModel(make_prefix_index(seqs))
class CactoModel(object):
"""A non-parametric sequence model.
"""
def __init__(self, prefixtree, theta=1., d=0.):
if 0 > d or d >= 1:
raise ValueError('d must satisfy 0 <= d < 1')
if theta <= -d:
raise ValueError('Theta must be > -d')
self._theta = theta
self._d = d
self.prefixindex = prefixtree
self.t = numpy.zeros((2 * len(self.prefixindex), Value.valueSize), dtype=int)
self.s = numpy.zeros((2 * len(self.prefixindex), Value.valueSize), dtype=int)
self._initialise()
def _initialise(self):
"""Initialise the table counts."""
s = count_contexts(self.prefixindex)
def initialise_vertex(it):
"Initialise the vertex the iterator points to."
id_ = it.value.id
for xord, count in enumerate(s[id_]):
for _ in xrange(count):
self._initialise_with(xord, copy(it))
self.s[id_,xord] += 1
return True
seqan.traverse.depthfirsttraversal(self.prefixindex, initialise_vertex)
assert (self.s == s).all()
def _initialise_with(self, xord, i):
"""Take account of drawing x from the context at i in the prefix tree during
model initialisation.
"""
ulen = i.repLength
du = self.d(ulen)
tu = self._tu(i)
oddsoldtable = (self.s[i.value.id,xord] + du * tu[xord]) / (
self.p_xord_given_ui(xord, i) * (self.theta(ulen) + du * tu.sum())
)
poldtable = oddsoldtable / (1 + oddsoldtable)
if numpy.random.uniform() >= poldtable:
# new table
tu[xord] += 1
# go up to the parent context if there is one
if i.goUp():
self._initialise_with(xord, i)
def _locate_context(self, u, topdownhistory=False):
"Iterate down to the context u."
if topdownhistory:
i = self.prefixindex.topdownhistory()
else:
i = self.prefixindex.topdown()
i.goDown(u[::-1])
return i
def log_context_counts(self, it):
"""Visitor function to be used in traversal
to log context counts."""
_logger.debug('Context counts: %-10s: %s',
quote(prefixfor(it)), self.s[it.value.id])
return True
def log_table_counts(self, it):
"""Visitor function to be used in traversal
to log table counts."""
_logger.debug('Table counts: %-10s: %s',
quote(prefixfor(it)), self.t[it.value.id])
return True
def _tu(self, i):
"The table counts for the given context."
return self.t[i.value.id]
def _tu_children(self, i):
"Get the counts of tables in the children."
result = numpy.zeros(Value.valueSize, dtype=int)
if i.goDown():
while True:
result += self._tu(i)
if not i.goRight():
break
i.goUp()
return result
def _su(self, i):
"The prefix counts for the given context."
return self.s[i.value.id]
def theta(self, context_len):
"Theta for the context length."
return self._theta
def d(self, context_len):
"Discount parameter for the context length."
return self._d
def calculateposterior(self):
"""Calculate the posterior p(x|u) for all emissions x and contexts u.
Posterior is returned as a numpy array indexed by the vertex id of u
then the ordinal of base x."""
posterior = numpy.zeros((2 * len(self.prefixindex), Value.valueSize), dtype=float)
def visitvertex(it):
# Do we have a parent node whose posterior we can use?
if it.isRoot:
# No: parent posterior is uniform distribution
parent_posterior = numpy.ones(Value.valueSize) * uniformovervalues
else:
# Yes: parent posterior has already been calculated
parent_posterior = posterior[it.nodeUp.id]
ulen = it.repLength
su = self._su(it)
tu = self._tu(it)
tu_children = self._tu_children(it)
du = self.d(ulen)
thetau = self.theta(ulen)
# Contribution from this node
posterior[it.value.id] = (
su + tu_children - du * tu
+ (thetau + du * tu.sum()) * parent_posterior
) / (
thetau + su.sum() + tu_children.sum()
)
return True
seqan.traverse.topdownhistorytraversal(self.prefixindex.topdownhistory(), visitvertex)
return posterior
def p_xord_given_ui(self, xord, i):
"""Recursive function to determine likelihood, p(x|u).
- xord: The ordinal value of x.
- i: A top down history iterator for the node in the
prefix tree that represents u
"""
ulen = i.repLength
su = self._su(i)
tu = self._tu(i)
tu_children = self._tu_children(i)
du = self.d(ulen)
thetau = self.theta(ulen)
# p(x|sigma(u))
if i.goUp():
p_x_sigmau = self.p_xord_given_ui(xord, i)
else:
p_x_sigmau = uniformovervalues
# Contribution from this node
return (
su[xord] + tu_children[xord] - du * tu[xord]
+ (thetau + du * tu.sum()) * p_x_sigmau
) / (
thetau + su.sum() + tu_children.sum()
)
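# Reading off the return value above (a gloss on this code, not a quote from
# any paper), the recursion has the usual Pitman-Yor flavour:
#   p(x|u) = [ s_u(x) + t_children(x) - d_u * t_u(x)
#              + (theta_u + d_u * sum(t_u)) * p(x|sigma(u)) ]
#            / (theta_u + sum(s_u) + sum(t_children))
# where sigma(u) is the context u with its oldest symbol dropped.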
def _p_xord_given_u(self, i, x, u, p_parent):
"""Recursive function used to determine likelihoods"""
context_len = len(u)
su = self._su(i)
tu = self._tu(i)
tu_children = self._tu_children(i)
d = self.d(context_len)
theta = self.theta(context_len)
pG_x_given_u = (
su[x]
+ tu_children[x]
- d * tu[x]
+ (theta + d * tu.sum()) * p_parent
) / (
theta + sum(su) + tu_children.sum()
)
_logger.debug(
' : p_G(x=%s|u=%-15s) = %.3e',
Value.fromOrdinal(x),
quote(str(i.representative)[::-1]),
pG_x_given_u)
# We should keep descending if we matched the whole of the
# representative so far and there is more tree to descend
# that matches at least part of the rest of u
if (
i.repLength < len(u)
and i.representative == u[:i.repLength]
and i.goDown(u[-1-i.repLength])
):
return self._p_xord_given_u(i, x, u, pG_x_given_u)
else:
# can't go any further down this context
return pG_x_given_u
def p_xord_given_u(self, xord, u):
"p(x|u) where u is the context and xord is the ordinal value of the next symbol"
return self._p_xord_given_u(
self.prefixindex.topdownhistory(),
xord,
u,
uniformovervalues)
def p_x_given_u(self, x, u):
"""p(x|u) where u is the context and x is the next symbol. This is less efficient
than"""
_logger.debug('Evaluating: p_G(x=%s|u=%-15s)', x, quote(u))
return self.p_xord_given_u(x.ordValue, u)
#_logger.debug(' : p_G(x=%s|u=%-15s) = %.3e', x, quote(u), p_x_given_u)
def seqsloglikelihood(self, seqs=None, seqsprefixindex=None, modelposterior=None):
"""The log likelihood of the sequences.
The function builds a prefix tree of the sequences and counts how
many emissions have been made for each context. Then the prefix
tree is descended concurrently to the """
if modelposterior is None:
modelposterior = self.calculateposterior()
if (seqs is None) == (seqsprefixindex is None):
raise ValueError('Please specify exactly one of seqs or seqsprefixindex')
if seqsprefixindex is None:
seqsprefixindex = make_prefix_index(seqs)
# The number of times each base is emitted in each context of the
# sequences
s = count_contexts(seqsprefixindex)
class Visitor(object):
def __init__(self):
self.ll = 0.
def __call__(self, modelit, seqsit, stillsynced):
self.ll += (
s[seqsit.value.id]
* numpy.log(modelposterior[modelit.value.id])).sum()
return True
visitor = Visitor()
seqan.traverse.topdownparalleltraversal(
self.prefixindex.topdownhistory(),
seqsprefixindex.topdownhistory(),
visitor)
return visitor.ll
|
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
from abc import abstractmethod
from collections import OrderedDict
from twitter.common.collections import OrderedSet
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnitLabel
from pants.build_graph.address import Address
from pants.build_graph.address_lookup_error import AddressLookupError
from pants.source.wrapped_globs import EagerFilesetWithSpec, FilesetRelPathWrapper
from pants.task.task import Task
from pants.util.dirutil import fast_relpath, safe_delete, safe_walk
logger = logging.getLogger(__name__)
class EmptyDepContext(object):
codegen_types = tuple()
class SimpleCodegenTask(Task):
"""A base-class for code generation for a single target language.
:API: public
"""
# Subclasses may override to provide the type of gen targets the target acts on.
# E.g., JavaThriftLibrary. If not provided, the subclass must implement is_gentarget.
gentarget_type = None
def __init__(self, context, workdir):
"""
Add pass-thru Task Constructor for public API visibility.
:API: public
"""
super(SimpleCodegenTask, self).__init__(context, workdir)
@classmethod
def product_types(cls):
# NB(gmalmquist): This is a hack copied from the old CodeGen base class to get the round manager
# to properly run codegen before resolve and compile. It would be more correct to just have each
# individual codegen class declare what languages it generates, but would cause problems with
# scala. See https://rbcommons.com/s/twitter/r/2540/.
return ['java', 'scala', 'python']
@classmethod
def register_options(cls, register):
super(SimpleCodegenTask, cls).register_options(register)
register('--allow-empty', type=bool, default=True, fingerprint=True,
help='Skip targets with no sources defined.',
advanced=True)
register('--allow-dups', type=bool, fingerprint=True,
help='Allow multiple targets specifying the same sources. If duplicates are '
'allowed, the logic of find_sources will associate generated sources with '
'the least-dependent targets that generate them.',
advanced=True)
@classmethod
def get_fingerprint_strategy(cls):
"""Override this method to use a fingerprint strategy other than the default one.
:API: public
:return: a fingerprint strategy, or None to use the default strategy.
"""
return None
@property
def cache_target_dirs(self):
return True
@property
def validate_sources_present(self):
"""A property indicating whether input targets require sources.
If targets should have sources, the `--allow-empty` flag indicates whether it is a
warning or an error for sources to be missing.
:API: public
"""
return True
def synthetic_target_extra_dependencies(self, target, target_workdir):
"""Gets any extra dependencies generated synthetic targets should have.
This method is optional for subclasses to implement, because some code generators may have no
extra dependencies.
:param Target target: the Target from which we are generating a synthetic Target. E.g., 'target'
might be a JavaProtobufLibrary, whose corresponding synthetic Target would be a JavaLibrary.
It may not be necessary to use this parameter depending on the details of the subclass.
:API: public
:return: a list of dependencies.
"""
return []
def synthetic_target_extra_exports(self, target, target_workdir):
"""Gets any extra exports generated synthetic targets should have.
This method is optional for subclasses to implement, because some code generators may have no
extra exports.
NB: Extra exports must also be present in the extra dependencies.
:param Target target: the Target from which we are generating a synthetic Target. E.g., 'target'
might be a JavaProtobufLibrary, whose corresponding synthetic Target would be a JavaLibrary.
It may not be necessary to use this parameter depending on the details of the subclass.
:API: public
:return: a list of exported targets.
"""
return []
def synthetic_target_type_by_target(self, target):
"""The type of target this codegen task generates.
For example, the target type for JaxbGen would simply be JavaLibrary.
:API: public
:return: a type (class) that inherits from Target.
"""
raise NotImplementedError
def synthetic_target_type(self, target):
"""The type of target this codegen task generates.
For example, the target type for JaxbGen would simply be JavaLibrary.
:API: public
:return: a type (class) that inherits from Target.
"""
raise NotImplementedError
def is_gentarget(self, target):
"""Predicate which determines whether the target in question is relevant to this codegen task.
E.g., the JaxbGen task considers JaxbLibrary targets to be relevant, and nothing else.
:API: public
:param Target target: The target to check.
:return: True if this class can generate code for the given target, False otherwise.
"""
if self.gentarget_type:
return isinstance(target, self.gentarget_type)
else:
raise NotImplementedError
def ignore_dup(self, tgt1, tgt2, rel_src):
"""Subclasses can override to omit a specific generated source file from dup checking."""
return False
def codegen_targets(self):
"""Finds codegen targets in the dependency graph.
:API: public
:return: an iterable of dependency targets.
"""
return self.context.targets(self.is_gentarget)
def _do_validate_sources_present(self, target):
"""Checks whether sources is empty, and either raises a TaskError or just returns False.
The specifics of this behavior are defined by whether the user sets --allow-empty to True/False:
--allow-empty=False will result in a TaskError being raised in the event of an empty source
set. If --allow-empty=True, this method will just return false and log a warning.
Shared for all SimpleCodegenTask subclasses to help keep errors consistent and descriptive.
:param target: Target to validate.
:return: True if sources is not empty, False otherwise.
"""
if not self.validate_sources_present:
return True
sources = target.sources_relative_to_buildroot()
if not sources:
message = ('Target {} has no sources.'.format(target.address.spec))
if not self.get_options().allow_empty:
raise TaskError(message)
else:
logger.warn(message)
return False
return True
def _get_synthetic_address(self, target, target_workdir):
synthetic_name = target.id
sources_rel_path = os.path.relpath(target_workdir, get_buildroot())
synthetic_address = Address(sources_rel_path, synthetic_name)
return synthetic_address
def execute(self):
with self.invalidated(self.codegen_targets(),
invalidate_dependents=True,
topological_order=True,
fingerprint_strategy=self.get_fingerprint_strategy()) as invalidation_check:
with self.context.new_workunit(name='execute', labels=[WorkUnitLabel.MULTITOOL]):
for vt in invalidation_check.all_vts:
# Build the target and handle duplicate sources.
if not vt.valid:
if self._do_validate_sources_present(vt.target):
self.execute_codegen(vt.target, vt.results_dir)
self._handle_duplicate_sources(vt.target, vt.results_dir)
vt.update()
self._inject_synthetic_target(
vt.target,
vt.results_dir,
vt.cache_key,
)
self._mark_transitive_invalidation_hashes_dirty(
vt.target.address for vt in invalidation_check.all_vts
)
def _mark_transitive_invalidation_hashes_dirty(self, addresses):
self.context.build_graph.walk_transitive_dependee_graph(
addresses,
work=lambda t: t.mark_transitive_invalidation_hash_dirty(),
)
@property
def _copy_target_attributes(self):
"""Return a list of attributes to be copied from the target to derived synthetic targets.
By default, propagates the provides, scope, and tags attributes.
"""
return ['provides', 'tags', 'scope']
def synthetic_target_dir(self, target, target_workdir):
"""
:API: public
"""
return target_workdir
def _create_sources_with_fingerprint(self, target_workdir, fingerprint, files):
"""Create an EagerFilesetWithSpec to pass to the sources argument for synthetic target injection.
We are creating and passing an EagerFilesetWithSpec to the synthetic target injection in the
hopes that it will save the time of having to refingerprint the sources.
:param target_workdir: The directory containing the generated code for the target.
:param fingerprint: the fingerprint of the VersionedTarget with which the EagerFilesetWithSpec
will be created.
:param files: a list of exact paths to generated sources.
"""
results_dir_relpath = os.path.relpath(target_workdir, get_buildroot())
filespec = FilesetRelPathWrapper.to_filespec(
[os.path.join(results_dir_relpath, file) for file in files])
return EagerFilesetWithSpec(results_dir_relpath, filespec=filespec,
files=files, files_hash='{}.{}'.format(fingerprint.id, fingerprint.hash))
def _inject_synthetic_target(
self,
target,
target_workdir,
fingerprint,
):
"""Create, inject, and return a synthetic target for the given target and workdir.
:param target: The target to inject a synthetic target for.
:param target_workdir: The work directory containing the generated code for the target.
:param fingerprint: The fingerprint to create the synthetic target
with to avoid re-fingerprinting.
"""
synthetic_target_type = self.synthetic_target_type(target)
target_workdir = self.synthetic_target_dir(target, target_workdir)
synthetic_extra_dependencies = self.synthetic_target_extra_dependencies(target, target_workdir)
copied_attributes = {}
for attribute in self._copy_target_attributes:
copied_attributes[attribute] = getattr(target, attribute)
if self._supports_exports(synthetic_target_type):
extra_exports = self.synthetic_target_extra_exports(target, target_workdir)
extra_exports_not_in_extra_dependencies = set(extra_exports).difference(
set(synthetic_extra_dependencies))
if len(extra_exports_not_in_extra_dependencies) > 0:
raise self.MismatchedExtraExports(
'Extra synthetic exports included targets not in the extra dependencies: {}. Affected target: {}'
.format(extra_exports_not_in_extra_dependencies, target))
extra_export_specs = {e.address.spec for e in extra_exports}
original_export_specs = self._original_export_specs(target)
union = set(original_export_specs).union(extra_export_specs)
copied_attributes['exports'] = sorted(union)
sources = list(self.find_sources(target, target_workdir))
if fingerprint:
sources = self._create_sources_with_fingerprint(target_workdir, fingerprint, sources)
synthetic_target = self.context.add_new_target(
address=self._get_synthetic_address(target, target_workdir),
target_type=synthetic_target_type,
dependencies=synthetic_extra_dependencies,
sources=sources,
derived_from=target,
**copied_attributes
)
build_graph = self.context.build_graph
# NB(pl): This bypasses the convenience function (Target.inject_dependency) in order
# to improve performance. Note that we can walk the transitive dependee subgraph once
# for transitive invalidation rather than walking a smaller subgraph for every single
# dependency injected.
for dependent_address in build_graph.dependents_of(target.address):
build_graph.inject_dependency(
dependent=dependent_address,
dependency=synthetic_target.address,
)
# NB(pl): See the above comment. The same note applies.
for concrete_dependency_address in build_graph.dependencies_of(target.address):
build_graph.inject_dependency(
dependent=synthetic_target.address,
dependency=concrete_dependency_address,
)
if target in self.context.target_roots:
self.context.target_roots.append(synthetic_target)
return synthetic_target
def _supports_exports(self, target_type):
return hasattr(target_type, 'export_specs')
def _original_export_specs(self, target):
return [t.address.spec for t in target.exports(EmptyDepContext())]
def resolve_deps(self, unresolved_deps):
"""
:API: public
"""
deps = OrderedSet()
for dep in unresolved_deps:
try:
deps.update(self.context.resolve(dep))
except AddressLookupError as e:
raise AddressLookupError('{message}\n on dependency {dep}'.format(message=e, dep=dep))
return deps
@abstractmethod
def execute_codegen(self, target, target_workdir):
"""Generate code for the given target.
:param target: A target to generate code for
:param target_workdir: A clean directory into which to generate code
"""
def find_sources(self, target, target_workdir):
"""Determines what sources were generated by the target after the fact.
This is done by searching the directory where this target's code was generated.
:param Target target: the target for which to find generated sources.
:param path target_workdir: directory containing sources for the target.
:return: A set of filepaths relative to the target_workdir.
:rtype: OrderedSet
"""
return OrderedSet(self._find_sources_in_workdir(target_workdir))
def _find_sources_in_workdir(self, target_workdir):
"""Returns relative sources contained in the given target_workdir."""
for root, _, files in safe_walk(target_workdir):
rel_root = fast_relpath(root, target_workdir)
for name in files:
yield os.path.join(rel_root, name)
def _handle_duplicate_sources(self, target, target_workdir):
"""Handles duplicate sources generated by the given gen target by either failure or deletion.
This method should be called after all dependencies have been injected into the graph, but
before injecting the synthetic version of this target.
NB(gm): Some code generators may re-generate code that their dependent libraries generate.
This results in targets claiming to generate sources that they really don't, so we try to
filter out sources that were actually generated by dependencies of the target. This causes
the code generated by the dependencies to 'win' over the code generated by dependees. By
default, this behavior is disabled, and duplication in generated sources will raise a
TaskError. This is controlled by the --allow-dups flag.
"""
# Compute the raw sources owned by this target.
by_target = self.find_sources(target, target_workdir)
# Walk dependency gentargets and record any sources owned by those targets that are also
# owned by this target.
duplicates_by_target = OrderedDict()
def record_duplicates(dep):
if dep == target or not self.is_gentarget(dep.concrete_derived_from):
return
duped_sources = [s for s in dep.sources_relative_to_source_root() if s in by_target and
not self.ignore_dup(target, dep, s)]
if duped_sources:
duplicates_by_target[dep] = duped_sources
target.walk(record_duplicates)
# If there were no dupes, we're done.
if not duplicates_by_target:
return
# If there were duplicates warn or error.
messages = ['{target} generated sources that had already been generated by dependencies.'
.format(target=target.address.spec)]
for dep, duped_sources in duplicates_by_target.items():
messages.append('\t{} also generated:'.format(dep.concrete_derived_from.address.spec))
messages.extend(['\t\t{}'.format(source) for source in duped_sources])
message = '\n'.join(messages)
if self.get_options().allow_dups:
logger.warn(message)
else:
raise self.DuplicateSourceError(message)
# Finally, remove duplicates from the workdir. This prevents us from having to worry
# about them during future incremental compiles.
for dep, duped_sources in duplicates_by_target.items():
for duped_source in duped_sources:
safe_delete(os.path.join(target_workdir, duped_source))
class DuplicateSourceError(TaskError):
"""A target generated the same code that was generated by one of its dependencies.
This is only thrown when --allow-dups=False.
"""
class MismatchedExtraExports(Exception):
"""An extra export didn't have an accompanying explicit extra dependency for the same target.
NB: Exports without accompanying dependencies are caught during compile, but this error will
allow errors caused by injected exports to be surfaced earlier.
"""
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Union
from croniter import croniter
from dateutil.relativedelta import relativedelta # noqa: F401 for doctest
from airflow.utils import timezone
cron_presets: Dict[str, str] = {
'@hourly': '0 * * * *',
'@daily': '0 0 * * *',
'@weekly': '0 0 * * 0',
'@monthly': '0 0 1 * *',
'@quarterly': '0 0 1 */3 *',
'@yearly': '0 0 1 1 *',
}
# pylint: disable=too-many-branches
def date_range(
start_date: datetime,
end_date: Optional[datetime] = None,
num: Optional[int] = None,
delta: Optional[Union[str, timedelta, relativedelta]] = None,
) -> List[datetime]:
"""
Get a set of dates as a list based on a start, end and delta, delta
can be something that can be added to `datetime.datetime`
or a cron expression as a `str`
.. code-block:: python
date_range(datetime(2016, 1, 1), datetime(2016, 1, 3), delta=timedelta(1))
[datetime.datetime(2016, 1, 1, 0, 0), datetime.datetime(2016, 1, 2, 0, 0),
datetime.datetime(2016, 1, 3, 0, 0)]
date_range(datetime(2016, 1, 1), datetime(2016, 1, 3), delta='0 0 * * *')
[datetime.datetime(2016, 1, 1, 0, 0), datetime.datetime(2016, 1, 2, 0, 0),
datetime.datetime(2016, 1, 3, 0, 0)]
date_range(datetime(2016, 1, 1), datetime(2016, 3, 3), delta="0 0 0 * *")
[datetime.datetime(2016, 1, 1, 0, 0), datetime.datetime(2016, 2, 1, 0, 0),
datetime.datetime(2016, 3, 1, 0, 0)]
:param start_date: anchor date to start the series from
:type start_date: datetime.datetime
:param end_date: right boundary for the date range
:type end_date: datetime.datetime
:param num: alternatively to end_date, you can specify the number of
entries you want in the range. This number can be negative; the
output will always be sorted regardless
:type num: int
:param delta: step length. It can be datetime.timedelta or cron expression as string
:type delta: datetime.timedelta or str or dateutil.relativedelta
"""
if not delta:
return []
if end_date:
if start_date > end_date:
raise Exception("Wait. start_date needs to be before end_date")
if num:
raise Exception("Wait. Either specify end_date OR num")
if not end_date and not num:
end_date = timezone.utcnow()
delta_iscron = False
time_zone = start_date.tzinfo
abs_delta: Union[timedelta, relativedelta]
if isinstance(delta, str):
delta_iscron = True
if timezone.is_localized(start_date):
start_date = timezone.make_naive(start_date, time_zone)
cron = croniter(cron_presets.get(delta, delta), start_date)
elif isinstance(delta, timedelta):
abs_delta = abs(delta)
elif isinstance(delta, relativedelta):
abs_delta = abs(delta)
else:
raise Exception("Wait. delta must be either datetime.timedelta or cron expression as str")
dates = []
if end_date:
if timezone.is_naive(start_date) and not timezone.is_naive(end_date):
end_date = timezone.make_naive(end_date, time_zone)
while start_date <= end_date: # type: ignore
if timezone.is_naive(start_date):
dates.append(timezone.make_aware(start_date, time_zone))
else:
dates.append(start_date)
if delta_iscron:
start_date = cron.get_next(datetime)
else:
start_date += abs_delta
else:
num_entries: int = num # type: ignore
for _ in range(abs(num_entries)):
if timezone.is_naive(start_date):
dates.append(timezone.make_aware(start_date, time_zone))
else:
dates.append(start_date)
if delta_iscron and num_entries > 0:
start_date = cron.get_next(datetime)
elif delta_iscron:
start_date = cron.get_prev(datetime)
elif num_entries > 0:
start_date += abs_delta
else:
start_date -= abs_delta
return sorted(dates)
def round_time(dt, delta, start_date=timezone.make_aware(datetime.min)):
"""
Returns the datetime of the form start_date + i * delta
which is closest to dt for any non-negative integer i.
Note that delta may be a datetime.timedelta or a dateutil.relativedelta
>>> round_time(datetime(2015, 1, 1, 6), timedelta(days=1))
datetime.datetime(2015, 1, 1, 0, 0)
>>> round_time(datetime(2015, 1, 2), relativedelta(months=1))
datetime.datetime(2015, 1, 1, 0, 0)
>>> round_time(datetime(2015, 9, 16, 0, 0), timedelta(1), datetime(2015, 9, 14, 0, 0))
datetime.datetime(2015, 9, 16, 0, 0)
>>> round_time(datetime(2015, 9, 15, 0, 0), timedelta(1), datetime(2015, 9, 14, 0, 0))
datetime.datetime(2015, 9, 15, 0, 0)
>>> round_time(datetime(2015, 9, 14, 0, 0), timedelta(1), datetime(2015, 9, 14, 0, 0))
datetime.datetime(2015, 9, 14, 0, 0)
>>> round_time(datetime(2015, 9, 13, 0, 0), timedelta(1), datetime(2015, 9, 14, 0, 0))
datetime.datetime(2015, 9, 14, 0, 0)
"""
if isinstance(delta, str):
# It's cron based, so it's easy
time_zone = start_date.tzinfo
start_date = timezone.make_naive(start_date, time_zone)
cron = croniter(delta, start_date)
prev = cron.get_prev(datetime)
if prev == start_date:
return timezone.make_aware(start_date, time_zone)
else:
return timezone.make_aware(prev, time_zone)
# Ignore the microseconds of dt
dt -= timedelta(microseconds=dt.microsecond)
# We are looking for a datetime in the form start_date + i * delta
# which is as close as possible to dt. Since delta could be a relative
# delta we don't know its exact length in seconds so we cannot rely on
# division to find i. Instead we employ a binary search algorithm, first
# finding an upper and lower limit and then dissecting the interval until
# we have found the closest match.
# We first search an upper limit for i for which start_date + upper * delta
# exceeds dt.
upper = 1
while start_date + upper * delta < dt:
# To speed up finding an upper limit we grow this exponentially by a
# factor of 2
upper *= 2
# Since upper is the first value for which start_date + upper * delta
# exceeds dt, upper // 2 is below dt and therefore forms a lower limit
# for the i we are looking for
lower = upper // 2
# We now continue to intersect the interval between
# start_date + lower * delta and start_date + upper * delta
# until we find the closest value
while True:
# Invariant: start + lower * delta < dt <= start + upper * delta
# If start_date + (lower + 1)*delta exceeds dt, then either lower or
# lower+1 has to be the solution we are searching for
if start_date + (lower + 1) * delta >= dt:
# Check if start_date + (lower + 1)*delta or
# start_date + lower*delta is closer to dt and return the solution
if (start_date + (lower + 1) * delta) - dt <= dt - (start_date + lower * delta):
return start_date + (lower + 1) * delta
else:
return start_date + lower * delta
# We intersect the interval and either replace the lower or upper
# limit with the candidate
candidate = lower + (upper - lower) // 2
if start_date + candidate * delta >= dt:
upper = candidate
else:
lower = candidate
# in the special case when start_date > dt the search for upper will
# immediately stop for upper == 1 which results in lower = upper // 2 = 0
# and this function returns start_date.
def infer_time_unit(time_seconds_arr):
"""
Determine the most appropriate time unit for an array of time durations
specified in seconds.
e.g. 5400 seconds => 'minutes', 36000 seconds => 'hours'
"""
if len(time_seconds_arr) == 0:
return 'hours'
max_time_seconds = max(time_seconds_arr)
if max_time_seconds <= 60 * 2:
return 'seconds'
elif max_time_seconds <= 60 * 60 * 2:
return 'minutes'
elif max_time_seconds <= 24 * 60 * 60 * 2:
return 'hours'
else:
return 'days'
def scale_time_units(time_seconds_arr, unit):
"""Convert an array of time durations in seconds to the specified time unit."""
if unit == 'minutes':
return list(map(lambda x: x / 60, time_seconds_arr))
elif unit == 'hours':
return list(map(lambda x: x / (60 * 60), time_seconds_arr))
elif unit == 'days':
return list(map(lambda x: x / (24 * 60 * 60), time_seconds_arr))
return time_seconds_arr
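# Illustrative: scale_time_units([3600, 7200], 'hours') -> [1.0, 2.0]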
def days_ago(n, hour=0, minute=0, second=0, microsecond=0):
"""
Get a datetime object representing `n` days ago. By default the time is
set to midnight.
"""
today = timezone.utcnow().replace(hour=hour, minute=minute, second=second, microsecond=microsecond)
return today - timedelta(days=n)
def parse_execution_date(execution_date_str):
"""Parse execution date string to datetime object."""
return timezone.parse(execution_date_str)
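# Illustrative (assuming an ISO-8601 string, which timezone.parse accepts):
# parse_execution_date('2020-06-01T00:00:00+00:00')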
|
|
# Copyright 2020 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import socket
import struct
from unittest import mock
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from octavia.cmd import health_checker
from octavia.tests.common import utils as test_utils
from octavia.tests.unit import base
CONF = cfg.CONF
class TestHealthCheckerCMD(base.TestCase):
def setUp(self):
super(TestHealthCheckerCMD, self).setUp()
self.CONF = self.useFixture(oslo_fixture.Config(cfg.CONF))
def test_crc32c(self):
data = b'STRING1234'
result = health_checker.crc32c(data)
self.assertEqual(result, 0x30e0e107)
@mock.patch('random.randint', return_value=42424242)
def test__sctp_build_init_packet(self, mock_randint):
expected_packet = bytearray(
b'\x04\xd2\x16.\x00\x00\x00\x00\x1d9\x96\r\x01\x00\x00\x14:\xde'
b'h\xb1\x00\x01\xa0\x00\x00\n\xff\xff\x02\x87W\xb2')
src_port = 1234
dest_port = 5678
tag = 987654321
pkt = health_checker._sctp_build_init_packet(
src_port, dest_port, tag)
self.assertEqual(pkt, expected_packet)
decoded_src_port = struct.unpack_from('!H', pkt, 0)[0]
decoded_dest_port = struct.unpack_from('!H', pkt, 2)[0]
self.assertEqual(src_port, decoded_src_port)
self.assertEqual(dest_port, decoded_dest_port)
decoded_tag = struct.unpack_from('!L', pkt, 16)[0]
self.assertEqual(tag, decoded_tag)
decoded_checksum = struct.unpack_from('!L', pkt, 8)[0]
# Reset and re-compute checksum
pkt[8] = pkt[9] = pkt[10] = pkt[11] = 0
checksum = health_checker.crc32c(pkt)
self.assertEqual(checksum, decoded_checksum)
def test__sctp_build_abort_packet(self):
expected_packet = bytearray(
b'\x04\xd2\x16.\x02\x93wM3\x83\xbbN\x06\x01\x00\x04')
src_port = 1234
dest_port = 5678
verification_tag = 43218765
pkt = health_checker._sctp_build_abort_packet(
src_port, dest_port, verification_tag)
self.assertEqual(pkt, expected_packet)
decoded_src_port = struct.unpack_from('!H', pkt, 0)[0]
decoded_dest_port = struct.unpack_from('!H', pkt, 2)[0]
self.assertEqual(src_port, decoded_src_port)
self.assertEqual(dest_port, decoded_dest_port)
decoded_tag = struct.unpack_from('!L', pkt, 4)[0]
self.assertEqual(verification_tag, decoded_tag)
decoded_checksum = struct.unpack_from('!L', pkt, 8)[0]
# Reset and re-compute checksum
pkt[8] = pkt[9] = pkt[10] = pkt[11] = 0
checksum = health_checker.crc32c(pkt)
self.assertEqual(checksum, decoded_checksum)
def test__sctp_decode_packet(self):
# IPv4 INIT ACK packet
data = (b'\x45\x00\x00\x00\x00\x01\x01\x01'
b'\x00\x00\xff\x06\x7f\x00\x00\x00'
b'\x7f\x00\x00\x02\x16.\x04\xd2'
b'\x02\x93\x77\x4d\x00\x00\x00\x32'
b'\x02\x00\x00\x16')
family = socket.AF_INET
expected_tag = 43218765
ret = health_checker._sctp_decode_packet(data, family, expected_tag)
self.assertEqual(ret, 2) # INIT ACK
# IPv6 ABORT packet
data = (b'\x16.\x04\xd2\x02\x93\x77\x4d\x00\x00\x00\x32'
b'\x06\x00\x00\x16')
family = socket.AF_INET6
expected_tag = 43218765
ret = health_checker._sctp_decode_packet(data, family, expected_tag)
self.assertEqual(ret, 6) # ABORT
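    # The two cases above differ because raw IPv4 sockets deliver the 20-byte
    # IP header (first byte 0x45: version 4, IHL 5) before the SCTP data,
    # while IPv6 raw sockets deliver the SCTP packet directly, so
    # _sctp_decode_packet has to account for that offset per address family.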
def test__sctp_decode_packet_too_short(self):
        # Truncated IPv4 packet, too short to contain an SCTP header
data = (b'\x45\x00\x00\x00\x00\x01')
family = socket.AF_INET
expected_tag = 43218765
ret = health_checker._sctp_decode_packet(data, family, expected_tag)
self.assertFalse(ret)
def test__sctp_decode_packet_unexpected(self):
# IPv4 packet with different verification tag
data = (b'\x45\x00\x00\x00\x00\x01\x01\x01'
b'\x00\x00\xff\x06\x7f\x00\x00\x00'
b'\x7f\x00\x00\x02\x16.\x04\xd2'
b'\x02\x91\x17\x4d\x00\x00\x00\x32'
b'\x02\x00\x00\x16')
family = socket.AF_INET
expected_tag = 43218765
ret = health_checker._sctp_decode_packet(data, family, expected_tag)
self.assertFalse(ret)
@mock.patch("time.time")
@mock.patch("socket.socket")
@mock.patch("octavia.cmd.health_checker._sctp_decode_packet")
@mock.patch("octavia.cmd.health_checker._sctp_build_abort_packet")
def test_sctp_health_check(self, mock_build_abort_packet,
mock_decode_packet, mock_socket,
mock_time):
mock_time.side_effect = [1, 2, 3, 4]
socket_mock = mock.Mock()
socket_mock.recvfrom = mock.Mock()
socket_mock.recvfrom.side_effect = [
socket.timeout(),
(None, None)
]
mock_socket.return_value = socket_mock
mock_decode_packet.return_value = 2 # INIT ACK
abrt_mock = mock.Mock()
mock_build_abort_packet.return_value = abrt_mock
mock_open = self.useFixture(
test_utils.OpenFixture('/proc/net/protocols',
'bar\n')).mock_open
with mock.patch('builtins.open', mock_open):
ret = health_checker.sctp_health_check(
"192.168.0.27", 1234, timeout=3)
self.assertEqual(0, ret) # Success
mock_decode_packet.assert_called()
socket_mock.send.assert_called_with(abrt_mock)
@mock.patch("time.time")
@mock.patch("socket.socket")
@mock.patch("octavia.cmd.health_checker._sctp_decode_packet")
@mock.patch("octavia.cmd.health_checker._sctp_build_abort_packet")
def test_sctp_health_check_with_sctp_support(self,
mock_build_abort_packet,
mock_decode_packet,
mock_socket,
mock_time):
mock_time.side_effect = [1, 2, 3, 4]
socket_mock = mock.Mock()
socket_mock.recvfrom = mock.Mock()
socket_mock.recvfrom.side_effect = [
socket.timeout(),
(None, None)
]
mock_socket.return_value = socket_mock
mock_decode_packet.return_value = 2 # INIT ACK
abrt_mock = mock.Mock()
mock_build_abort_packet.return_value = abrt_mock
mock_open = self.useFixture(
test_utils.OpenFixture('/proc/net/protocols',
'SCTP\n')).mock_open
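        # 'SCTP' in /proc/net/protocols indicates kernel SCTP support; in
        # that case the checker presumably lets the kernel handle the
        # association teardown, so the hand-built ABORT packet must never be
        # sent (asserted below).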
with mock.patch('builtins.open', mock_open):
ret = health_checker.sctp_health_check(
"192.168.0.27", 1234, timeout=3)
self.assertEqual(0, ret) # Success
mock_decode_packet.assert_called()
for call in socket_mock.send.mock_calls:
self.assertNotEqual(mock.call(abrt_mock), call)
@mock.patch("time.time")
@mock.patch("socket.socket")
@mock.patch("octavia.cmd.health_checker._sctp_decode_packet")
@mock.patch("octavia.cmd.health_checker._sctp_build_abort_packet")
def test_sctp_health_check_fail(self, mock_build_abort_packet,
mock_decode_packet, mock_socket,
mock_time):
mock_time.side_effect = [1, 2, 3, 4]
socket_mock = mock.Mock()
socket_mock.recvfrom = mock.Mock()
socket_mock.recvfrom.side_effect = [
socket.timeout(),
(None, None)
]
mock_socket.return_value = socket_mock
        mock_decode_packet.return_value = 6  # ABORT
abrt_mock = mock.Mock()
mock_build_abort_packet.return_value = abrt_mock
mock_open = self.useFixture(
test_utils.OpenFixture('/proc/net/protocols',
'bar\n')).mock_open
with mock.patch('builtins.open', mock_open):
ret = health_checker.sctp_health_check(
"192.168.0.27", 1234, timeout=3)
self.assertEqual(1, ret) # Error
mock_decode_packet.assert_called()
for call in socket_mock.send.mock_calls:
self.assertNotEqual(mock.call(abrt_mock), call)
@mock.patch("time.time")
@mock.patch("socket.socket")
@mock.patch("octavia.cmd.health_checker._sctp_decode_packet")
@mock.patch("octavia.cmd.health_checker._sctp_build_abort_packet")
def test_sctp_health_check_error(self, mock_build_abort_packet,
mock_decode_packet, mock_socket,
mock_time):
mock_time.side_effect = [1, 2, 3, 4]
socket_mock = mock.Mock()
socket_mock.recvfrom = mock.Mock()
socket_mock.recvfrom.side_effect = [
socket.timeout(),
(None, None)
]
mock_socket.return_value = socket_mock
mock_decode_packet.return_value = 1234 # Unknown
abrt_mock = mock.Mock()
mock_build_abort_packet.return_value = abrt_mock
mock_open = self.useFixture(
test_utils.OpenFixture('/proc/net/protocols',
'bar\n')).mock_open
with mock.patch('builtins.open', mock_open):
ret = health_checker.sctp_health_check(
"192.168.0.27", 1234, timeout=3)
self.assertEqual(3, ret) # Unknown error
mock_decode_packet.assert_called()
socket_mock.send.assert_called_with(abrt_mock)
@mock.patch("time.time")
@mock.patch("socket.socket")
@mock.patch("octavia.cmd.health_checker._sctp_decode_packet")
@mock.patch("octavia.cmd.health_checker._sctp_build_abort_packet")
def test_sctp_health_check_timeout(self, mock_build_abort_packet,
mock_decode_packet, mock_socket,
mock_time):
mock_time.side_effect = [1, 2, 3, 4]
socket_mock = mock.Mock()
socket_mock.recvfrom = mock.Mock()
socket_mock.recvfrom.side_effect = [
socket.timeout(),
socket.timeout(),
socket.timeout(),
socket.timeout(),
]
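        # Four consecutive recvfrom() timeouts while the mocked clock
        # advances 1 -> 4; with timeout=3 the check presumably runs out its
        # window before any packet arrives, so it returns 2 (timeout) and
        # never reaches _sctp_decode_packet.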
mock_socket.return_value = socket_mock
abrt_mock = mock.Mock()
mock_build_abort_packet.return_value = abrt_mock
mock_open = self.useFixture(
test_utils.OpenFixture('/proc/net/protocols',
'bar\n')).mock_open
with mock.patch('builtins.open', mock_open):
ret = health_checker.sctp_health_check(
"192.168.0.27", 1234, timeout=3)
self.assertEqual(2, ret) # Timeout
mock_decode_packet.assert_not_called()
for call in socket_mock.send.mock_calls:
self.assertNotEqual(mock.call(abrt_mock), call)
|
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'ItemRequiresSpell'
db.delete_table('dnd_itemrequiresspell')
# Adding M2M table for field required_spells on 'Item'
db.create_table('dnd_item_required_spells', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('item', models.ForeignKey(orm['dnd.item'], null=False)),
('spell', models.ForeignKey(orm['dnd.spell'], null=False))
))
db.create_unique('dnd_item_required_spells', ['item_id', 'spell_id'])
def backwards(self, orm):
# Adding model 'ItemRequiresSpell'
db.create_table('dnd_itemrequiresspell', (
('item', self.gf('django.db.models.fields.related.ForeignKey')(related_name='required_spells', to=orm['dnd.Item'])),
('remove_comma', self.gf('django.db.models.fields.BooleanField')(default=False)),
('extra', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)),
('text_before', self.gf('django.db.models.fields.CharField')(max_length=64, blank=True)),
('spell', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['dnd.Spell'])),
('text_after', self.gf('django.db.models.fields.CharField')(max_length=64, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('dnd', ['ItemRequiresSpell'])
# Removing M2M table for field required_spells on 'Item'
db.delete_table('dnd_item_required_spells')
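        # Net effect of this migration: the explicit through model
        # 'ItemRequiresSpell' (which carried text_before/text_after/extra
        # annotations per link) is replaced by a plain many-to-many join
        # table with a unique (item_id, spell_id) constraint; backwards()
        # recreates the through model and drops the join table.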
models = {
'dnd.characterclass': {
'Meta': {'ordering': "['name']", 'object_name': 'CharacterClass'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'prestige': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'short_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'short_description_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'})
},
'dnd.characterclassvariant': {
'Meta': {'ordering': "['character_class__name']", 'unique_together': "(('character_class', 'rulebook'),)", 'object_name': 'CharacterClassVariant'},
'advancement': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'advancement_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'alignment': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'character_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.CharacterClass']"}),
'class_features': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'class_features_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'class_skills': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.Skill']", 'symmetrical': 'False', 'blank': 'True'}),
'hit_die': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'required_bab': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'requirements': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'requirements_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
'skill_points': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'starting_gold': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'})
},
'dnd.characterclassvariantrequiresfeat': {
'Meta': {'object_name': 'CharacterClassVariantRequiresFeat'},
'character_class_variant': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'required_feats'", 'to': "orm['dnd.CharacterClassVariant']"}),
'extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'feat': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Feat']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'remove_comma': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'text_after': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'text_before': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'})
},
'dnd.characterclassvariantrequiresrace': {
'Meta': {'object_name': 'CharacterClassVariantRequiresRace'},
'character_class_variant': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'required_races'", 'to': "orm['dnd.CharacterClassVariant']"}),
'extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'race': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Race']"}),
'remove_comma': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'text_after': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'text_before': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'})
},
'dnd.characterclassvariantrequiresskill': {
'Meta': {'object_name': 'CharacterClassVariantRequiresSkill'},
'character_class_variant': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'required_skills'", 'to': "orm['dnd.CharacterClassVariant']"}),
'extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ranks': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'remove_comma': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'skill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Skill']"}),
'text_after': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'text_before': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'})
},
'dnd.dndedition': {
'Meta': {'ordering': "['name']", 'object_name': 'DndEdition'},
'core': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'system': ('django.db.models.fields.CharField', [], {'max_length': '16'})
},
'dnd.domain': {
'Meta': {'ordering': "['name']", 'object_name': 'Domain'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'})
},
'dnd.feat': {
'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'rulebook'),)", 'object_name': 'Feat'},
'benefit': ('django.db.models.fields.TextField', [], {}),
'benefit_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'description_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'feat_categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.FeatCategory']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'normal': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'normal_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '64', 'db_index': 'True'}),
'special': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'special_feat_prerequisites': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.SpecialFeatPrerequisite']", 'through': "orm['dnd.FeatSpecialFeatPrerequisite']", 'symmetrical': 'False'}),
'special_html': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'dnd.featcategory': {
'Meta': {'ordering': "['name']", 'object_name': 'FeatCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'})
},
'dnd.featrequiresfeat': {
'Meta': {'object_name': 'FeatRequiresFeat'},
'additional_text': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'required_feat': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'required_by_feats'", 'to': "orm['dnd.Feat']"}),
'source_feat': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'required_feats'", 'to': "orm['dnd.Feat']"})
},
'dnd.featrequiresskill': {
'Meta': {'object_name': 'FeatRequiresSkill'},
'extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'feat': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'required_skills'", 'to': "orm['dnd.Feat']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'min_rank': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'skill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Skill']"})
},
'dnd.featspecialfeatprerequisite': {
'Meta': {'object_name': 'FeatSpecialFeatPrerequisite'},
'feat': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Feat']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'special_feat_prerequisite': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.SpecialFeatPrerequisite']"}),
'value_1': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'value_2': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'})
},
'dnd.item': {
'Meta': {'object_name': 'Item'},
'activation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.ItemActivationType']", 'null': 'True', 'blank': 'True'}),
'aura': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.ItemAuraType']", 'null': 'True', 'blank': 'True'}),
'aura_dc': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'aura_schools': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.SpellSchool']", 'symmetrical': 'False', 'blank': 'True'}),
'body_slot': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.ItemSlot']", 'null': 'True', 'blank': 'True'}),
'caster_level': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'cost_to_create': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'description_html': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_level': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'price_bonus': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'price_gp': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'property': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.ItemProperty']", 'null': 'True', 'blank': 'True'}),
'required_feats': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.Feat']", 'symmetrical': 'False', 'blank': 'True'}),
'required_spells': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.Spell']", 'symmetrical': 'False', 'blank': 'True'}),
'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'}),
'synergy_prerequisite': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Item']", 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'visual_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'weight': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'dnd.itemactivationtype': {
'Meta': {'ordering': "['name']", 'object_name': 'ItemActivationType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'})
},
'dnd.itemauratype': {
'Meta': {'ordering': "['name']", 'object_name': 'ItemAuraType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'})
},
'dnd.itemproperty': {
'Meta': {'ordering': "['name']", 'object_name': 'ItemProperty'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'})
},
'dnd.itemslot': {
'Meta': {'ordering': "['name']", 'object_name': 'ItemSlot'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'})
},
'dnd.monster': {
'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'rulebook'),)", 'object_name': 'Monster'},
'advancement': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'alignment': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'armor_class': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'attack': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'base_attack': ('django.db.models.fields.SmallIntegerField', [], {}),
'cha': ('django.db.models.fields.SmallIntegerField', [], {}),
'challenge_rating': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'combat': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'combat_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'con': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'description_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'dex': ('django.db.models.fields.SmallIntegerField', [], {}),
'environment': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'flat_footed_armor_class': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'fort_save': ('django.db.models.fields.SmallIntegerField', [], {}),
'fort_save_extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'full_attack': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'grapple': ('django.db.models.fields.SmallIntegerField', [], {}),
'hit_dice': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initiative': ('django.db.models.fields.SmallIntegerField', [], {}),
'int': ('django.db.models.fields.SmallIntegerField', [], {}),
'level_adjustment': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'organization': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'reach': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'reflex_save': ('django.db.models.fields.SmallIntegerField', [], {}),
'reflex_save_extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
'size': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.RaceSize']", 'null': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '32', 'db_index': 'True'}),
'space': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'special_attacks': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'special_qualities': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'str': ('django.db.models.fields.SmallIntegerField', [], {}),
'subtypes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.MonsterSubtype']", 'symmetrical': 'False', 'blank': 'True'}),
'touch_armor_class': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'treasure': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.MonsterType']"}),
'will_save': ('django.db.models.fields.SmallIntegerField', [], {}),
'will_save_extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'wis': ('django.db.models.fields.SmallIntegerField', [], {})
},
'dnd.monsterhasfeat': {
'Meta': {'object_name': 'MonsterHasFeat'},
'extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'feat': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Feat']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'monster': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'feats'", 'to': "orm['dnd.Monster']"})
},
'dnd.monsterhasskill': {
'Meta': {'object_name': 'MonsterHasSkill'},
'extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'monster': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'skills'", 'to': "orm['dnd.Monster']"}),
'ranks': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'skill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Skill']"})
},
'dnd.monsterspeed': {
'Meta': {'object_name': 'MonsterSpeed'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'race': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Monster']"}),
'speed': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['dnd.RaceSpeedType']"})
},
'dnd.monstersubtype': {
'Meta': {'ordering': "['name']", 'object_name': 'MonsterSubtype'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'})
},
'dnd.monstertype': {
'Meta': {'ordering': "['name']", 'object_name': 'MonsterType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'})
},
'dnd.newsentry': {
'Meta': {'ordering': "['-published']", 'object_name': 'NewsEntry'},
'body': ('django.db.models.fields.TextField', [], {}),
'body_html': ('django.db.models.fields.TextField', [], {}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'published': ('django.db.models.fields.DateField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'dnd.race': {
'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'rulebook'),)", 'object_name': 'Race'},
'base_monster': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Monster']", 'null': 'True', 'blank': 'True'}),
'cha': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'combat': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'combat_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'con': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'description_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'dex': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'int': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'level_adjustment': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'racial_traits': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'racial_traits_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'reach': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
'size': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.RaceSize']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '32', 'db_index': 'True'}),
'space': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'str': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'wis': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'})
},
'dnd.racefavoredcharacterclass': {
'Meta': {'object_name': 'RaceFavoredCharacterClass'},
'character_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.CharacterClass']"}),
'extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'race': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'favored_classes'", 'to': "orm['dnd.Race']"})
},
'dnd.racesize': {
'Meta': {'ordering': "['order']", 'object_name': 'RaceSize'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'order': ('django.db.models.fields.PositiveSmallIntegerField', [], {})
},
'dnd.racespeed': {
'Meta': {'object_name': 'RaceSpeed'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'race': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Race']"}),
'speed': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['dnd.RaceSpeedType']"})
},
'dnd.racespeedtype': {
'Meta': {'ordering': "['name', 'extra']", 'object_name': 'RaceSpeedType'},
'extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'})
},
'dnd.rule': {
'Meta': {'object_name': 'Rule'},
'body': ('django.db.models.fields.TextField', [], {}),
'body_html': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'page_from': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'page_to': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'})
},
'dnd.rulebook': {
'Meta': {'ordering': "['name']", 'object_name': 'Rulebook'},
'abbr': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'dnd_edition': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.DndEdition']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'official_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
'published': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'year': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'})
},
'dnd.skill': {
'Meta': {'ordering': "['name']", 'object_name': 'Skill'},
'armor_check_penalty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'base_skill': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'required_by_feats': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.Feat']", 'through': "orm['dnd.FeatRequiresSkill']", 'symmetrical': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'}),
'trained_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'dnd.skillvariant': {
'Meta': {'unique_together': "(('skill', 'rulebook'),)", 'object_name': 'SkillVariant'},
'action': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'action_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'check': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'check_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'description_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'restriction': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'restriction_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
'skill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Skill']"}),
'special': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'special_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'synergy': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'synergy_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'try_again': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'try_again_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'untrained': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'untrained_html': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'dnd.specialfeatprerequisite': {
'Meta': {'ordering': "['name']", 'object_name': 'SpecialFeatPrerequisite'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'print_format': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'dnd.spell': {
'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'rulebook'),)", 'object_name': 'Spell'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'arcane_focus_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'area': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'casting_time': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'class_levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.CharacterClass']", 'through': "orm['dnd.SpellClassLevel']", 'symmetrical': 'False'}),
'corrupt_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'corrupt_level': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'description_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'descriptors': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.SpellDescriptor']", 'symmetrical': 'False', 'blank': 'True'}),
'divine_focus_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'domain_levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.Domain']", 'through': "orm['dnd.SpellDomainLevel']", 'symmetrical': 'False'}),
'duration': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'extra_components': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'material_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'meta_breath_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'page': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'range': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
'saving_throw': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'school': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.SpellSchool']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '64', 'db_index': 'True'}),
'somatic_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'spell_resistance': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'sub_school': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.SpellSubSchool']", 'null': 'True', 'blank': 'True'}),
'target': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'true_name_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'verbal_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'xp_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'dnd.spellclasslevel': {
'Meta': {'ordering': "['spell', 'level']", 'unique_together': "(('character_class', 'spell'),)", 'object_name': 'SpellClassLevel'},
'character_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.CharacterClass']"}),
'extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'spell': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Spell']"})
},
'dnd.spelldescriptor': {
'Meta': {'ordering': "['name']", 'object_name': 'SpellDescriptor'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '64', 'db_index': 'True'})
},
'dnd.spelldomainlevel': {
'Meta': {'ordering': "['spell', 'level']", 'unique_together': "(('domain', 'spell'),)", 'object_name': 'SpellDomainLevel'},
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Domain']"}),
'extra': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'spell': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Spell']"})
},
'dnd.spellschool': {
'Meta': {'ordering': "['name']", 'object_name': 'SpellSchool'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'})
},
'dnd.spellsubschool': {
'Meta': {'ordering': "['name']", 'object_name': 'SpellSubSchool'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'})
},
'dnd.staticpage': {
'Meta': {'object_name': 'StaticPage'},
'body': ('django.db.models.fields.TextField', [], {}),
'body_html': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'})
},
'dnd.textfeatprerequisite': {
'Meta': {'ordering': "['text']", 'object_name': 'TextFeatPrerequisite'},
'feat': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Feat']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '256'})
}
}
complete_apps = ['dnd']
|
|
from __future__ import absolute_import
import json
import os
import string
from collections import namedtuple
import paramiko
import logging
from django.core.urlresolvers import reverse
from django.contrib.auth.models import Group, User
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.core.exceptions import SuspiciousFileOperation
from django.core.files.uploadedfile import UploadedFile
from django.utils.http import int_to_base36
from django.http import HttpResponse
from rest_framework.exceptions import NotFound, PermissionDenied, ValidationError
from mezzanine.utils.email import subject_template, default_token_generator, send_mail_template
from mezzanine.utils.urls import next_url
from mezzanine.conf import settings
from hs_core import hydroshare
from hs_core.hydroshare import check_resource_type, delete_resource_file
from hs_core.models import AbstractMetaDataElement, GenericResource, Relation, ResourceFile, \
get_user
from hs_core.signals import pre_metadata_element_create, post_delete_file_from_resource
from hs_core.hydroshare import FILE_SIZE_LIMIT
from hs_core.hydroshare.utils import raise_file_size_exception, get_file_mime_type
from django_irods.storage import IrodsStorage
from hs_access_control.models import PrivilegeCodes
ActionToAuthorize = namedtuple('ActionToAuthorize',
'VIEW_METADATA, '
'VIEW_RESOURCE, '
'EDIT_RESOURCE, '
'SET_RESOURCE_FLAG, '
'DELETE_RESOURCE, '
'CREATE_RESOURCE_VERSION, '
'VIEW_RESOURCE_ACCESS, '
'EDIT_RESOURCE_ACCESS')
ACTION_TO_AUTHORIZE = ActionToAuthorize(0, 1, 2, 3, 4, 5, 6, 7)
def json_or_jsonp(r, i, code=200):
if not isinstance(i, basestring):
i = json.dumps(i)
if 'callback' in r.REQUEST:
return HttpResponse('{c}({i})'.format(c=r.REQUEST['callback'], i=i),
content_type='text/javascript')
elif 'jsonp' in r.REQUEST:
return HttpResponse('{c}({i})'.format(c=r.REQUEST['jsonp'], i=i),
content_type='text/javascript')
else:
return HttpResponse(i, content_type='application/json', status=code)
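# A usage sketch (hypothetical request): a GET with ?callback=handle and
# i='{"ok": true}' yields the body 'handle({"ok": true})' served as
# text/javascript; without 'callback' or 'jsonp' the plain JSON body is
# returned as application/json with the given status code.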
# Since a SessionException will be raised for all iRODS-related operations from the django_irods
# module, there is no need to raise an iRODS SessionException from within this function
def upload_from_irods(username, password, host, port, zone, irods_fnames, res_files):
"""
    Use iget to transfer selected data objects from an iRODS zone to local NamedTemporaryFiles
    :param username: iRODS login account username used to download iRODS data objects for uploading
    :param password: iRODS login account password used to download iRODS data objects for uploading
    :param host: iRODS login host used to download iRODS data objects for uploading
    :param port: iRODS login port used to download iRODS data objects for uploading
    :param zone: iRODS login zone used to download iRODS data objects for uploading
    :param irods_fnames: comma-separated data object file names to download locally for uploading
:param res_files: list of files for uploading to create resources
    :raises: SessionException (proc.returncode, stdout, stderr), defined in
    django_irods/icommands.py, which captures iRODS exceptions raised from the iRODS icommand
    subprocess run triggered by any method call on IrodsStorage() if an error ever occurs
:return: None, but the downloaded file from the iRODS will be appended to res_files list for
uploading
"""
irods_storage = IrodsStorage()
irods_storage.set_user_session(username=username, password=password, host=host, port=port,
zone=zone)
    ifnames = irods_fnames.split(',')
for ifname in ifnames:
size = irods_storage.size(ifname)
if size > FILE_SIZE_LIMIT:
raise_file_size_exception()
tmpFile = irods_storage.download(ifname)
fname = os.path.basename(ifname.rstrip(os.sep))
res_files.append(UploadedFile(file=tmpFile, name=fname, size=size))
# delete the user session after iRODS file operations are done
irods_storage.delete_user_session()
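# A minimal usage sketch (hypothetical credentials and object paths):
# res_files = []
# upload_from_irods(username='alice', password='secret', host='irods.example.org',
#                   port='1247', zone='exampleZone',
#                   irods_fnames='/exampleZone/home/alice/data1.nc,/exampleZone/home/alice/data2.nc',
#                   res_files=res_files)
# # res_files now holds UploadedFile objects ready for resource creation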
def run_ssh_command(host, uname, pwd, exec_cmd):
"""
    Run an SSH client to connect to a remote host and execute a command on it
Args:
host: remote host name to ssh to
uname: the username on the remote host to ssh to
pwd: the password of the user on the remote host to ssh to
exec_cmd: the command to be executed on the remote host via ssh
Returns:
        the command output joined into a single string, or '' if there was no output;
        raises SSHException from paramiko if there is any error during ssh
        connection and command execution
"""
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(host, username=uname, password=pwd)
transport = ssh.get_transport()
session = transport.open_session()
session.set_combine_stderr(True)
session.get_pty()
session.exec_command(exec_cmd)
stdin = session.makefile('wb', -1)
stdout = session.makefile('rb', -1)
stdin.write("{cmd}\n".format(cmd=pwd))
stdin.flush()
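    # Presumably the remote command prompts for a password (get_pty() above
    # makes such prompts read from the pseudo-terminal), so the user's
    # password is written to the remote stdin to answer it.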
logger = logging.getLogger('django')
output = stdout.readlines()
if output:
logger.debug(output)
return '.'.join(output)
else:
return ''
# run the update script on hyrax server via ssh session for netCDF resources on demand
# when private netCDF resources are made public so that all links of data services
# provided by Hyrax service are instantaneously available on demand
def run_script_to_update_hyrax_input_files(shortkey):
run_ssh_command(host=settings.HYRAX_SSH_HOST, uname=settings.HYRAX_SSH_PROXY_USER,
pwd=settings.HYRAX_SSH_PROXY_USER_PWD,
exec_cmd=settings.HYRAX_SCRIPT_RUN_COMMAND + ' ' + shortkey)
def can_user_copy_resource(res, user):
"""
    Check whether copying the resource is permitted
    :param res: resource object to check for whether copy is allowed
    :param user: the requesting user to check for whether copy is allowed
    :return: True if the resource can be copied; False otherwise
"""
if not user.is_authenticated():
return False
if not user.uaccess.owns_resource(res) and \
(res.metadata.rights.statement == "This resource is shared under the Creative "
"Commons Attribution-NoDerivs CC BY-ND." or
res.metadata.rights.statement == "This resource is shared under the Creative "
"Commons Attribution-NoCommercial-NoDerivs "
"CC BY-NC-ND."):
return False
return True
def authorize(request, res_id, needed_permission=ACTION_TO_AUTHORIZE.VIEW_RESOURCE,
raises_exception=True):
"""
    This function checks if a user has authorization for resource-related actions as outlined
    below. It does not check authorization for a user sharing a resource with another user.
    How to call this function for different actions on a resource:
1. User wants to view a resource (both metadata and content files) which includes
downloading resource bag or resource content files:
authorize(request, res_id=id_of_resource,
needed_permission=ACTION_TO_AUTHORIZE.VIEW_RESOURCE)
2. User wants to view resource metadata only:
authorize(request, res_id=id_of_resource,
needed_permission=ACTION_TO_AUTHORIZE.VIEW_METADATA)
3. User wants to edit a resource which includes:
a. edit metadata
b. add file to resource
c. delete a file from the resource
authorize(request, res_id=id_of_resource,
needed_permission=ACTION_TO_AUTHORIZE.EDIT_RESOURCE)
4. User wants to set resource flag (public, published, shareable etc):
authorize(request, res_id=id_of_resource,
needed_permission=ACTION_TO_AUTHORIZE.SET_RESOURCE_FLAG)
5. User wants to delete a resource:
authorize(request, res_id=id_of_resource,
needed_permission=ACTION_TO_AUTHORIZE.DELETE_RESOURCE)
6. User wants to create new version of a resource:
authorize(request, res_id=id_of_resource,
needed_permission=ACTION_TO_AUTHORIZE.CREATE_RESOURCE_VERSION)
Note: resource 'shareable' status has no effect on authorization
"""
authorized = False
user = get_user(request)
try:
res = hydroshare.utils.get_resource_by_shortkey(res_id, or_404=False)
except ObjectDoesNotExist:
raise NotFound(detail="No resource was found for resource id:%s" % res_id)
if needed_permission == ACTION_TO_AUTHORIZE.VIEW_METADATA:
if res.raccess.discoverable or res.raccess.public:
authorized = True
elif user.is_authenticated() and user.is_active:
authorized = user.uaccess.can_view_resource(res)
elif user.is_authenticated() and user.is_active:
if needed_permission == ACTION_TO_AUTHORIZE.VIEW_RESOURCE:
authorized = user.uaccess.can_view_resource(res)
elif needed_permission == ACTION_TO_AUTHORIZE.EDIT_RESOURCE:
authorized = user.uaccess.can_change_resource(res)
elif needed_permission == ACTION_TO_AUTHORIZE.DELETE_RESOURCE:
authorized = user.uaccess.can_delete_resource(res)
elif needed_permission == ACTION_TO_AUTHORIZE.SET_RESOURCE_FLAG:
authorized = user.uaccess.can_change_resource_flags(res)
elif needed_permission == ACTION_TO_AUTHORIZE.CREATE_RESOURCE_VERSION:
authorized = user.uaccess.owns_resource(res)
elif needed_permission == ACTION_TO_AUTHORIZE.VIEW_RESOURCE_ACCESS:
authorized = user.uaccess.can_view_resource(res)
elif needed_permission == ACTION_TO_AUTHORIZE.EDIT_RESOURCE_ACCESS:
            authorized = user.uaccess.can_share_resource(res, 2)  # share at privilege level 2 (CHANGE)
elif needed_permission == ACTION_TO_AUTHORIZE.VIEW_RESOURCE:
authorized = res.raccess.public
if raises_exception and not authorized:
raise PermissionDenied()
else:
return res, authorized, user
def validate_json(js):
try:
json.loads(js)
except ValueError:
raise ValidationError(detail='Invalid JSON')
def validate_user_name(user_name):
if not User.objects.filter(username=user_name).exists():
raise ValidationError(detail='No user found for user name:%s' % user_name)
def validate_group_name(group_name):
if not Group.objects.filter(name=group_name).exists():
raise ValidationError(detail='No group found for group name:%s' % group_name)
def validate_metadata(metadata, resource_type):
"""
Validate metadata including validation of resource type specific metadata.
If validation fails, ValidationError exception is raised.
    Note: This validation does not check if a specific element is repeatable or not. If an element
    is not repeatable and the metadata list contains more than one dict for the same element type,
    an exception will be raised when that element is created the second time.
    :param metadata: a list of dicts where each dict defines data for a specific metadata
    element.
    Example: the following list contains 2 dict elements - one for the 'Description' element
    and the other for the 'Coverage' element.
    [{'description': {'abstract': 'This is a great resource'}},
    {'coverage': {'value': {'type': 'period', 'start': '01/01/2010', 'end': '12/12/2015'}}}]
:param resource_type: resource type name (e.g., "GenericResource" or "TimeSeriesResource")
:return:
"""
resource_class = check_resource_type(resource_type)
validation_errors = {'metadata': []}
for element in metadata:
# here k is the name of the element
# v is a dict of all element attributes/field names and field values
k, v = element.items()[0]
is_core_element = False
model_type = None
try:
model_type = ContentType.objects.get(app_label=resource_class._meta.app_label, model=k)
except ObjectDoesNotExist:
try:
model_type = ContentType.objects.get(app_label='hs_core', model=k)
is_core_element = True
except ObjectDoesNotExist:
validation_errors['metadata'].append("Invalid metadata element name:%s." % k)
if model_type:
if not issubclass(model_type.model_class(), AbstractMetaDataElement):
validation_errors['metadata'].append("Invalid metadata element name:%s." % k)
element_attribute_names_valid = True
for attribute_name in v:
element_class = model_type.model_class()
if k.lower() == 'coverage' or k.lower() == 'originalcoverage':
if attribute_name == 'value':
attribute_name = '_value'
if hasattr(element_class(), attribute_name):
if callable(getattr(element_class(), attribute_name)):
element_attribute_names_valid = False
validation_errors['metadata'].append(
"Invalid attribute name:%s found for metadata element name:%s."
% (attribute_name, k))
else:
element_attribute_names_valid = False
validation_errors['metadata'].append(
"Invalid attribute name:%s found for metadata element name:%s."
% (attribute_name, k))
if element_attribute_names_valid:
if is_core_element:
element_resource_class = GenericResource().__class__
else:
element_resource_class = resource_class
# here we expect element form validation to happen as part of the signal handler
# in each resource type
handler_response = pre_metadata_element_create.send(
sender=element_resource_class, element_name=k,
request=MetadataElementRequest(k, **v))
for receiver, response in handler_response:
if 'is_valid' in response:
if not response['is_valid']:
validation_errors['metadata'].append(
"Invalid data found for metadata element name:%s." % k)
else:
validation_errors['metadata'].append(
"Invalid data found for metadata element name:%s." % k)
if len(validation_errors['metadata']) > 0:
raise ValidationError(detail=validation_errors)
class MetadataElementRequest(object):
def __init__(self, element_name, **element_data_dict):
if element_name.lower() == 'coverage' or element_name.lower() == 'originalcoverage':
cov_type = element_data_dict.get('type', None)
if 'value' in element_data_dict:
element_data_dict = element_data_dict['value']
if cov_type is not None:
element_data_dict['type'] = cov_type
self.POST = element_data_dict
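# MetadataElementRequest is a lightweight stand-in for an HttpRequest: the
# pre_metadata_element_create handlers invoked from validate_metadata() above
# apparently only need request.POST, so exposing the element data dict as
# .POST is sufficient here.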
def create_form(formclass, request):
try:
params = formclass(data=json.loads(request.body))
except ValueError:
params = formclass(data=request.REQUEST)
return params
def get_my_resources_list(request):
user = request.user
# get a list of resources with effective OWNER privilege
owned_resources = user.uaccess.get_resources_with_explicit_access(PrivilegeCodes.OWNER)
# remove obsoleted resources from the owned_resources
owned_resources = owned_resources.exclude(object_id__in=Relation.objects.filter(
type='isReplacedBy').values('object_id'))
# get a list of resources with effective CHANGE privilege
editable_resources = user.uaccess.get_resources_with_explicit_access(PrivilegeCodes.CHANGE)
# remove obsoleted resources from the editable_resources
editable_resources = editable_resources.exclude(object_id__in=Relation.objects.filter(
type='isReplacedBy').values('object_id'))
# get a list of resources with effective VIEW privilege
viewable_resources = user.uaccess.get_resources_with_explicit_access(PrivilegeCodes.VIEW)
# remove obsoleted resources from the viewable_resources
viewable_resources = viewable_resources.exclude(object_id__in=Relation.objects.filter(
type='isReplacedBy').values('object_id'))
owned_resources = list(owned_resources)
editable_resources = list(editable_resources)
viewable_resources = list(viewable_resources)
favorite_resources = list(user.ulabels.favorited_resources)
labeled_resources = list(user.ulabels.labeled_resources)
discovered_resources = list(user.ulabels.my_resources)
for res in owned_resources:
res.owned = True
for res in editable_resources:
res.editable = True
for res in viewable_resources:
res.viewable = True
for res in (owned_resources + editable_resources + viewable_resources + discovered_resources):
res.is_favorite = False
if res in favorite_resources:
res.is_favorite = True
if res in labeled_resources:
res.labels = res.rlabels.get_labels(user)
resource_collection = (owned_resources + editable_resources + viewable_resources +
discovered_resources)
return resource_collection
def send_action_to_take_email(request, user, action_type, **kwargs):
"""
Sends an email with an action link to a user.
The actual action takes place when the user clicks on the link
The ``action_type`` arg is both the name of the urlpattern for
the action link, as well as the names of the email templates
    to use. Additional context variables needed in the email template can be
    passed via kwargs.
    For action_type == 'group_membership', an instance of GroupMembershipRequest
    and an instance of Group are expected to be passed into this function.
"""
email_to = kwargs.get('group_owner', user)
context = {'request': request, 'user': user}
if action_type == 'group_membership':
membership_request = kwargs['membership_request']
action_url = reverse(action_type, kwargs={
"uidb36": int_to_base36(email_to.id),
"token": default_token_generator.make_token(email_to),
"membership_request_id": membership_request.id
}) + "?next=" + (next_url(request) or "/")
context['group'] = kwargs.pop('group')
elif action_type == 'group_auto_membership':
context['group'] = kwargs.pop('group')
action_url = ''
else:
action_url = reverse(action_type, kwargs={
"uidb36": int_to_base36(email_to.id),
"token": default_token_generator.make_token(email_to)
}) + "?next=" + (next_url(request) or "/")
context['action_url'] = action_url
context.update(kwargs)
subject_template_name = "email/%s_subject.txt" % action_type
subject = subject_template(subject_template_name, context)
send_mail_template(subject, "email/%s" % action_type,
settings.DEFAULT_FROM_EMAIL, email_to.email,
context=context)
def show_relations_section(res_obj):
"""
    Determine whether to show the 'Relations' section in the 'Related Resources' tab.
    Return True if the number of "hasPart" relations is less than the total number
    of relation metadata elements.
:param res_obj: resource object
:return: Bool
"""
all_relation_count = res_obj.metadata.relations.count()
has_part_count = res_obj.metadata.relations.filter(type="hasPart").count()
if all_relation_count > has_part_count:
return True
return False
def link_irods_file_to_django(resource, filename, size=0):
# link the newly created file (**filename**) to Django resource model
b_add_file = False
if resource:
if resource.resource_federation_path:
if resource.resource_federation_path in filename:
start_idx = len(resource.resource_federation_path) + len(resource.short_id) + 2
filename = filename[start_idx:]
if not ResourceFile.objects.filter(object_id=resource.id,
fed_resource_file_name_or_path=filename).exists():
ResourceFile.objects.create(content_object=resource,
resource_file=None,
fed_resource_file_name_or_path=filename,
fed_resource_file_size=size)
b_add_file = True
elif not ResourceFile.objects.filter(object_id=resource.id,
resource_file=filename).exists():
ResourceFile.objects.create(content_object=resource,
resource_file=filename)
b_add_file = True
if b_add_file:
file_format_type = get_file_mime_type(filename)
if file_format_type not in [mime.value for mime in resource.metadata.formats.all()]:
resource.metadata.create_element('format', value=file_format_type)
# this should assign a logical file object to this new file
# if this resource supports logical file
resource.set_default_logical_file()
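# Illustrative (hypothetical paths): with resource_federation_path
# '/fedZone/home/user' and short_id 'abc123', an incoming filename
# '/fedZone/home/user/abc123/data/contents/f.txt' is trimmed by start_idx
# (federation path + short_id + the two '/' separators) down to
# 'data/contents/f.txt' before the ResourceFile lookup.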
def link_irods_folder_to_django(resource, istorage, foldername, exclude=()):
"""
    Recursively link an iRODS folder and all files and sub-folders inside it to the
    Django database after iRODS file and folder operations, to keep Django and iRODS in sync.
:param resource: the BaseResource object representing a HydroShare resource
:param istorage: IrodsStorage object
:param foldername: the folder name
:param exclude: a tuple that includes file names to be excluded from linking under the folder;
default is empty meaning nothing is excluded.
:return:
"""
if resource and istorage and foldername:
store = istorage.listdir(foldername)
# add files into Django resource model
for file in store[1]:
if file not in exclude:
file_path = os.path.join(foldername, file)
size = istorage.size(file_path)
link_irods_file_to_django(resource, file_path, size)
# recursively add sub-folders into Django resource model
for folder in store[0]:
link_irods_folder_to_django(resource,
istorage, os.path.join(foldername, folder), exclude)
def rename_irods_file_or_folder_in_django(resource, src_name, tgt_name):
"""
    Rename a file in the Django DB after the file is renamed on the iRODS side
:param resource: the BaseResource object representing a HydroShare resource
:param src_name: the file or folder full path name to be renamed
:param tgt_name: the file or folder full path name to be renamed to
:return:
"""
if resource.resource_federation_path:
res_file_obj = ResourceFile.objects.filter(object_id=resource.id,
fed_resource_file_name_or_path=src_name)
if res_file_obj.exists():
# src_name and tgt_name are file names - replace src_name with tgt_name
# have to delete the original one and create the new one;
# direct replacement does not work
res_file_obj[0].delete()
ResourceFile.objects.create(content_object=resource,
fed_resource_file_name_or_path=tgt_name)
else:
# src_name and tgt_name are folder names
res_file_objs = \
ResourceFile.objects.filter(object_id=resource.id,
fed_resource_file_name_or_path__contains=src_name)
for fobj in res_file_objs:
old_str = fobj.fed_resource_file_name_or_path
new_str = old_str.replace(src_name, tgt_name)
fobj.delete()
ResourceFile.objects.create(content_object=resource,
fed_resource_file_name_or_path=new_str)
else:
res_file_obj = ResourceFile.objects.filter(object_id=resource.id,
resource_file=src_name)
if res_file_obj.exists():
# src_name and tgt_name are file names
# since resource_file is a FileField which cannot be directly renamed,
# this old ResourceFile object has to be deleted followed by creation of
# a new ResourceFile with new file associated that replace the old one
# check if the resource file is part of a logical file
logical_file = res_file_obj[0].logical_file if res_file_obj[0].has_logical_file \
else None
res_file_obj[0].delete()
res_file = ResourceFile.objects.create(content_object=resource, resource_file=tgt_name)
            # if the file we deleted was part of a logical file then we have to make the
            # recreated resource file part of the logical file object
if logical_file is not None:
logical_file.add_resource_file(res_file)
else:
# src_name and tgt_name are folder names
res_file_objs = \
ResourceFile.objects.filter(object_id=resource.id,
resource_file__contains=src_name)
for fobj in res_file_objs:
old_str = fobj.resource_file.name
new_str = old_str.replace(src_name, tgt_name)
                # get the logical file object associated with the resource file
                # so that we can make the recreated resource file part of the same
                # logical file object
logical_file = fobj.logical_file if fobj.has_logical_file else None
fobj.delete()
res_file = ResourceFile.objects.create(content_object=resource,
resource_file=new_str)
# make the recreated resource file part of the logical file
if logical_file is not None:
logical_file.add_resource_file(res_file)
def remove_irods_folder_in_django(resource, istorage, foldername, user):
"""
Remove all files inside a folder in Django DB after the folder is removed from iRODS
:param resource: the BaseResource object representing a HydroShare resource
:param istorage: IrodsStorage object
:param foldername: the folder name that has been removed from iRODS
    :param user: user who initiated the folder delete operation
:return:
"""
if resource and istorage and foldername:
if not foldername.endswith('/'):
foldername += '/'
if resource.resource_federation_path:
if resource.resource_federation_path in foldername:
start_idx = len(resource.resource_federation_path) + len(resource.short_id) + 2
foldername = foldername[start_idx:]
res_file_set = ResourceFile.objects.filter(
object_id=resource.id, fed_resource_file_name_or_path__icontains=foldername)
else:
res_file_set = ResourceFile.objects.filter(
object_id=resource.id, resource_file__icontains=foldername)
# delete all unique logical file objects associated with any resource files to be deleted
# from django as they need to be deleted differently
logical_files = list(set([f.logical_file for f in res_file_set if f.has_logical_file]))
for lf in logical_files:
# this should delete the logical file and any associated metadata
# but does not delete the resource files that are part of the logical file
lf.logical_delete(user, delete_res_files=False)
# delete resource file objects
for f in res_file_set:
filename = hydroshare.get_resource_file_name(f)
f.delete()
hydroshare.delete_format_metadata_after_delete_file(resource, filename)
# send the signal
post_delete_file_from_resource.send(sender=resource.__class__, resource=resource)
def zip_folder(user, res_id, input_coll_path, output_zip_fname, bool_remove_original):
"""
Zip input_coll_path into a zip file in hydroshareZone or any federated zone used for HydroShare
resource backend store and modify HydroShare Django site accordingly.
:param user: the requesting user
:param res_id: resource uuid
:param input_coll_path: relative sub-collection path under res_id collection to be zipped.
:param output_zip_fname: file name only with no path of the generated zip file name
:param bool_remove_original: a boolean indicating whether original files will be deleted
after zipping.
:return: output_zip_fname and output_zip_size pair
"""
resource = hydroshare.utils.get_resource_by_shortkey(res_id)
istorage = resource.get_irods_storage()
res_coll_input = os.path.join(resource.root_path, input_coll_path)
# check resource supports zipping of a folder
if not resource.supports_zip(res_coll_input):
raise ValidationError("Folder zipping is not supported.")
# check if resource supports deleting the original folder after zipping
if bool_remove_original:
if not resource.supports_delete_folder_on_zip(input_coll_path):
raise ValidationError("Deleting of original folder is not allowed after "
"zipping of a folder.")
content_dir = os.path.dirname(res_coll_input)
output_zip_full_path = os.path.join(content_dir, output_zip_fname)
istorage.session.run("ibun", None, '-cDzip', '-f', output_zip_full_path, res_coll_input)
output_zip_size = istorage.size(output_zip_full_path)
link_irods_file_to_django(resource, output_zip_full_path, output_zip_size)
if bool_remove_original:
for f in ResourceFile.objects.filter(object_id=resource.id):
full_path_name, basename, _ = \
hydroshare.utils.get_resource_file_name_and_extension(f)
if resource.resource_federation_path:
full_path_name = os.path.join(resource.root_path, full_path_name)
if res_coll_input in full_path_name and output_zip_full_path not in full_path_name:
delete_resource_file(res_id, basename, user)
# remove empty folder in iRODS
istorage.delete(res_coll_input)
hydroshare.utils.resource_modified(resource, user, overwrite_bag=False)
return output_zip_fname, output_zip_size
def unzip_file(user, res_id, zip_with_rel_path, bool_remove_original):
"""
Unzip the input zip file while preserving folder structures in hydroshareZone or
any federated zone used for HydroShare resource backend store and keep Django DB in sync.
:param user: requesting user
:param res_id: resource uuid
:param zip_with_rel_path: the zip file name with relative path under res_id collection to
be unzipped
:param bool_remove_original: a bool indicating whether original zip file will be deleted
after unzipping.
:return:
"""
resource = hydroshare.utils.get_resource_by_shortkey(res_id)
istorage = resource.get_irods_storage()
zip_with_full_path = os.path.join(resource.root_path, zip_with_rel_path)
if not resource.supports_unzip(zip_with_rel_path):
raise ValidationError("Unzipping of this file is not supported.")
unzip_path = os.path.dirname(zip_with_full_path)
zip_fname = os.path.basename(zip_with_rel_path)
istorage.session.run("ibun", None, '-xDzip', zip_with_full_path, unzip_path)
link_irods_folder_to_django(resource, istorage, unzip_path, (zip_fname,))
if bool_remove_original:
delete_resource_file(res_id, zip_fname, user)
hydroshare.utils.resource_modified(resource, user, overwrite_bag=False)
def create_folder(res_id, folder_path):
"""
create a sub-folder/sub-collection in hydroshareZone or any federated zone used for HydroShare
resource backend store.
:param res_id: resource uuid
:param folder_path: relative path for the new folder to be created under
res_id collection/directory
:return:
"""
resource = hydroshare.utils.get_resource_by_shortkey(res_id)
istorage = resource.get_irods_storage()
coll_path = os.path.join(resource.root_path, folder_path)
if not resource.supports_folder_creation(coll_path):
raise ValidationError("Folder creation is not allowed here.")
istorage.session.run("imkdir", None, '-p', coll_path)
def remove_folder(user, res_id, folder_path):
"""
remove a sub-folder/sub-collection in hydroshareZone or any federated zone used for HydroShare
resource backend store.
:param user: requesting user
:param res_id: resource uuid
:param folder_path: the relative path for the folder to be removed under res_id collection.
:return:
"""
resource = hydroshare.utils.get_resource_by_shortkey(res_id)
istorage = resource.get_irods_storage()
coll_path = os.path.join(resource.root_path, folder_path)
# TODO: Pabitra - resource should check here if folder can be removed
istorage.delete(coll_path)
remove_irods_folder_in_django(resource, istorage, coll_path, user)
if resource.raccess.public or resource.raccess.discoverable:
if not resource.can_be_public_or_discoverable:
resource.raccess.public = False
resource.raccess.discoverable = False
resource.raccess.save()
hydroshare.utils.resource_modified(resource, user, overwrite_bag=False)
def list_folder(res_id, folder_path):
"""
list a sub-folder/sub-collection in hydroshareZone or any federated zone used for HydroShare
resource backend store.
:param res_id: resource uuid
:param folder_path: the relative path for the folder to be listed under res_id collection.
:return:
"""
resource = hydroshare.utils.get_resource_by_shortkey(res_id)
istorage = resource.get_irods_storage()
coll_path = os.path.join(resource.root_path, folder_path)
return istorage.listdir(coll_path)
def move_or_rename_file_or_folder(user, res_id, src_path, tgt_path, validate_move_rename=True):
"""
Move or rename a file or folder in hydroshareZone or any federated zone used for HydroShare
resource backend store.
:param user: requesting user
:param res_id: resource uuid
:param src_path: the relative paths for the source file or folder under res_id collection
:param tgt_path: the relative paths for the target file or folder under res_id collection
:param validate_move_rename: if True, then only ask resource type to check if this action is
allowed. Sometimes resource types internally want to take this action but disallow
this action by a user. In that case resource types set this parameter to False to allow
this action.
:return:
"""
resource = hydroshare.utils.get_resource_by_shortkey(res_id)
istorage = resource.get_irods_storage()
src_full_path = os.path.join(resource.root_path, src_path)
tgt_full_path = os.path.join(resource.root_path, tgt_path)
tgt_file_name = os.path.basename(tgt_full_path)
tgt_file_dir = os.path.dirname(tgt_full_path)
src_file_name = os.path.basename(src_full_path)
src_file_dir = os.path.dirname(src_full_path)
# ensure the target_full_path contains the file name to be moved or renamed to
# if we are moving directories, put the filename into the request.
if src_file_dir != tgt_file_dir and tgt_file_name != src_file_name:
tgt_full_path = os.path.join(tgt_full_path, src_file_name)
if validate_move_rename:
# this must raise ValidationError if move/rename is not allowed by specific resource type
if not resource.supports_rename_path(src_full_path, tgt_full_path):
raise ValidationError("File/folder move/rename is not allowed.")
istorage.moveFile(src_full_path, tgt_full_path)
if resource.resource_federation_path:
rename_irods_file_or_folder_in_django(resource, src_path, tgt_path)
else:
rename_irods_file_or_folder_in_django(resource, src_full_path, tgt_full_path)
hydroshare.utils.resource_modified(resource, user, overwrite_bag=False)
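# Illustrative (hypothetical paths): moving src_path 'data/contents/a.txt' to
# tgt_path 'data/archive' changes both directory and basename, so the source
# file name is appended and the effective target becomes 'data/archive/a.txt'.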
def irods_path_is_allowed(path):
""" paths containing '/../' are suspicious """
if path == "":
raise ValidationError("Empty file paths are not allowed")
if '/../' in path:
raise SuspiciousFileOperation("File paths cannot contain '/../'")
if '/./' in path:
raise SuspiciousFileOperation("File paths cannot contain '/./'")
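# Illustrative: irods_path_is_allowed('data/contents/file.txt') returns quietly,
# 'data/../secret' raises SuspiciousFileOperation, and '' raises ValidationError.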
def get_coverage_data_dict(resource, coverage_type='spatial'):
"""Get coverage data as a dict for the specified resource
:param resource: An instance of BaseResource for which coverage data is needed
:param coverage_type: Type of coverage data needed. Default is spatial otherwise temporal
:return A dict of coverage data
"""
if coverage_type.lower() == 'spatial':
spatial_coverage = resource.metadata.coverages.exclude(type='period').first()
spatial_coverage_dict = {}
if spatial_coverage:
spatial_coverage_dict['type'] = spatial_coverage.type
if spatial_coverage.type == 'point':
spatial_coverage_dict['east'] = spatial_coverage.value['east']
spatial_coverage_dict['north'] = spatial_coverage.value['north']
else:
# type is box
spatial_coverage_dict['eastlimit'] = spatial_coverage.value['eastlimit']
spatial_coverage_dict['northlimit'] = spatial_coverage.value['northlimit']
spatial_coverage_dict['westlimit'] = spatial_coverage.value['westlimit']
spatial_coverage_dict['southlimit'] = spatial_coverage.value['southlimit']
return spatial_coverage_dict
else:
temporal_coverage = resource.metadata.coverages.filter(type='period').first()
temporal_coverage_dict = {}
if temporal_coverage:
temporal_coverage_dict['type'] = temporal_coverage.type
temporal_coverage_dict['start'] = temporal_coverage.value['start']
temporal_coverage_dict['end'] = temporal_coverage.value['end']
return temporal_coverage_dict
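# Illustrative return shapes (hypothetical values):
#   spatial, point: {'type': 'point', 'east': -111.5, 'north': 41.7}
#   spatial, box:   {'type': 'box', 'eastlimit': -110.0, 'northlimit': 42.0,
#                    'westlimit': -112.0, 'southlimit': 41.0}
#   temporal:       {'type': 'period', 'start': '01/01/2000', 'end': '12/31/2000'}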
|
|
# -*- coding: utf-8 -*-
'''
Module for running arbitrary tests
'''
from __future__ import absolute_import
# Import Python libs
import os
import sys
import time
import traceback
import random
# Import Salt libs
import salt
import salt.utils
import salt.version
import salt.loader
import salt.ext.six as six
from salt.utils.decorators import depends
__proxyenabled__ = ['*']
# Don't shadow built-ins.
__func_alias__ = {
'true_': 'true',
'false_': 'false'
}
@depends('non_existantmodulename')
def missing_func():
return 'foo'
def attr_call():
'''
Call grains.items via the attribute
CLI Example::
.. code-block:: bash
salt '*' test.attr_call
'''
return __salt__.grains.items()
def module_report():
'''
    Return a dict containing all of the execution modules with a report on
the overall availability via different references
CLI Example::
.. code-block:: bash
salt '*' test.module_report
'''
ret = {'functions': [],
'function_attrs': [],
'function_subs': [],
'modules': [],
'module_attrs': [],
'missing_attrs': [],
'missing_subs': []}
for ref in __salt__:
if '.' in ref:
ret['functions'].append(ref)
else:
ret['modules'].append(ref)
if hasattr(__salt__, ref):
ret['module_attrs'].append(ref)
for func in __salt__[ref]:
full = '{0}.{1}'.format(ref, func)
if hasattr(getattr(__salt__, ref), func):
ret['function_attrs'].append(full)
if func in __salt__[ref]:
ret['function_subs'].append(full)
for func in ret['functions']:
if func not in ret['function_attrs']:
ret['missing_attrs'].append(func)
if func not in ret['function_subs']:
ret['missing_subs'].append(func)
return ret
def echo(text):
'''
Return a string - used for testing the connection
CLI Example:
.. code-block:: bash
salt '*' test.echo 'foo bar baz quo qux'
'''
return text
def ping():
'''
Used to make sure the minion is up and responding. Not an ICMP ping.
Returns ``True``.
CLI Example:
.. code-block:: bash
salt '*' test.ping
'''
if not salt.utils.is_proxy():
return True
else:
ping_cmd = __opts__['proxy']['proxytype'] + '.ping'
if __opts__.get('add_proxymodule_to_opts', False):
return __opts__['proxymodule'][ping_cmd]()
else:
return __proxy__[ping_cmd]()
def sleep(length):
'''
Instruct the minion to initiate a process that will sleep for a given
period of time.
CLI Example:
.. code-block:: bash
salt '*' test.sleep 20
'''
time.sleep(int(length))
return True
def rand_sleep(max=60):
'''
Sleep for a random number of seconds, used to test long-running commands
and minions returning at differing intervals
CLI Example:
.. code-block:: bash
salt '*' test.rand_sleep 60
'''
time.sleep(random.randint(0, max))
return True
def version():
'''
Return the version of salt on the minion
CLI Example:
.. code-block:: bash
salt '*' test.version
'''
return salt.version.__version__
def versions_information():
'''
Report the versions of dependent and system software
CLI Example:
.. code-block:: bash
salt '*' test.versions_information
'''
return salt.version.versions_information()
def versions_report():
'''
Returns versions of components used by salt
CLI Example:
.. code-block:: bash
salt '*' test.versions_report
'''
return '\n'.join(salt.version.versions_report())
versions = salt.utils.alias_function(versions_report, 'versions')
def conf_test():
'''
Return the value for test.foo in the minion configuration file, or return
the default value
CLI Example:
.. code-block:: bash
salt '*' test.conf_test
'''
return __salt__['config.option']('test.foo')
def get_opts():
'''
Return the configuration options passed to this minion
CLI Example:
.. code-block:: bash
salt '*' test.get_opts
'''
return __opts__
def cross_test(func, args=None):
'''
Execute a minion function via the __salt__ object in the test
module, used to verify that the minion functions can be called
via the __salt__ module.
CLI Example:
.. code-block:: bash
salt '*' test.cross_test file.gid_to_group 0
'''
if args is None:
args = []
return __salt__[func](*args)
def kwarg(**kwargs):
'''
    Print out the data passed into the function ``**kwargs``; this is used
    both to test the publication data and CLI kwarg passing, and also to
    display the information available within the publication data.
CLI Example:
.. code-block:: bash
salt '*' test.kwarg num=1 txt="two" env='{a: 1, b: "hello"}'
'''
return kwargs
def arg(*args, **kwargs):
'''
    Print out the data passed into the function ``*args`` and ``**kwargs``; this
    is used both to test the publication data and CLI argument passing, and
    also to display the information available within the publication data.
Returns {"args": args, "kwargs": kwargs}.
CLI Example:
.. code-block:: bash
salt '*' test.arg 1 "two" 3.1 txt="hello" wow='{a: 1, b: "hello"}'
'''
return {"args": args, "kwargs": kwargs}
def arg_type(*args, **kwargs):
'''
Print out the types of the args and kwargs. This is used to test the types
of the args and kwargs passed down to the minion
CLI Example:
.. code-block:: bash
salt '*' test.arg_type 1 'int'
'''
ret = {'args': [], 'kwargs': {}}
# all the args
for argument in args:
ret['args'].append(str(type(argument)))
# all the kwargs
for key, val in six.iteritems(kwargs):
ret['kwargs'][key] = str(type(val))
return ret
def arg_repr(*args, **kwargs):
'''
    Print out the data passed into the function ``*args`` and ``**kwargs``; this
    is used both to test the publication data and CLI argument passing, and
    also to display the information available within the publication data.
Returns {"args": repr(args), "kwargs": repr(kwargs)}.
CLI Example:
.. code-block:: bash
salt '*' test.arg_repr 1 "two" 3.1 txt="hello" wow='{a: 1, b: "hello"}'
'''
return {"args": repr(args), "kwargs": repr(kwargs)}
def fib(num):
'''
Return the num-th Fibonacci number, and the time it took to compute in
seconds. Used for performance tests.
This function is designed to have terrible performance.
CLI Example:
.. code-block:: bash
salt '*' test.fib 3
'''
num = int(num)
start = time.time()
if num < 2:
return num, time.time() - start
return _fib(num-1) + _fib(num-2), time.time() - start
def _fib(num):
'''
Helper method for test.fib, doesn't calculate the time.
'''
if num < 2:
return num
return _fib(num-1) + _fib(num-2)
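# Illustrative: fib(10) returns (55, <elapsed seconds>); the naive double
# recursion grows exponentially with num, which is the point for a
# performance test.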
def collatz(start):
'''
Execute the collatz conjecture from the passed starting number,
returns the sequence and the time it took to compute. Used for
performance tests.
CLI Example:
.. code-block:: bash
salt '*' test.collatz 3
'''
start = int(start)
begin = time.time()
steps = []
while start != 1:
steps.append(start)
if start > 1:
if start % 2 == 0:
start = start / 2
else:
start = start * 3 + 1
return steps, time.time() - begin
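# Illustrative: collatz(6) returns ([6, 3, 10, 5, 16, 8, 4, 2], <elapsed>),
# halving even values and mapping odd values to 3n + 1 until reaching 1.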
def outputter(data):
'''
Test the outputter, pass in data to return
CLI Example:
.. code-block:: bash
salt '*' test.outputter foobar
'''
return data
def retcode(code=42):
'''
Test that the returncode system is functioning correctly
CLI Example:
.. code-block:: bash
salt '*' test.retcode 42
'''
__context__['retcode'] = code
return True
def provider(module):
'''
Pass in a function name to discover what provider is being used
CLI Example:
.. code-block:: bash
salt '*' test.provider service
'''
func = ''
for key in __salt__:
if not key.startswith('{0}.'.format(module)):
continue
func = key
break
if not func:
return ''
pfn = sys.modules[__salt__[func].__module__].__file__
pfn = os.path.basename(pfn)
return pfn[:pfn.rindex('.')]
def providers():
'''
Return a dict of the provider names and the files that provided them
CLI Example:
.. code-block:: bash
salt '*' test.providers
'''
ret = {}
for funcname in __salt__:
modname = funcname.split('.')[0]
        provider_name = provider(modname)
        if provider_name not in ret:
            ret[provider_name] = modname
return ret
def not_loaded():
'''
List the modules that were not loaded by the salt loader system
CLI Example:
.. code-block:: bash
salt '*' test.not_loaded
'''
prov = providers()
ret = set()
for mod_dir in salt.loader._module_dirs(__opts__, 'modules', 'module'):
if not os.path.isabs(mod_dir):
continue
if not os.path.isdir(mod_dir):
continue
for fn_ in os.listdir(mod_dir):
if fn_.startswith('_'):
continue
name = fn_.split('.')[0]
if name not in prov:
ret.add(name)
return sorted(ret)
def opts_pkg():
'''
Return an opts package with the grains and opts for this minion.
This is primarily used to create the options used for master side
state compiling routines
CLI Example:
.. code-block:: bash
salt '*' test.opts_pkg
'''
ret = {}
ret.update(__opts__)
ret['grains'] = __grains__
return ret
def rand_str(size=9999999999, hash_type=None):
'''
Return a random string
size
size of the string to generate
hash_type
hash type to use
.. versionadded:: 2015.5.2
CLI Example:
.. code-block:: bash
salt '*' test.rand_str
'''
if not hash_type:
hash_type = __opts__.get('hash_type', 'md5')
return salt.utils.rand_str(hash_type=hash_type, size=size)
def exception(message='Test Exception'):
'''
Raise an exception
Optionally provide an error message or output the full stack.
CLI Example:
.. code-block:: bash
salt '*' test.exception 'Oh noes!'
'''
raise Exception(message)
def stack():
'''
Return the current stack trace
CLI Example:
.. code-block:: bash
salt '*' test.stack
'''
return ''.join(traceback.format_stack())
def tty(*args, **kwargs): # pylint: disable=W0613
'''
Deprecated! Moved to cmdmod.
CLI Example:
.. code-block:: bash
salt '*' test.tty tty0 'This is a test'
salt '*' test.tty pts3 'This is a test'
'''
return 'ERROR: This function has been moved to cmd.tty'
def try_(module, return_try_exception=False, **kwargs):
'''
Try to run a module command. On an exception return None.
If `return_try_exception` is set True return the exception.
This can be helpful in templates where running a module might fail as expected.
CLI Example:
.. code-block:: bash
        {% for i in range(0,230) %}
        {{ salt['test.try'](module='ipmi.get_users', bmc_host='172.2.2.'+i)|yaml(False) }}
        {% endfor %}
'''
try:
return __salt__[module](**kwargs)
except Exception as e:
if return_try_exception:
return e
return None
def assertion(assertion):
'''
Assert the given argument
CLI Example:
.. code-block:: bash
        salt '*' test.assertion False
'''
assert assertion
def true_():
'''
Always return True
CLI Example:
.. code-block:: bash
salt '*' test.true
'''
return True
def false_():
'''
Always return False
CLI Example:
.. code-block:: bash
salt '*' test.false
'''
return False
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.5@@
import logging
from collections import defaultdict
from email.MIMEText import MIMEText
from email.mime.multipart import MIMEMultipart
from os.path import join
from bs4 import BeautifulSoup
from typing import Dict, List, Optional
from webapp2 import RequestHandler
from rogerthat.bizz.communities.communities import get_community
from rogerthat.dal.app import get_app_by_id
from rogerthat.models import ServiceProfile, App, ServiceIdentity
from rogerthat.models.settings import ServiceInfo
from rogerthat.rpc import users
from rogerthat.service.api.system import get_identity
from rogerthat.settings import get_server_settings, ServerSettings
from rogerthat.to import TO
from rogerthat.to.forms import DynamicFormTO, SingleSelectComponentValueTO, SingleSelectComponentTO, ValueTO, \
NextActionURLTO
from rogerthat.to.service import UserDetailsTO
from rogerthat.utils import send_mail_via_mime
from solutions import translate
from solutions.common.bizz.forms.integrations import BaseFormIntegration
from solutions.common.dal import get_solution_settings
from solutions.common.handlers import JINJA_COMPRESSED_ENVIRONMENT
from solutions.common.models.forms import FormSubmission, EmailIntegrationFormConfigTO
from solutions.common.to.forms import FormSubmissionTO
class EmailFormIntegrationConfiguration(TO):
pass
def _convert_mapping(config):
# type: (EmailIntegrationFormConfigTO) -> Dict[str, Dict[str, Dict[str, int]]]
section_mapping = {}
for mapping in config.mapping:
if mapping.section_id not in section_mapping:
section_mapping[mapping.section_id] = {}
if mapping.component_id not in section_mapping[mapping.section_id]:
section_mapping[mapping.section_id][mapping.component_id] = {}
section_mapping[mapping.section_id][mapping.component_id][mapping.component_value] = mapping.group_id
return section_mapping
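# Illustrative (hypothetical ids): a single mapping entry with section_id='s1',
# component_id='c1', component_value='Roads' and group_id=2 produces
# {'s1': {'c1': {'Roads': 2}}}, so a submission choosing 'Roads' in that
# component resolves to email group 2.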
def _get_group_id_from_mapping(section_mapping, form_submission):
# type: (Dict[str, Dict[str, Dict[str, int]]], FormSubmissionTO) -> Optional[int]
for section in form_submission.sections:
for component in section.components:
if isinstance(component, SingleSelectComponentValueTO):
group_id = section_mapping.get(section.id, {}).get(component.id, {}).get(component.value)
if group_id is not None:
return group_id
return None
def _should_send_email(form, submission_to):
single_select_value_mapping = defaultdict(dict) # type: Dict[str, Dict[str, Dict[str, ValueTO]]]
for section in form.sections:
for component in section.components:
if isinstance(component, SingleSelectComponentTO):
choices_mapping = {choice.value: choice for choice in component.choices}
single_select_value_mapping[section.id][component.id] = choices_mapping
last_section = submission_to.sections[-1]
for component in reversed(last_section.components):
if isinstance(component, SingleSelectComponentValueTO):
choice = single_select_value_mapping[last_section.id][component.id][component.value]
if isinstance(choice.next_action, NextActionURLTO):
return False
return True
class EmailFormIntegration(BaseFormIntegration):
def __init__(self, configuration):
self.configuration = EmailFormIntegrationConfiguration.from_dict(configuration or {})
super(EmailFormIntegration, self).__init__(self.configuration)
def update_configuration(self, form_id, configuration, service_profile):
# Nothing special needs to happen
pass
def submit(self, form_configuration, submission, form, service_profile, user_details):
# type: (dict, FormSubmission, DynamicFormTO, ServiceProfile, UserDetailsTO) -> None
config = EmailIntegrationFormConfigTO.from_dict(form_configuration)
submission_to = FormSubmissionTO.from_model(submission)
if not _should_send_email(form, submission_to):
logging.debug('Not sending form submission email because last submitted component has a NextActionURLTO')
return
section_mapping = _convert_mapping(config)
group_id = _get_group_id_from_mapping(section_mapping, submission_to)
if group_id is None:
if config.default_group:
group_id = config.default_group
else:
logging.debug('Not sending form submission email: there is no default group id assigned')
return
        # guard against a group_id that matches none of the configured groups
        group = next((g for g in config.email_groups if g.id == group_id), None)
        if group is None:
            logging.debug('Not sending form submission email: no email group with id %s', group_id)
            return
        logging.debug('Sending form submission email to group %s(%d)', group, group_id)
reply_to_email = user_details.email
_send_form_submission_email(group.emails, service_profile, form, submission_to, reply_to_email)
return None
def _get_form_submission_email_html(settings, service_name, app, language, form, submission):
# type: (ServerSettings, str, App, str, DynamicFormTO, FormSubmissionTO) -> str
signin_url = settings.get_signin_url()
dashboard_url = '<a href="%s">%s</a>' % (signin_url, translate(language, 'dashboard').lower())
footer_html = translate(language, 'forms_email_submission_footer', form_name=form.title, service_name=service_name,
dashboard_url=dashboard_url)
mapping = form.to_mapping()
sections = []
for section_value in submission.sections:
section_mapping = mapping.get(section_value.id)
section_dict = {'section': section_mapping.section, 'components': []}
for component_value in section_value.components:
if component_value.id in section_mapping.components:
section_dict['components'].append({
'component': section_mapping.components[component_value.id],
'value': component_value,
})
if section_dict['components']:
sections.append(section_dict)
html_params = {
'logo_url': settings.baseUrl + '/static/images/public/logo.png',
'form_title': form.title,
'language': language,
'sections': sections,
'footer': footer_html.replace('\n', '<br>'),
}
return JINJA_COMPRESSED_ENVIRONMENT.get_template(join('emails', 'form-submission.tmpl')).render(html_params)
def _send_form_submission_email(emails, service_profile, form, submission, reply_to_email):
# type: (List[str], ServiceProfile, DynamicFormTO, FormSubmissionTO, str) -> None
settings = get_server_settings()
community = get_community(service_profile.community_id)
service_info = ServiceInfo.create_key(service_profile.service_user, ServiceIdentity.DEFAULT).get()
app = get_app_by_id(community.default_app)
lang = get_solution_settings(service_profile.service_user).main_language
mime_root = MIMEMultipart('related')
mime_root['Subject'] = '%s - %s ' % (translate(lang, 'our-city-app').title(), form.title)
mime_root['From'] = '%s <%s>' % (community.name, app.dashboard_email_address)
mime_root['To'] = ', '.join(emails)
mime_root['Reply-To'] = reply_to_email
mime = MIMEMultipart('alternative')
mime_root.attach(mime)
html_body = _get_form_submission_email_html(settings, service_info.name, app, lang, form, submission)
body = BeautifulSoup(html_body, features='lxml').get_text('\n')
mime.attach(MIMEText(body.encode('utf-8'), 'plain', 'utf-8'))
mime.attach(MIMEText(html_body.encode('utf-8'), 'html', 'utf-8'))
send_mail_via_mime(settings.senderEmail, emails, mime_root)
class TestFormSubmissionEmailHandler(RequestHandler):
def get(self, form_id, submission_id):
from rogerthat.bizz.forms import get_form
version = self.request.GET.get('version', 'html')
form = get_form(long(form_id))
form_to = DynamicFormTO.from_model(form)
submission = FormSubmission.create_key(long(submission_id)).get()
submission_to = FormSubmissionTO.from_model(submission)
service_user = users.User(form.service)
lang = get_solution_settings(service_user).main_language
with users.set_user(service_user):
si = get_identity()
app = get_app_by_id(si.app_ids[0])
server_settings = get_server_settings()
html = _get_form_submission_email_html(server_settings, si.name, app, lang, form_to, submission_to)
if version == 'text':
text_version = BeautifulSoup(html, features='lxml').get_text('\n')
self.response.out.write('<pre>%s</pre>' % text_version)
else:
self.response.out.write(html)
|
|
from PyQt4 import QtGui, QtCore
from datetime import datetime
from rq.job import Job
from redis import Redis
from rq import Queue
from gui.experiment_combo_box import ExperimentComboBox
from gui.new_trial_dialog import NewTrialDialog
from gui.sqlalchemy_table_model import SQLAlchemyTableModel
from gui.trial_detail_widget import EchoTrialDetailsWidget, HttpTrialDetailsWidget, RacerDetailsWidget, \
TrialStatusWidget
from lib.racer_driver import execute_trial
from lib.trial_jobs import EchoTrialJob, HTTPTrialJob
from models.experiment import Experiment
from models.trial import Trial
__author__ = 'daniel'
class ExperimentsTab(QtGui.QWidget):
def __init__(self, session = None, parent = None):
super(ExperimentsTab, self).__init__(parent)
self.session = session
self.redis_conn = Redis()
self.current_experiment = None
self.layout = QtGui.QGridLayout()
self.setLayout(self.layout)
# experiments
self.experiment_box = QtGui.QGroupBox(self, title="Experiments")
self.experiment_box.setSizePolicy(QtGui.QSizePolicy.Minimum,QtGui.QSizePolicy.Minimum)
self.layout.addWidget(self.experiment_box,0,0)
self.experiment_box_layout = QtGui.QGridLayout(self.experiment_box)
self.experiment_box.setLayout(self.experiment_box_layout)
self.experiment_list = ExperimentComboBox(session = session)
self.experiment_list.currentIndexChanged.connect(self.update_current_experiment)
self.experiment_box_layout.addWidget(self.experiment_list, 0, 0)
self.new_experiment_button = QtGui.QPushButton("New Experiment")
self.new_experiment_button.released.connect(self.new_experiment)
self.experiment_box_layout.addWidget(self.new_experiment_button, 0, 1)
# data sources
self.data_box = QtGui.QGroupBox(self, title="Trials for this Experiment")
self.data_box.setSizePolicy(QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Expanding )
self.layout.addWidget(self.data_box,1,0, 1, 1)
self.data_box_layout = QtGui.QGridLayout(self.data_box)
self.data_box.setLayout(self.data_box_layout)
self.trial_table = QtGui.QTableView(self)
# self.trial_table.doubleClicked.connect(self.edit_racer)
# self.trial_table.activated.connect(self.update_current_trial)
self.trial_table.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
self.trial_table_model = SQLAlchemyTableModel(session, Trial, [
('Type', Trial.discriminator, 'discriminator'),
('Name', Trial.name, 'name'),
('Reps', Trial.reps, 'reps'),
('Start', Trial.start_date, 'start_date'),
('End', Trial.end_date, 'end_date')
])
self.trial_table.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.trial_table.customContextMenuRequested.connect(self.display_context_menu)
self.trial_table.doubleClicked.connect(self.edit_trial)
self.trial_table.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
self.trial_table_selection_model = QtGui.QItemSelectionModel(self.trial_table_model)
self.trial_table.setModel(self.trial_table_selection_model.model())
self.trial_table.setSelectionModel(self.trial_table_selection_model)
self.trial_table_selection_model.selectionChanged.connect(self.update_current_trial)
self.data_box_layout.addWidget(self.trial_table, 0, 0, 2, 1)
self.add_trial_button = QtGui.QPushButton("New Trial")
self.data_box_layout.addWidget(self.add_trial_button, 0, 1)
self.add_trial_button.released.connect(self.new_trial)
self.trial_details_box = QtGui.QGroupBox("Trial Details")
self.trial_details_box.setSizePolicy(QtGui.QSizePolicy.Minimum,QtGui.QSizePolicy.Minimum)
self.data_box_layout.addWidget(self.trial_details_box, 2, 0, 1, 1)
self.trial_details_box_layout = QtGui.QGridLayout(self.trial_details_box)
self.echo_trial_details = EchoTrialDetailsWidget()
self.trial_details_box_layout.addWidget(self.echo_trial_details, 0, 0, 1, 1)
self.echo_trial_details.hide()
self.http_trial_details = HttpTrialDetailsWidget()
self.trial_details_box_layout.addWidget(self.http_trial_details, 0, 0, 1, 1)
self.racer_settings_widget = RacerDetailsWidget()
self.trial_details_box_layout.addWidget(self.racer_settings_widget, 1, 0, 1, 1)
self.trial_status = TrialStatusWidget()
# self.trial_status.trial_edit.connect(self.show_edit_dialog)
self.trial_status.trial_started.connect(self.start_trial)
self.trial_status.trial_stopped.connect(self.stop_trial)
self.trial_status.trial_refreshed.connect(self.update_trial_details)
self.trial_status.trial_edit.connect(self.edit_trial)
self.trial_details_box_layout.addWidget(self.trial_status, 0, 1, 2, 1)
self.update_current_experiment(0)
self.trial_table.resizeColumnsToContents()
def display_context_menu(self, pos):
index = self.trial_table.indexAt(pos)
        self.menu = QtGui.QMenu()
self.edit_action = self.menu.addAction("Edit")
self.edit_action.triggered.connect(self.edit_trial)
self.duplicate_action = self.menu.addAction("Duplicate")
self.duplicate_action.triggered.connect(self.duplicate_trial)
self.delete_action = self.menu.addAction("Delete")
self.delete_action.triggered.connect(self.delete_trial)
self.feasibility_separator = self.menu.addAction("---- Feasibility Analysis ----")
self.feasibility_separator.setEnabled(False)
self.shorter_action = self.menu.addAction("Set Shorter Trial")
self.shorter_action.triggered.connect(self.setAsShorterTrial)
self.longer_action = self.menu.addAction("Set Longer Trial")
self.longer_action.triggered.connect(self.setAsLongerTrial)
if self.current_trial.end_date is None:
self.shorter_action.setEnabled(False)
self.longer_action.setEnabled(False)
else:
self.shorter_action.setEnabled(True)
self.longer_action.setEnabled(True)
table_viewport = self.trial_table.viewport()
self.menu.popup(table_viewport.mapToGlobal(pos))
def setAsShorterTrial(self):
self.emit(QtCore.SIGNAL("shorter_trial_set(PyQt_PyObject)"), self.current_trial)
def setAsLongerTrial(self):
self.emit(QtCore.SIGNAL("longer_trial_set(PyQt_PyObject)"), self.current_trial)
def update_current_experiment(self, index):
self.current_experiment = self.experiment_list.currentItem()
#self.experiment_name.setText(self.current_experiment.name)
self.update_trial_table()
# self.trial_table.setSelection()
def update_current_trial(self,x,y):
self.current_trial = self.trial_table_selection_model.currentIndex().data(QtCore.Qt.EditRole)
if len(x.indexes()) == 0:
self.trial_status.start_trial_button.setEnabled(False)
self.trial_status.edit_trial_button.setEnabled(False)
self.trial_status.refresh_trial_button.setEnabled(False)
self.trial_status.stop_trial_button.setEnabled(False)
else:
self.update_trial_details()
def edit_trial(self):
dialog = NewTrialDialog(self.session, experiment=self.current_experiment, parent=self, trial=self.current_trial)
dialog.accepted.connect(self.trial_table_model.refresh)
        dialog.exec_()
def update_trial_details(self):
self.session.refresh(self.current_trial)
self.trial_status.edit_trial_button.setEnabled(True)
self.trial_status.refresh_trial_button.setEnabled(True)
        if self.current_trial.start_date is None:
self.trial_status.start_trial_button.setEnabled(True)
self.trial_status.stop_trial_button.setEnabled(False)
else:
self.trial_status.start_trial_button.setEnabled(False)
self.trial_status.stop_trial_button.setEnabled(True)
        if self.current_trial.__class__.__name__ == "HTTPTrial":
self.echo_trial_details.hide()
self.http_trial_details.show()
self.http_trial_details.request_url.setText(self.current_trial.request_url)
self.http_trial_details.type.setText("HTTP Trial")
else:
self.echo_trial_details.show()
self.http_trial_details.hide()
self.echo_trial_details.delay.setText(str(self.current_trial.delay))
self.echo_trial_details.type.setText("Echo Trial")
self.http_trial_details.name.setText(self.current_trial.name)
self.http_trial_details.description.setText(self.current_trial.description)
self.racer_settings_widget.racer.setText(self.current_trial.racer.hostname)
self.racer_settings_widget.core_id.setText(str(self.current_trial.core_id))
self.racer_settings_widget.real_time.setText(str(self.current_trial.real_time))
self.trial_status.start.setText(str(self.current_trial.start_date))
self.trial_status.end.setText(str(self.current_trial.end_date))
try:
job = Job.fetch(self.current_trial.job, connection=self.redis_conn)
self.trial_status.job_status.setText(job.get_status())
        except Exception:
self.trial_status.job_status.setText("not scheduled")
def stop_trial(self):
self.current_trial.start_date = None
self.current_trial.end_date = None
self.session.add(self.current_trial)
self.session.commit()
job = Job.fetch(self.current_trial.job, connection=self.redis_conn)
job.cancel()
self.update_trial_details()
self.trial_table.resizeColumnsToContents()
def delete_trial(self):
reply = QtGui.QMessageBox.question(self, "Confirm", "Really delete the selected trial?", QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
self.session.delete(self.current_trial)
self.session.commit()
self.update_trial_table()
def duplicate_trial(self):
new = self.current_trial.duplicate()
self.session.add(new)
self.session.commit()
self.update_trial_table()
def start_trial(self):
q = Queue(self.current_trial.racer.hostname, connection=self.redis_conn)
t = self.current_trial
job = None
        if self.current_trial.__class__.__name__ == "HTTPTrial":
job = HTTPTrialJob()
job.request = t.request
job.request_url = t.request_url
else:
job = EchoTrialJob()
job.target_host = t.host
job.target_port = t.port
job.delay = t.delay
job.reps = t.reps
job.core_affinity = t.core_id
if t.real_time:
job.real_time = 1
else:
job.real_time = 0
res = q.enqueue_call(func=execute_trial, args=(job,), result_ttl=-1, timeout=1000000)
self.current_trial.job = res.get_id()
self.current_trial.start_date = datetime.now()
self.session.add(self.current_trial)
self.session.commit()
res.save()
self.trial_status.start_trial_button.setEnabled(False)
self.trial_status.stop_trial_button.setEnabled(True)
self.update_trial_details()
self.trial_table.resizeColumnsToContents()
def update_trial_table(self):
self.trial_table_model.setFilter(Trial.experiment==self.current_experiment)
self.trial_table.resizeColumnsToContents()
def new_trial(self):
dialog = NewTrialDialog(self.session, experiment=self.current_experiment, parent=self)
dialog.accepted.connect(self.trial_table_model.refresh)
        dialog.exec_()
def new_experiment(self):
dialog = QtGui.QInputDialog(self)
dialog.setLabelText("Please enter the name for the new Experiment.")
dialog.textValueSelected.connect(self.store_new_experiment)
        dialog.exec_()
def store_new_experiment(self, name):
exp = Experiment()
exp.name = name
self.session.add(exp)
self.session.commit()
self.experiment_list.refresh_experiments()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implements vlans, bridges, and iptables rules using linux utilities."""
import calendar
import inspect
import netaddr
import os
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova import utils
LOG = logging.getLogger(__name__)
linux_net_opts = [
cfg.StrOpt('dhcpbridge_flagfile',
default='/etc/nova/nova-dhcpbridge.conf',
help='location of flagfile for dhcpbridge'),
cfg.StrOpt('networks_path',
default='$state_path/networks',
help='Location to keep network config files'),
cfg.StrOpt('public_interface',
default='eth0',
help='Interface for public IP addresses'),
cfg.StrOpt('network_device_mtu',
default=None,
help='MTU setting for vlan'),
cfg.StrOpt('dhcpbridge',
default='$bindir/nova-dhcpbridge',
help='location of nova-dhcpbridge'),
cfg.StrOpt('routing_source_ip',
default='$my_ip',
help='Public IP of network host'),
cfg.IntOpt('dhcp_lease_time',
default=120,
help='Lifetime of a DHCP lease in seconds'),
cfg.StrOpt('dns_server',
default=None,
help='if set, uses specific dns server for dnsmasq'),
cfg.ListOpt('dmz_cidr',
default=[],
help='A list of dmz range that should be accepted'),
cfg.StrOpt('dnsmasq_config_file',
default='',
help='Override the default dnsmasq settings with this file'),
cfg.StrOpt('linuxnet_interface_driver',
default='nova.network.linux_net.LinuxBridgeInterfaceDriver',
help='Driver used to create ethernet devices.'),
cfg.StrOpt('linuxnet_ovs_integration_bridge',
default='br-int',
help='Name of Open vSwitch bridge used with linuxnet'),
cfg.BoolOpt('send_arp_for_ha',
default=False,
help='send gratuitous ARPs for HA setup'),
cfg.BoolOpt('use_single_default_gateway',
default=False,
help='Use single default gateway. Only first nic of vm will '
'get default gateway from dhcp server'),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(linux_net_opts)
# NOTE(vish): Iptables supports chain names of up to 28 characters, and we
# add up to 12 characters to binary_name which is used as a prefix,
# so we limit it to 16 characters.
# (max_chain_name_length - len('-POSTROUTING') == 16)
binary_name = os.path.basename(inspect.stack()[-1][1])[:16]
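# Illustrative: a worker started as /usr/bin/nova-compute yields
# binary_name 'nova-compute' (12 characters, within the 16-character cap).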
class IptablesRule(object):
"""An iptables rule.
You shouldn't need to use this class directly, it's only used by
IptablesManager.
"""
def __init__(self, chain, rule, wrap=True, top=False):
self.chain = chain
self.rule = rule
self.wrap = wrap
self.top = top
def __eq__(self, other):
return ((self.chain == other.chain) and
(self.rule == other.rule) and
(self.top == other.top) and
(self.wrap == other.wrap))
def __ne__(self, other):
return not self == other
def __str__(self):
if self.wrap:
chain = '%s-%s' % (binary_name, self.chain)
else:
chain = self.chain
return '-A %s %s' % (chain, self.rule)
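# Illustrative: with binary_name 'nova-compute',
# str(IptablesRule('OUTPUT', '-j ACCEPT')) == '-A nova-compute-OUTPUT -j ACCEPT';
# with wrap=False the chain name would stay plain 'OUTPUT'.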
class IptablesTable(object):
"""An iptables table."""
def __init__(self):
self.rules = []
self.chains = set()
self.unwrapped_chains = set()
def add_chain(self, name, wrap=True):
"""Adds a named chain to the table.
The chain name is wrapped to be unique for the component creating
it, so different components of Nova can safely create identically
named chains without interfering with one another.
At the moment, its wrapped name is <binary name>-<chain name>,
so if nova-compute creates a chain named 'OUTPUT', it'll actually
end up named 'nova-compute-OUTPUT'.
"""
if wrap:
self.chains.add(name)
else:
self.unwrapped_chains.add(name)
def remove_chain(self, name, wrap=True):
"""Remove named chain.
        This removal "cascades". All rules in the chain are removed, as are
all rules in other chains that jump to it.
If the chain is not found, this is merely logged.
"""
if wrap:
chain_set = self.chains
else:
chain_set = self.unwrapped_chains
if name not in chain_set:
LOG.warn(_('Attempted to remove chain %s which does not exist'),
name)
return
chain_set.remove(name)
self.rules = filter(lambda r: r.chain != name, self.rules)
if wrap:
jump_snippet = '-j %s-%s' % (binary_name, name)
else:
jump_snippet = '-j %s' % (name,)
self.rules = filter(lambda r: jump_snippet not in r.rule, self.rules)
def add_rule(self, chain, rule, wrap=True, top=False):
"""Add a rule to the table.
This is just like what you'd feed to iptables, just without
the '-A <chain name>' bit at the start.
However, if you need to jump to one of your wrapped chains,
prepend its name with a '$' which will ensure the wrapping
is applied correctly.
"""
if wrap and chain not in self.chains:
raise ValueError(_('Unknown chain: %r') % chain)
if '$' in rule:
rule = ' '.join(map(self._wrap_target_chain, rule.split(' ')))
self.rules.append(IptablesRule(chain, rule, wrap, top))
def _wrap_target_chain(self, s):
if s.startswith('$'):
return '%s-%s' % (binary_name, s[1:])
return s
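    # Illustrative: with binary_name 'nova-compute', add_rule('FORWARD',
    # '-j $local') stores the rule as '-j nova-compute-local', because
    # _wrap_target_chain expands the '$local' token.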
def remove_rule(self, chain, rule, wrap=True, top=False):
"""Remove a rule from a chain.
Note: The rule must be exactly identical to the one that was added.
You cannot switch arguments around like you can with the iptables
CLI tool.
"""
try:
self.rules.remove(IptablesRule(chain, rule, wrap, top))
except ValueError:
LOG.warn(_('Tried to remove rule that was not there:'
' %(chain)r %(rule)r %(wrap)r %(top)r'),
{'chain': chain, 'rule': rule,
'top': top, 'wrap': wrap})
def empty_chain(self, chain, wrap=True):
"""Remove all rules from a chain."""
chained_rules = [rule for rule in self.rules
if rule.chain == chain and rule.wrap == wrap]
for rule in chained_rules:
self.rules.remove(rule)
class IptablesManager(object):
"""Wrapper for iptables.
See IptablesTable for some usage docs
A number of chains are set up to begin with.
First, nova-filter-top. It's added at the top of FORWARD and OUTPUT. Its
name is not wrapped, so it's shared between the various nova workers. It's
intended for rules that need to live at the top of the FORWARD and OUTPUT
chains. It's in both the ipv4 and ipv6 set of tables.
For ipv4 and ipv6, the built-in INPUT, OUTPUT, and FORWARD filter chains
are wrapped, meaning that the "real" INPUT chain has a rule that jumps to
the wrapped INPUT chain, etc. Additionally, there's a wrapped chain named
"local" which is jumped to from nova-filter-top.
For ipv4, the built-in PREROUTING, OUTPUT, and POSTROUTING nat chains are
    wrapped in the same way as the built-in filter chains. Additionally,
there's a snat chain that is applied after the POSTROUTING chain.
"""
def __init__(self, execute=None):
if not execute:
self.execute = _execute
else:
self.execute = execute
self.ipv4 = {'filter': IptablesTable(),
'nat': IptablesTable()}
self.ipv6 = {'filter': IptablesTable()}
# Add a nova-filter-top chain. It's intended to be shared
# among the various nova components. It sits at the very top
# of FORWARD and OUTPUT.
for tables in [self.ipv4, self.ipv6]:
tables['filter'].add_chain('nova-filter-top', wrap=False)
tables['filter'].add_rule('FORWARD', '-j nova-filter-top',
wrap=False, top=True)
tables['filter'].add_rule('OUTPUT', '-j nova-filter-top',
wrap=False, top=True)
tables['filter'].add_chain('local')
tables['filter'].add_rule('nova-filter-top', '-j $local',
wrap=False)
# Wrap the built-in chains
builtin_chains = {4: {'filter': ['INPUT', 'OUTPUT', 'FORWARD'],
'nat': ['PREROUTING', 'OUTPUT', 'POSTROUTING']},
6: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']}}
for ip_version in builtin_chains:
if ip_version == 4:
tables = self.ipv4
elif ip_version == 6:
tables = self.ipv6
for table, chains in builtin_chains[ip_version].iteritems():
for chain in chains:
tables[table].add_chain(chain)
tables[table].add_rule(chain, '-j $%s' % (chain,),
wrap=False)
# Add a nova-postrouting-bottom chain. It's intended to be shared
# among the various nova components. We set it as the last chain
# of POSTROUTING chain.
self.ipv4['nat'].add_chain('nova-postrouting-bottom', wrap=False)
self.ipv4['nat'].add_rule('POSTROUTING', '-j nova-postrouting-bottom',
wrap=False)
# We add a snat chain to the shared nova-postrouting-bottom chain
# so that it's applied last.
self.ipv4['nat'].add_chain('snat')
self.ipv4['nat'].add_rule('nova-postrouting-bottom', '-j $snat',
wrap=False)
# And then we add a float-snat chain and jump to first thing in
# the snat chain.
self.ipv4['nat'].add_chain('float-snat')
self.ipv4['nat'].add_rule('snat', '-j $float-snat')
@utils.synchronized('iptables', external=True)
def apply(self):
"""Apply the current in-memory set of iptables rules.
This will blow away any rules left over from previous runs of the
same component of Nova, and replace them with our current set of
rules. This happens atomically, thanks to iptables-restore.
"""
s = [('iptables', self.ipv4)]
if FLAGS.use_ipv6:
s += [('ip6tables', self.ipv6)]
for cmd, tables in s:
for table in tables:
current_table, _err = self.execute('%s-save' % (cmd,),
'-t', '%s' % (table,),
run_as_root=True,
attempts=5)
current_lines = current_table.split('\n')
new_filter = self._modify_rules(current_lines,
tables[table])
self.execute('%s-restore' % (cmd,), run_as_root=True,
process_input='\n'.join(new_filter),
attempts=5)
LOG.debug(_("IPTablesManager.apply completed with success"))
def _modify_rules(self, current_lines, table, binary=None):
unwrapped_chains = table.unwrapped_chains
chains = table.chains
rules = table.rules
# Remove any trace of our rules
new_filter = filter(lambda line: binary_name not in line,
current_lines)
seen_chains = False
rules_index = 0
for rules_index, rule in enumerate(new_filter):
if not seen_chains:
if rule.startswith(':'):
seen_chains = True
else:
if not rule.startswith(':'):
break
our_rules = []
for rule in rules:
rule_str = str(rule)
if rule.top:
# rule.top == True means we want this rule to be at the top.
# Further down, we weed out duplicates from the bottom of the
# list, so here we remove the dupes ahead of time.
new_filter = filter(lambda s: s.strip() != rule_str.strip(),
new_filter)
our_rules += [rule_str]
new_filter[rules_index:rules_index] = our_rules
new_filter[rules_index:rules_index] = [':%s - [0:0]' % (name,)
for name in unwrapped_chains]
new_filter[rules_index:rules_index] = [':%s-%s - [0:0]' %
(binary_name, name,)
for name in chains]
seen_lines = set()
def _weed_out_duplicates(line):
line = line.strip()
if line in seen_lines:
return False
else:
seen_lines.add(line)
return True
# We filter duplicates, letting the *last* occurrence take
# precedence.
new_filter.reverse()
new_filter = filter(_weed_out_duplicates, new_filter)
new_filter.reverse()
return new_filter
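    # Illustrative sketch (not part of the original module; the sample
    # iptables-save output is made up):
    #
    #   *filter
    #   :INPUT ACCEPT [0:0]
    #   :FORWARD ACCEPT [0:0]
    #   -A FORWARD -j nova-filter-top
    #   COMMIT
    #
    # _modify_rules() drops every line containing this binary's name,
    # splices our ':<binary>-<chain> - [0:0]' declarations and rules in
    # right after the chain-declaration block, and de-duplicates so the
    # last occurrence of any repeated line wins.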
# NOTE(jkoelker) This is just a nice little stub point since mocking
# builtins with mox is a nightmare
def write_to_file(file, data, mode='w'):
with open(file, mode) as f:
f.write(data)
def ensure_path(path):
if not os.path.exists(path):
os.makedirs(path)
def metadata_forward():
"""Create forwarding rule for metadata."""
iptables_manager.ipv4['nat'].add_rule('PREROUTING',
'-s 0.0.0.0/0 -d 169.254.169.254/32 '
'-p tcp -m tcp --dport 80 -j DNAT '
'--to-destination %s:%s' %
(FLAGS.metadata_host,
FLAGS.metadata_port))
iptables_manager.apply()
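# Illustrative example (not part of the original module; flag values are
# assumed): with metadata_host='10.0.0.1' and metadata_port=8775 the rule
# added above renders as
#   -s 0.0.0.0/0 -d 169.254.169.254/32 -p tcp -m tcp --dport 80 \
#       -j DNAT --to-destination 10.0.0.1:8775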
def metadata_accept():
"""Create the filter accept rule for metadata."""
iptables_manager.ipv4['filter'].add_rule('INPUT',
'-s 0.0.0.0/0 -d %s '
'-p tcp -m tcp --dport %s '
'-j ACCEPT' %
(FLAGS.metadata_host,
FLAGS.metadata_port))
iptables_manager.apply()
def add_snat_rule(ip_range):
iptables_manager.ipv4['nat'].add_rule('snat',
'-s %s -j SNAT --to-source %s' %
(ip_range,
FLAGS.routing_source_ip))
iptables_manager.apply()
def init_host(ip_range=None):
"""Basic networking setup goes here."""
# NOTE(devcamcar): Cloud public SNAT entries and the default
# SNAT rule for outbound traffic.
if not ip_range:
ip_range = FLAGS.fixed_range
add_snat_rule(ip_range)
iptables_manager.ipv4['nat'].add_rule('POSTROUTING',
'-s %s -d %s/32 -j ACCEPT' %
(ip_range, FLAGS.metadata_host))
for dmz in FLAGS.dmz_cidr:
iptables_manager.ipv4['nat'].add_rule('POSTROUTING',
'-s %s -d %s -j ACCEPT' %
(ip_range, dmz))
iptables_manager.ipv4['nat'].add_rule('POSTROUTING',
'-s %(range)s -d %(range)s '
'-m conntrack ! --ctstate DNAT '
'-j ACCEPT' %
{'range': ip_range})
iptables_manager.apply()
def bind_floating_ip(floating_ip, device):
"""Bind ip to public interface."""
_execute('ip', 'addr', 'add', str(floating_ip) + '/32',
'dev', device,
run_as_root=True, check_exit_code=[0, 2, 254])
if FLAGS.send_arp_for_ha:
_execute('arping', '-U', floating_ip,
'-A', '-I', device,
'-c', 1, run_as_root=True, check_exit_code=False)
def unbind_floating_ip(floating_ip, device):
"""Unbind a public ip from public interface."""
_execute('ip', 'addr', 'del', str(floating_ip) + '/32',
'dev', device,
run_as_root=True, check_exit_code=[0, 2, 254])
def ensure_metadata_ip():
"""Sets up local metadata ip."""
_execute('ip', 'addr', 'add', '169.254.169.254/32',
'scope', 'link', 'dev', 'lo',
run_as_root=True, check_exit_code=[0, 2, 254])
def ensure_vpn_forward(public_ip, port, private_ip):
"""Sets up forwarding rules for vlan."""
iptables_manager.ipv4['filter'].add_rule('FORWARD',
'-d %s -p udp '
'--dport 1194 '
'-j ACCEPT' % private_ip)
iptables_manager.ipv4['nat'].add_rule('PREROUTING',
'-d %s -p udp '
'--dport %s -j DNAT --to %s:1194' %
(public_ip, port, private_ip))
iptables_manager.ipv4['nat'].add_rule('OUTPUT',
'-d %s -p udp '
'--dport %s -j DNAT --to %s:1194' %
(public_ip, port, private_ip))
iptables_manager.apply()
def ensure_floating_forward(floating_ip, fixed_ip):
"""Ensure floating ip forwarding rule."""
for chain, rule in floating_forward_rules(floating_ip, fixed_ip):
iptables_manager.ipv4['nat'].add_rule(chain, rule)
iptables_manager.apply()
def remove_floating_forward(floating_ip, fixed_ip):
"""Remove forwarding for floating ip."""
for chain, rule in floating_forward_rules(floating_ip, fixed_ip):
iptables_manager.ipv4['nat'].remove_rule(chain, rule)
iptables_manager.apply()
def floating_forward_rules(floating_ip, fixed_ip):
return [('PREROUTING', '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)),
('OUTPUT', '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)),
('float-snat',
'-s %s -j SNAT --to %s' % (fixed_ip, floating_ip))]
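# Illustrative example (addresses are assumed):
#   floating_forward_rules('192.168.1.100', '10.0.0.3') returns
#   [('PREROUTING', '-d 192.168.1.100 -j DNAT --to 10.0.0.3'),
#    ('OUTPUT', '-d 192.168.1.100 -j DNAT --to 10.0.0.3'),
#    ('float-snat', '-s 10.0.0.3 -j SNAT --to 192.168.1.100')]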
def initialize_gateway_device(dev, network_ref):
if not network_ref:
return
_execute('sysctl', '-w', 'net.ipv4.ip_forward=1', run_as_root=True)
# NOTE(vish): The ip for dnsmasq has to be the first address on the
    #             bridge for it to respond to requests properly
full_ip = '%s/%s' % (network_ref['dhcp_server'],
network_ref['cidr'].rpartition('/')[2])
new_ip_params = [[full_ip, 'brd', network_ref['broadcast']]]
old_ip_params = []
out, err = _execute('ip', 'addr', 'show', 'dev', dev,
'scope', 'global', run_as_root=True)
for line in out.split('\n'):
fields = line.split()
if fields and fields[0] == 'inet':
ip_params = fields[1:-1]
old_ip_params.append(ip_params)
if ip_params[0] != full_ip:
new_ip_params.append(ip_params)
if not old_ip_params or old_ip_params[0][0] != full_ip:
gateway = None
out, err = _execute('route', '-n', run_as_root=True)
for line in out.split('\n'):
fields = line.split()
if fields and fields[0] == '0.0.0.0' and fields[-1] == dev:
gateway = fields[1]
_execute('route', 'del', 'default', 'gw', gateway,
'dev', dev, run_as_root=True,
check_exit_code=[0, 7])
for ip_params in old_ip_params:
_execute(*_ip_bridge_cmd('del', ip_params, dev),
run_as_root=True, check_exit_code=[0, 2, 254])
for ip_params in new_ip_params:
_execute(*_ip_bridge_cmd('add', ip_params, dev),
run_as_root=True, check_exit_code=[0, 2, 254])
if gateway:
_execute('route', 'add', 'default', 'gw', gateway,
run_as_root=True, check_exit_code=[0, 7])
if FLAGS.send_arp_for_ha:
_execute('arping', '-U', network_ref['dhcp_server'],
'-A', '-I', dev,
'-c', 1, run_as_root=True, check_exit_code=False)
    if FLAGS.use_ipv6:
_execute('ip', '-f', 'inet6', 'addr',
'change', network_ref['cidr_v6'],
'dev', dev, run_as_root=True)
def get_dhcp_leases(context, network_ref):
"""Return a network's hosts config in dnsmasq leasefile format."""
hosts = []
host = None
if network_ref['multi_host']:
host = FLAGS.host
for data in db.network_get_associated_fixed_ips(context,
network_ref['id'],
host=host):
hosts.append(_host_lease(data))
return '\n'.join(hosts)
def get_dhcp_hosts(context, network_ref):
"""Get network's hosts config in dhcp-host format."""
hosts = []
host = None
if network_ref['multi_host']:
host = FLAGS.host
for data in db.network_get_associated_fixed_ips(context,
network_ref['id'],
host=host):
hosts.append(_host_dhcp(data))
return '\n'.join(hosts)
def _add_dnsmasq_accept_rules(dev):
"""Allow DHCP and DNS traffic through to dnsmasq."""
table = iptables_manager.ipv4['filter']
for port in [67, 53]:
for proto in ['udp', 'tcp']:
args = {'dev': dev, 'port': port, 'proto': proto}
table.add_rule('INPUT',
'-i %(dev)s -p %(proto)s -m %(proto)s '
'--dport %(port)s -j ACCEPT' % args)
iptables_manager.apply()
def get_dhcp_opts(context, network_ref):
"""Get network's hosts config in dhcp-opts format."""
hosts = []
host = None
if network_ref['multi_host']:
host = FLAGS.host
data = db.network_get_associated_fixed_ips(context,
network_ref['id'],
host=host)
if data:
        # set of instance ids
instance_set = set([datum['instance_id'] for datum in data])
default_gw_vif = {}
for instance_id in instance_set:
vifs = db.virtual_interface_get_by_instance(context, instance_id)
if vifs:
                # offer a default gateway to the first virtual interface
default_gw_vif[instance_id] = vifs[0]['id']
        for datum in data:
            instance_id = datum['instance_id']
            if instance_id in default_gw_vif:
# we don't want default gateway for this fixed ip
if default_gw_vif[instance_id] != datum['vif_id']:
hosts.append(_host_dhcp_opts(datum))
return '\n'.join(hosts)
def release_dhcp(dev, address, mac_address):
utils.execute('dhcp_release', dev, address, mac_address, run_as_root=True)
def update_dhcp(context, dev, network_ref):
conffile = _dhcp_file(dev, 'conf')
write_to_file(conffile, get_dhcp_hosts(context, network_ref))
restart_dhcp(context, dev, network_ref)
def update_dhcp_hostfile_with_text(dev, hosts_text):
conffile = _dhcp_file(dev, 'conf')
write_to_file(conffile, hosts_text)
def kill_dhcp(dev):
pid = _dnsmasq_pid_for(dev)
if pid:
_execute('kill', '-9', pid, run_as_root=True)
# NOTE(ja): Sending a HUP only reloads the hostfile, so any
#           configuration options (like dhcp-range, vlan, ...)
# aren't reloaded.
@utils.synchronized('dnsmasq_start')
def restart_dhcp(context, dev, network_ref):
"""(Re)starts a dnsmasq server for a given network.
If a dnsmasq instance is already running then send a HUP
signal causing it to reload, otherwise spawn a new instance.
"""
conffile = _dhcp_file(dev, 'conf')
if FLAGS.use_single_default_gateway:
# NOTE(vish): this will have serious performance implications if we
# are not in multi_host mode.
optsfile = _dhcp_file(dev, 'opts')
write_to_file(optsfile, get_dhcp_opts(context, network_ref))
os.chmod(optsfile, 0644)
# Make sure dnsmasq can actually read it (it setuid()s to "nobody")
os.chmod(conffile, 0644)
pid = _dnsmasq_pid_for(dev)
# if dnsmasq is already running, then tell it to reload
if pid:
out, _err = _execute('cat', '/proc/%d/cmdline' % pid,
check_exit_code=False)
# Using symlinks can cause problems here so just compare the name
# of the file itself
if conffile.split('/')[-1] in out:
try:
_execute('kill', '-HUP', pid, run_as_root=True)
return
except Exception as exc: # pylint: disable=W0703
LOG.error(_('Hupping dnsmasq threw %s'), exc)
else:
LOG.debug(_('Pid %d is stale, relaunching dnsmasq'), pid)
cmd = ['FLAGFILE=%s' % FLAGS.dhcpbridge_flagfile,
'NETWORK_ID=%s' % str(network_ref['id']),
'dnsmasq',
'--strict-order',
'--bind-interfaces',
'--conf-file=%s' % FLAGS.dnsmasq_config_file,
'--domain=%s' % FLAGS.dhcp_domain,
'--pid-file=%s' % _dhcp_file(dev, 'pid'),
'--listen-address=%s' % network_ref['dhcp_server'],
'--except-interface=lo',
'--dhcp-range=%s,static,%ss' % (network_ref['dhcp_start'],
FLAGS.dhcp_lease_time),
'--dhcp-lease-max=%s' % len(netaddr.IPNetwork(network_ref['cidr'])),
'--dhcp-hostsfile=%s' % _dhcp_file(dev, 'conf'),
'--dhcp-script=%s' % FLAGS.dhcpbridge,
'--leasefile-ro']
if FLAGS.dns_server:
cmd += ['-h', '-R', '--server=%s' % FLAGS.dns_server]
if FLAGS.use_single_default_gateway:
cmd += ['--dhcp-optsfile=%s' % _dhcp_file(dev, 'opts')]
_execute(*cmd, run_as_root=True)
_add_dnsmasq_accept_rules(dev)
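# Illustrative sketch (not part of the original module; flag and network
# values are assumed): for dev='br100', dhcp_server='10.0.0.1',
# dhcp_start='10.0.0.2', a /24 cidr and a 120s lease, the spawned command
# looks roughly like:
#   FLAGFILE=... NETWORK_ID=1 dnsmasq --strict-order --bind-interfaces \
#       --conf-file=... --domain=novalocal --pid-file=.../nova-br100.pid \
#       --listen-address=10.0.0.1 --except-interface=lo \
#       --dhcp-range=10.0.0.2,static,120s --dhcp-lease-max=256 \
#       --dhcp-hostsfile=.../nova-br100.conf --dhcp-script=... --leasefile-ro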
@utils.synchronized('radvd_start')
def update_ra(context, dev, network_ref):
conffile = _ra_file(dev, 'conf')
conf_str = """
interface %s
{
AdvSendAdvert on;
MinRtrAdvInterval 3;
MaxRtrAdvInterval 10;
prefix %s
{
AdvOnLink on;
AdvAutonomous on;
};
};
""" % (dev, network_ref['cidr_v6'])
write_to_file(conffile, conf_str)
# Make sure radvd can actually read it (it setuid()s to "nobody")
os.chmod(conffile, 0644)
pid = _ra_pid_for(dev)
# if radvd is already running, then tell it to reload
if pid:
out, _err = _execute('cat', '/proc/%d/cmdline'
% pid, check_exit_code=False)
if conffile in out:
try:
_execute('kill', pid, run_as_root=True)
except Exception as exc: # pylint: disable=W0703
LOG.error(_('killing radvd threw %s'), exc)
else:
LOG.debug(_('Pid %d is stale, relaunching radvd'), pid)
cmd = ['radvd',
'-C', '%s' % _ra_file(dev, 'conf'),
'-p', '%s' % _ra_file(dev, 'pid')]
_execute(*cmd, run_as_root=True)
def _host_lease(data):
"""Return a host string for an address in leasefile format."""
if data['instance_updated']:
timestamp = data['instance_updated']
else:
timestamp = data['instance_created']
seconds_since_epoch = calendar.timegm(timestamp.utctimetuple())
return '%d %s %s %s *' % (seconds_since_epoch + FLAGS.dhcp_lease_time,
data['vif_address'],
data['address'],
data['instance_hostname'] or '*')
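# Illustrative example (timestamp, MAC and lease time are assumed): for a
# fixed ip last updated at epoch 1300000000 with a 120s lease, _host_lease
# renders a dnsmasq leasefile line such as
#   '1300000120 02:16:3e:aa:bb:cc 10.0.0.3 vm-1 *'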
def _host_dhcp_network(data):
return 'NW-%s' % data['vif_id']
def _host_dhcp(data):
"""Return a host string for an address in dhcp-host format."""
if FLAGS.use_single_default_gateway:
return '%s,%s.%s,%s,%s' % (data['vif_address'],
data['instance_hostname'],
FLAGS.dhcp_domain,
data['address'],
'net:' + _host_dhcp_network(data))
else:
return '%s,%s.%s,%s' % (data['vif_address'],
data['instance_hostname'],
FLAGS.dhcp_domain,
data['address'])
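# Illustrative example (values are assumed): with
# use_single_default_gateway off and dhcp_domain='novalocal', _host_dhcp
# renders a dnsmasq dhcp-host line such as
#   '02:16:3e:aa:bb:cc,vm-1.novalocal,10.0.0.3'
# With the flag on, a ',net:NW-<vif_id>' tag is appended so dnsmasq can
# hand out per-vif options from the opts file.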
def _host_dhcp_opts(data):
"""Return an empty gateway option."""
return '%s,%s' % (_host_dhcp_network(data), 3)
def _execute(*cmd, **kwargs):
"""Wrapper around utils._execute for fake_network."""
if FLAGS.fake_network:
LOG.debug('FAKE NET: %s', ' '.join(map(str, cmd)))
return 'fake', 0
else:
return utils.execute(*cmd, **kwargs)
def _device_exists(device):
"""Check if ethernet device exists."""
(_out, err) = _execute('ip', 'link', 'show', 'dev', device,
check_exit_code=False, run_as_root=True)
return not err
def _dhcp_file(dev, kind):
"""Return path to a pid, leases or conf file for a bridge/device."""
ensure_path(FLAGS.networks_path)
return os.path.abspath('%s/nova-%s.%s' % (FLAGS.networks_path,
dev,
kind))
def _ra_file(dev, kind):
"""Return path to a pid or conf file for a bridge/device."""
ensure_path(FLAGS.networks_path)
return os.path.abspath('%s/nova-ra-%s.%s' % (FLAGS.networks_path,
dev,
kind))
def _dnsmasq_pid_for(dev):
"""Returns the pid for prior dnsmasq instance for a bridge/device.
Returns None if no pid file exists.
If machine has rebooted pid might be incorrect (caller should check).
"""
pid_file = _dhcp_file(dev, 'pid')
if os.path.exists(pid_file):
try:
with open(pid_file, 'r') as f:
return int(f.read())
except (ValueError, IOError):
return None
def _ra_pid_for(dev):
"""Returns the pid for prior radvd instance for a bridge/device.
Returns None if no pid file exists.
If machine has rebooted pid might be incorrect (caller should check).
"""
pid_file = _ra_file(dev, 'pid')
if os.path.exists(pid_file):
with open(pid_file, 'r') as f:
return int(f.read())
def _ip_bridge_cmd(action, params, device):
"""Build commands to add/del ips to bridges/devices."""
cmd = ['ip', 'addr', action]
cmd.extend(params)
cmd.extend(['dev', device])
return cmd
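# Illustrative example:
#   _ip_bridge_cmd('add', ['10.0.0.1/24', 'brd', '10.0.0.255'], 'br100')
#   returns ['ip', 'addr', 'add', '10.0.0.1/24', 'brd', '10.0.0.255',
#            'dev', 'br100']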
# Similar to compute virt layers, the Linux network node
# code uses a flexible driver model to support different ways
# of creating ethernet interfaces and attaching them to the network.
# In the case of a network host, these interfaces
# act as gateway/dhcp/vpn/etc. endpoints not VM interfaces.
interface_driver = None
def _get_interface_driver():
global interface_driver
if not interface_driver:
interface_driver = importutils.import_object(
FLAGS.linuxnet_interface_driver)
return interface_driver
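# Illustrative note (the driver path is assumed): the driver is selected
# by the linuxnet_interface_driver flag, e.g.
#   --linuxnet_interface_driver=
#       nova.network.linux_net.LinuxBridgeInterfaceDriver
# after which the module-level plug()/unplug()/get_dev() below delegate
# to that shared instance.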
def plug(network, mac_address, gateway=True):
return _get_interface_driver().plug(network, mac_address, gateway)
def unplug(network):
return _get_interface_driver().unplug(network)
def get_dev(network):
return _get_interface_driver().get_dev(network)
class LinuxNetInterfaceDriver(object):
"""Abstract class that defines generic network host API"""
""" for for all Linux interface drivers."""
def plug(self, network, mac_address):
"""Create Linux device, return device name"""
raise NotImplementedError()
def unplug(self, network):
"""Destory Linux device, return device name"""
raise NotImplementedError()
def get_dev(self, network):
"""Get device name"""
raise NotImplementedError()
# plugs interfaces using Linux Bridge
class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
def plug(self, network, mac_address, gateway=True):
if network.get('vlan', None) is not None:
iface = FLAGS.vlan_interface or network['bridge_interface']
LinuxBridgeInterfaceDriver.ensure_vlan_bridge(
network['vlan'],
network['bridge'],
iface,
network,
mac_address)
else:
iface = FLAGS.flat_interface or network['bridge_interface']
LinuxBridgeInterfaceDriver.ensure_bridge(
network['bridge'],
iface,
network, gateway)
# NOTE(vish): applying here so we don't get a lock conflict
iptables_manager.apply()
return network['bridge']
def unplug(self, network):
return self.get_dev(network)
def get_dev(self, network):
return network['bridge']
@classmethod
def ensure_vlan_bridge(_self, vlan_num, bridge, bridge_interface,
net_attrs=None, mac_address=None):
"""Create a vlan and bridge unless they already exist."""
interface = LinuxBridgeInterfaceDriver.ensure_vlan(vlan_num,
bridge_interface, mac_address)
LinuxBridgeInterfaceDriver.ensure_bridge(bridge, interface, net_attrs)
return interface
@classmethod
@utils.synchronized('ensure_vlan', external=True)
def ensure_vlan(_self, vlan_num, bridge_interface, mac_address=None):
"""Create a vlan unless it already exists."""
interface = 'vlan%s' % vlan_num
if not _device_exists(interface):
            LOG.debug(_('Starting VLAN interface %s'), interface)
_execute('ip', 'link', 'add', 'link', bridge_interface,
'name', interface, 'type', 'vlan',
'id', vlan_num, run_as_root=True)
# (danwent) the bridge will inherit this address, so we want to
# make sure it is the value set from the NetworkManager
if mac_address:
_execute('ip', 'link', 'set', interface, 'address',
mac_address, run_as_root=True)
_execute('ip', 'link', 'set', interface, 'up', run_as_root=True)
if FLAGS.network_device_mtu:
_execute('ip', 'link', 'set', interface, 'mtu',
FLAGS.network_device_mtu, run_as_root=True)
return interface
@classmethod
@utils.synchronized('ensure_bridge', external=True)
def ensure_bridge(_self, bridge, interface, net_attrs=None, gateway=True):
"""Create a bridge unless it already exists.
:param interface: the interface to create the bridge on.
:param net_attrs: dictionary with attributes used to create bridge.
If net_attrs is set, it will add the net_attrs['gateway'] to the bridge
using net_attrs['broadcast'] and net_attrs['cidr']. It will also add
the ip_v6 address specified in net_attrs['cidr_v6'] if use_ipv6 is set.
The code will attempt to move any ips that already exist on the
interface onto the bridge and reset the default gateway if necessary.
"""
if not _device_exists(bridge):
LOG.debug(_('Starting Bridge interface for %s'), interface)
_execute('brctl', 'addbr', bridge, run_as_root=True)
_execute('brctl', 'setfd', bridge, 0, run_as_root=True)
# _execute('brctl setageing %s 10' % bridge, run_as_root=True)
_execute('brctl', 'stp', bridge, 'off', run_as_root=True)
# (danwent) bridge device MAC address can't be set directly.
# instead it inherits the MAC address of the first device on the
# bridge, which will either be the vlan interface, or a
# physical NIC.
_execute('ip', 'link', 'set', bridge, 'up', run_as_root=True)
if interface:
out, err = _execute('brctl', 'addif', bridge, interface,
check_exit_code=False, run_as_root=True)
# NOTE(vish): This will break if there is already an ip on the
# interface, so we move any ips to the bridge
old_gateway = None
out, err = _execute('route', '-n', run_as_root=True)
for line in out.split('\n'):
fields = line.split()
if (fields and fields[0] == '0.0.0.0' and
fields[-1] == interface):
old_gateway = fields[1]
_execute('route', 'del', 'default', 'gw', old_gateway,
'dev', interface, run_as_root=True,
check_exit_code=[0, 7])
out, err = _execute('ip', 'addr', 'show', 'dev', interface,
'scope', 'global', run_as_root=True)
for line in out.split('\n'):
fields = line.split()
if fields and fields[0] == 'inet':
params = fields[1:-1]
_execute(*_ip_bridge_cmd('del', params, fields[-1]),
run_as_root=True, check_exit_code=[0, 2, 254])
_execute(*_ip_bridge_cmd('add', params, bridge),
run_as_root=True, check_exit_code=[0, 2, 254])
if old_gateway:
_execute('route', 'add', 'default', 'gw', old_gateway,
run_as_root=True, check_exit_code=[0, 7])
            if (err and err != "device %s is already a member of a bridge;"
                    "can't enslave it to bridge %s.\n" % (interface, bridge)):
msg = _('Failed to add interface: %s') % err
raise exception.NovaException(msg)
# Don't forward traffic unless we were told to be a gateway
ipv4_filter = iptables_manager.ipv4['filter']
if gateway:
ipv4_filter.add_rule('FORWARD',
'--in-interface %s -j ACCEPT' % bridge)
ipv4_filter.add_rule('FORWARD',
'--out-interface %s -j ACCEPT' % bridge)
else:
ipv4_filter.add_rule('FORWARD',
'--in-interface %s -j DROP' % bridge)
ipv4_filter.add_rule('FORWARD',
'--out-interface %s -j DROP' % bridge)
# plugs interfaces using Open vSwitch
class LinuxOVSInterfaceDriver(LinuxNetInterfaceDriver):
def plug(self, network, mac_address, gateway=True):
dev = self.get_dev(network)
if not _device_exists(dev):
bridge = FLAGS.linuxnet_ovs_integration_bridge
_execute('ovs-vsctl',
'--', '--may-exist', 'add-port', bridge, dev,
'--', 'set', 'Interface', dev, 'type=internal',
'--', 'set', 'Interface', dev,
'external-ids:iface-id=%s' % dev,
'--', 'set', 'Interface', dev,
'external-ids:iface-status=active',
'--', 'set', 'Interface', dev,
'external-ids:attached-mac=%s' % mac_address,
run_as_root=True)
_execute('ip', 'link', 'set', dev, 'address', mac_address,
run_as_root=True)
if FLAGS.network_device_mtu:
_execute('ip', 'link', 'set', dev, 'mtu',
FLAGS.network_device_mtu, run_as_root=True)
_execute('ip', 'link', 'set', dev, 'up', run_as_root=True)
if not gateway:
# If we weren't instructed to act as a gateway then add the
# appropriate flows to block all non-dhcp traffic.
_execute('ovs-ofctl',
'add-flow', bridge, 'priority=1,actions=drop',
run_as_root=True)
_execute('ovs-ofctl', 'add-flow', bridge,
'udp,tp_dst=67,dl_dst=%s,priority=2,actions=normal' %
mac_address, run_as_root=True)
            # .. and make sure iptables won't forward it as well.
iptables_manager.ipv4['filter'].add_rule('FORWARD',
'--in-interface %s -j DROP' % bridge)
iptables_manager.ipv4['filter'].add_rule('FORWARD',
'--out-interface %s -j DROP' % bridge)
else:
iptables_manager.ipv4['filter'].add_rule('FORWARD',
'--in-interface %s -j ACCEPT' % bridge)
iptables_manager.ipv4['filter'].add_rule('FORWARD',
'--out-interface %s -j ACCEPT' % bridge)
return dev
def unplug(self, network):
dev = self.get_dev(network)
bridge = FLAGS.linuxnet_ovs_integration_bridge
_execute('ovs-vsctl', '--', '--if-exists', 'del-port',
bridge, dev, run_as_root=True)
return dev
def get_dev(self, network):
dev = 'gw-' + str(network['uuid'][0:11])
return dev
# plugs interfaces using Linux Bridge when using QuantumManager
class QuantumLinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
BRIDGE_NAME_PREFIX = 'brq'
GATEWAY_INTERFACE_PREFIX = 'gw-'
def plug(self, network, mac_address, gateway=True):
dev = self.get_dev(network)
bridge = self.get_bridge(network)
if not gateway:
# If we weren't instructed to act as a gateway then add the
# appropriate flows to block all non-dhcp traffic.
            # .. and make sure iptables won't forward it as well.
iptables_manager.ipv4['filter'].add_rule('FORWARD',
'--in-interface %s -j DROP' % bridge)
iptables_manager.ipv4['filter'].add_rule('FORWARD',
'--out-interface %s -j DROP' % bridge)
return bridge
else:
iptables_manager.ipv4['filter'].add_rule('FORWARD',
'--in-interface %s -j ACCEPT' % bridge)
iptables_manager.ipv4['filter'].add_rule('FORWARD',
'--out-interface %s -j ACCEPT' % bridge)
QuantumLinuxBridgeInterfaceDriver.create_tap_dev(dev, mac_address)
if not _device_exists(bridge):
LOG.debug(_("Starting bridge %s "), bridge)
utils.execute('brctl', 'addbr', bridge, run_as_root=True)
utils.execute('brctl', 'setfd', bridge, str(0), run_as_root=True)
utils.execute('brctl', 'stp', bridge, 'off', run_as_root=True)
utils.execute('ip', 'link', 'set', bridge, 'address', mac_address,
run_as_root=True)
utils.execute('ip', 'link', 'set', bridge, 'up', run_as_root=True)
LOG.debug(_("Done starting bridge %s"), bridge)
full_ip = '%s/%s' % (network['dhcp_server'],
network['cidr'].rpartition('/')[2])
utils.execute('ip', 'address', 'add', full_ip, 'dev', bridge,
run_as_root=True)
return dev
def unplug(self, network):
dev = self.get_dev(network)
if not _device_exists(dev):
return None
else:
try:
utils.execute('ip', 'link', 'delete', dev, run_as_root=True)
except exception.ProcessExecutionError:
LOG.error(_("Failed unplugging gateway interface '%s'"), dev)
raise
LOG.debug(_("Unplugged gateway interface '%s'"), dev)
return dev
@classmethod
def create_tap_dev(_self, dev, mac_address=None):
if not _device_exists(dev):
try:
# First, try with 'ip'
utils.execute('ip', 'tuntap', 'add', dev, 'mode', 'tap',
run_as_root=True)
except exception.ProcessExecutionError:
# Second option: tunctl
utils.execute('tunctl', '-b', '-t', dev, run_as_root=True)
if mac_address:
utils.execute('ip', 'link', 'set', dev, 'address', mac_address,
run_as_root=True)
utils.execute('ip', 'link', 'set', dev, 'up', run_as_root=True)
def get_dev(self, network):
dev = self.GATEWAY_INTERFACE_PREFIX + str(network['uuid'][0:11])
return dev
def get_bridge(self, network):
bridge = self.BRIDGE_NAME_PREFIX + str(network['uuid'][0:11])
return bridge
iptables_manager = IptablesManager()
|
|
"""
Contains generic class-based CRUD views.
"""
from django.contrib import messages
from django.shortcuts import redirect
from django.utils.translation import ugettext as _
from django.views.decorators.vary import vary_on_headers
from django.views.generic import CreateView as _CreateView
from django.views.generic import DeleteView as _DeleteView
from django.views.generic import FormView
from django.views.generic import ListView
from django.views.generic import UpdateView as _UpdateView
from wagtail.wagtailadmin.edit_handlers import BaseObjectList
from wagtail.wagtailadmin.edit_handlers import extract_panel_definitions_from_model_class
from wagtail.wagtailadmin.forms import SearchForm
from wagtail.wagtailsearch.backends import get_search_backends
class IndexView(ListView):
"""
Generic view class for listing existing instances.
"""
page_kwarg = 'p'
paginate_by = 20
index_template = None
results_template = None
@vary_on_headers('X-Requested-With')
def get(self, request, *args, **kwargs):
"""
Processes GET request.
:param request: the request instance.
:rtype: django.http.HttpResponse.
"""
return super(IndexView, self).get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
"""
Returns context data dictionary.
:rtype: dict.
"""
# Initialize common variables.
queryset = kwargs.pop('object_list', self.object_list)
context_object_name = self.get_context_object_name(queryset)
# Determine the desired ordering.
ordering = self.request.GET.get('ordering', '')
fields = [f.name for f in self.model._meta.fields]
        if ordering.replace('-', '') not in fields and hasattr(self.model, 'created_at'):
ordering = '-created_at'
if ordering:
queryset = queryset.order_by(ordering)
# Are we searching?
is_searching = False
query_string = None
plural = unicode(self.model._meta.verbose_name_plural)
placeholder = _(u'Search {0}'.format(plural))
if 'q' in self.request.GET:
form = SearchForm(self.request.GET, placeholder=placeholder)
if form.is_valid():
query_string = form.cleaned_data['q']
is_searching = True
queryset = self.model.search(query_string)
else:
form = SearchForm(placeholder=placeholder)
# Paginate the results.
page_size = self.get_paginate_by(queryset)
page = None
if page_size:
# Get paginated results.
paginator, page, queryset, is_paginated = self.paginate_queryset(
queryset,
page_size
)
# Add common context data.
kwargs.update({
'object_list': queryset,
'ordering': ordering,
'query_string': query_string,
'is_searching': is_searching,
'page': page,
})
# Add non-Ajax context data.
if not self.request.is_ajax():
kwargs.update({
'search_form': form,
'popular_tags': self.model.popular_tags(),
})
if context_object_name is not None:
kwargs.update({context_object_name: queryset})
return kwargs
def get_template_names(self):
"""
Returns list of template names.
:rtype: list.
"""
names = [self.index_template]
if self.request.is_ajax():
names = [self.results_template]
return names
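# Illustrative sketch (not part of the original module; model and
# template names are assumed): a concrete listing view is built by
# subclassing IndexView and pointing it at a model and its templates:
#
#   class DocumentIndexView(IndexView):
#       model = Document
#       index_template = 'documents/index.html'
#       results_template = 'documents/results.html'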
class BaseFormView(FormView):
"""
Generic view class for handling forms.
"""
model = None
def form_invalid(self, form):
"""
        Processes an unsuccessful form submission.
:param form: the form instance.
:rtype: django.http.HttpResponse.
"""
# Add error message(s).
default = _(u'The {0} could not be saved due to errors'.format(
self.model._meta.verbose_name_raw.lower()
))
for error in form.non_field_errors() or [default]:
messages.error(self.request, error)
# Return the response.
return super(BaseFormView, self).form_invalid(form)
def form_valid(self, form):
"""
        Processes a successful form submission.
:param form: the form instance.
:rtype: django.http.HttpResponse.
"""
# Set the object from form data.
self.object = form.save()
# Reindex the instance to make sure all tags are indexed.
for backend in get_search_backends():
backend.add(self.object)
# Add success message.
messages.success(
self.request,
_(u"{0} '{1}' saved.").format(
unicode(self.model._meta.verbose_name),
unicode(self.object)
)
)
# Redirect to success URL.
return redirect(self.success_url)
def get_context_data(self, **kwargs):
"""
Returns context data dictionary.
:rtype: dict.
"""
edit_handler_class = self.get_edit_handler_class()
instance = self.object or self.model()
form = kwargs.get('form', self.get_form_class())
kwargs.update({
'edit_handler': edit_handler_class(instance=instance, form=form)
})
return kwargs
def get_edit_handler_class(self):
"""
Returns edit handler class for view model.
:rtype: class.
"""
# Custom class that skips adding missing fields, as we may want to
# limit the number of fields presented to the user.
class BaseChooserObjectList(BaseObjectList):
def render_missing_fields(self):
return ''
def ChooserObjectList(children):
return type('_ChooserObjectList', (BaseChooserObjectList,), {
'children': children,
})
if hasattr(self.model, 'get_edit_handler'):
handler_class = self.model.get_edit_handler()
else:
handler_class = ChooserObjectList(
extract_panel_definitions_from_model_class(self.model)
)
# Return the edit handler class.
return handler_class
def get_form_class(self):
"""
Returns form class for view model.
:rtype: class.
"""
if self.form_class:
return self.form_class
else:
return self.get_edit_handler_class().get_form_class(self.model)
class CreateView(BaseFormView, _CreateView):
"""
Generic view class for adding new instances via edit handlers.
"""
pass
class UpdateView(BaseFormView, _UpdateView):
"""
Generic view class for editing existing instances via edit handlers.
"""
pass
class DeleteView(_DeleteView):
"""
Generic view class for deleting existing instances.
"""
def delete(self, request, *args, **kwargs):
"""
Deletes specified model instance.
:param request: the request instance.
:rtype: django.http.HttpResponse.
"""
# Set object instance and redirect URL.
self.object = self.get_object()
success_url = self.get_success_url()
# Delete the instance.
self.object.delete()
# Add success message.
messages.success(
self.request,
_(u"{0} '{1}' deleted.").format(
unicode(self.model._meta.verbose_name),
unicode(self.object)
)
)
# Return the response.
return redirect(success_url)
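# Illustrative usage sketch (not part of the original module; URL names
# and the Document model are assumed): the views above are typically
# wired up in a urls.py along these lines:
#
#   urlpatterns = patterns('',
#       url(r'^$', DocumentIndexView.as_view(), name='index'),
#       url(r'^add/$', CreateView.as_view(model=Document,
#           success_url='index'), name='add'),
#       url(r'^(?P<pk>\d+)/delete/$', DeleteView.as_view(model=Document,
#           success_url='index'), name='delete'),
#   )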
|
|
# -*- coding: utf-8 -*-
import os
import unittest
from StringIO import StringIO
from mimecat import (Catalogue, _canonicalize_extension,
_parse_file, _parse_line)
TEST_MIME_TYPES = """
# This file maps Internet media types to unique file extension(s).
# Although created for httpd, this file is used by many software systems
# and has been placed in the public domain for unlimited redisribution.
#
# The table below contains both registered and (common) unregistered types.
# A type that has no unique extension can be ignored -- they are listed
# here to guide configurations toward known types and to make it easier to
# identify "new" types. File extensions are also commonly used to indicate
# content languages and encodings, so choose them carefully.
#
# Internet media types should be registered as described in RFC 4288.
# The registry is at <http://www.iana.org/assignments/media-types/>.
#
# MIME type (lowercased) Extensions
# ============================================ ==========
# application/activemessage
application/andrew-inset ez
application/json json
# application/kpml-request+xml
# audio/amr
audio/midi mid midi kar rmi
# audio/mobile-xmf
audio/mp4 mp4a
audio/mp4a-latm m4a m4p
audio/ogg oga ogg spx
image/jpeg jpeg jpg jpe
# image/jpm
# message/cpim
# message/delivery-status
message/rfc822 eml mime
text/css css
text/plain txt text conf def list log in
# text/xml
video/3gpp 3gp
video/3gpp2 3g2
video/ogg ogv
"""
class CatalogueTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.test_filename = "test.mime.types"
cls.test_filename_shibboleth = "test-shibboleth.mime.types"
with open(cls.test_filename, "w") as filep:
filep.write(TEST_MIME_TYPES)
with open(cls.test_filename_shibboleth, "w") as filep:
filep.write("text/plain2 txt\n")
filep.write("text/plain txt2\n")
@classmethod
def tearDownClass(cls):
os.unlink(cls.test_filename)
os.unlink(cls.test_filename_shibboleth)
def setUp(self):
self.catalogue = Catalogue(self.test_filename)
self.empty_catalogue = Catalogue(self.test_filename)
self.empty_catalogue.clear()
def test_init(self):
cat = Catalogue(self.test_filename)
self.assertIn("message/rfc822",
cat._known_mimetypes)
def test_init_with_filep(self):
with open(self.test_filename, "r") as filep:
cat = Catalogue(filep = filep)
self.assertIn("message/rfc822",
cat._known_mimetypes)
def test_init_with_order(self):
with open(self.test_filename, "r") as filep:
cat = Catalogue(self.test_filename_shibboleth, filep)
# test_filename should've been used first, so text/plain2 should
# come after text/plain in the extensions to type map
type_list = cat._exts_to_types[".txt"]
self.assertGreater(type_list.index("text/plain2"),
type_list.index("text/plain"))
def test_init_fails(self):
cat = None
with self.assertRaises(IOError):
cat = Catalogue(["BOGUS_FILE"])
self.assertIsNone(cat)
def test_clear(self):
self.catalogue.clear()
self.assertEqual( {}, self.catalogue._types_to_exts)
self.assertEqual( {}, self.catalogue._exts_to_types)
self.assertEqual(set(), self.catalogue._known_mediatypes)
self.assertEqual(set(), self.catalogue._known_mimetypes)
self.assertEqual(set(), self.catalogue._known_extensions)
def test_load_filenames_stops(self):
self.empty_catalogue.load_filenames([self.test_filename_shibboleth,
self.test_filename],
True)
self.assertEqual(len(self.empty_catalogue._known_mediatypes), 1)
self.assertEqual(len(self.empty_catalogue._known_mimetypes), 2)
self.assertEqual(len(self.empty_catalogue._known_extensions), 2)
def test_load_filenames_does_not_stop(self):
self.empty_catalogue.load_filenames([self.test_filename_shibboleth,
self.test_filename], False)
self.assertGreater(len(self.empty_catalogue._known_mediatypes), 1)
self.assertGreater(len(self.empty_catalogue._known_mimetypes), 2)
self.assertGreater(len(self.empty_catalogue._known_extensions), 2)
def test_load_filenames_fail(self):
with self.assertRaises(IOError):
self.empty_catalogue.load_filenames(["BOGUS_FILE", "BOGUS_FILE2"])
def test_load_filename(self):
self.empty_catalogue.load_filename(self.test_filename_shibboleth)
self.assertEqual(len(self.empty_catalogue._known_mediatypes), 1)
self.assertEqual(len(self.empty_catalogue._known_mimetypes), 2)
self.assertEqual(len(self.empty_catalogue._known_extensions), 2)
def test_load_filename_fails(self):
with self.assertRaises(IOError):
self.empty_catalogue.load_filename("BOGUS_FILE")
def test_load_file(self):
with open(self.test_filename_shibboleth) as filep:
self.empty_catalogue.load_file(filep)
self.assertEqual(len(self.empty_catalogue._known_mediatypes), 1)
self.assertEqual(len(self.empty_catalogue._known_mimetypes), 2)
self.assertEqual(len(self.empty_catalogue._known_extensions), 2)
def test_parse_file(self):
with open(self.test_filename_shibboleth) as filep:
items = [item for item in _parse_file(filep) if item is not None]
self.assertEqual(len(items), 2)
with open(self.test_filename) as filep:
items = [item for item in _parse_file(filep) if item is not None]
self.assertEqual(len(items), 13)
def test_parse_line(self):
result = _parse_line("#")
self.assertIsNone(result)
result = _parse_line("# more")
self.assertIsNone(result)
result = _parse_line("text/plain")
self.assertEqual(("text/plain", []), result)
result = _parse_line("text/plain ext1 ext2 ext3")
self.assertEqual(("text/plain", [".ext1", ".ext2", ".ext3"]), result)
result = _parse_line("text/plain ext1 ext2 ext3 # with comment")
self.assertEqual(("text/plain", [".ext1", ".ext2", ".ext3"]), result)
result = _parse_line("# text/plain ext1 ext2 ext3")
self.assertIsNone(result)
result = _parse_line("# text/plain ext1 ext2 ext3 # with comment")
self.assertIsNone(result)
def test_parse_line_fails(self):
with self.assertRaises(ValueError):
_ = _parse_line("invalid exts")
def test_known_mediatypes(self):
self.assertIn("application", self.catalogue.known_mediatypes)
self.assertIn("text", self.catalogue.known_mediatypes)
def test_known_mimetypes(self):
self.assertIn("application/json", self.catalogue.known_mimetypes)
self.assertIn("audio/mp4", self.catalogue.known_mimetypes)
def test_known_extensions(self):
self.assertIn(".ez", self.catalogue.known_extensions)
self.assertIn(".m4a", self.catalogue.known_extensions)
def test_get_extensions(self):
exts = self.catalogue.get_extensions("audio/midi")
self.assertEqual(len(exts), 4)
def test_get_extensions_fails(self):
with self.assertRaises(KeyError):
self.catalogue.get_extensions("bad/type")
def test_get_types(self):
types = self.catalogue.get_types(".txt")
self.assertEqual(len(types), 1)
types = self.catalogue.get_types("txt")
self.assertEqual(len(types), 1)
def test_get_types_with_duplicate(self):
self.catalogue.add_type("text/plain2", ".txt")
types = self.catalogue.get_types("txt")
self.assertIn("text/plain", types)
self.assertIn("text/plain2", types)
def test_get_types_fails(self):
with self.assertRaises(KeyError):
self.catalogue.get_types("asdf")
def test_add_type(self):
self.empty_catalogue.add_type("text/plain", "txt")
self.assertIn("text", self.empty_catalogue._known_mediatypes)
self.assertIn("text/plain", self.empty_catalogue._known_mimetypes)
self.assertIn(".txt", self.empty_catalogue._known_extensions)
self.empty_catalogue.clear()
self.empty_catalogue.add_type("text/plain", ".txt")
self.assertIn("text", self.empty_catalogue._known_mediatypes)
self.assertIn("text/plain", self.empty_catalogue._known_mimetypes)
self.assertIn(".txt", self.empty_catalogue._known_extensions)
self.empty_catalogue.clear()
self.empty_catalogue.add_type("text/plain", [".txt"])
self.assertIn("text", self.empty_catalogue._known_mediatypes)
self.assertIn("text/plain", self.empty_catalogue._known_mimetypes)
self.assertIn(".txt", self.empty_catalogue._known_extensions)
def test_add_types_with_duplicate_extensions(self):
self.empty_catalogue.add_type("text/plain", "txt")
self.empty_catalogue.add_type("text/doc", "txt")
self.assertIn("text/plain", self.empty_catalogue._exts_to_types[".txt"])
self.assertIn("text/doc", self.empty_catalogue._exts_to_types[".txt"])
self.assertIn(".txt", self.empty_catalogue._types_to_exts["text/plain"])
self.assertIn(".txt", self.empty_catalogue._types_to_exts["text/doc"])
def test_add_type_fails(self):
with self.assertRaises(ValueError):
self.empty_catalogue.add_type("textplain", ".txt")
def test_canonicalize_extension(self):
ret = _canonicalize_extension("test")
self.assertEqual(ret, ".test")
ret = _canonicalize_extension(".test")
self.assertEqual(ret, ".test")
ret = _canonicalize_extension("")
self.assertEqual(ret, "")
ret = _canonicalize_extension(None)
self.assertIsNone(ret)
|
|
import time, unittest
from reset_database import reset_database, reset_data
from flask import url_for
from modules.Shared.database import db
from shepard import create_flask
# http://flask.pocoo.org/docs/0.11/testing/
# http://flask.pocoo.org/docs/0.11/api/#flask.Response
# https://docs.python.org/2/library/unittest.html#assert-methods
class CityTest(unittest.TestCase):
    # Run once during class instantiation
@classmethod
def setup_class(cls):
reset_database()
app = create_flask()
app.testing = True
db.init_app(app)
cls.context = app.test_request_context()
cls.app = app.test_client()
# Run once during class termination
@classmethod
def teardown_class(cls):
# close any existing db connections
db.session.close_all()
### start of tests
# test city homepage works
def test_city_default(self):
with self.context:
result = self.app.get(url_for('cities.view_all_cities'))
self.assertIn('Cities', result.data)
self.assertIn('Dali', result.data)
self.assertIn('Tours', result.data)
self.assertIn('<a href="/cities/add">Add city</a>', result.data)
# view city number 1
def test_city_view1(self):
result = self.app.get('/cities/view/1')
self.assertIn('Dali', result.data)
self.assertIn('<a href="/cities/edit/1">Edit</a>', result.data)
self.assertIn('<a href="/cities/">View All</a>', result.data)
# try to view a city that does not exist
def test_city_view_none(self):
with self.context:
result = self.app.get(
url_for('cities.view_one_city', city_id=99),
follow_redirects=True
)
self.assertIn('Entry does not exist.', result.data)
self.assertIn('<h1>Cities</h1>', result.data)
# view the add page
def test_city_add(self):
result = self.app.get('/cities/add')
self.assertIn('Add A City', result.data)
self.assertIn('City Name', result.data)
    # NOTE: using country ids created during country testing
# test adding a city with valid outcome
def test_city_add_valid(self):
result = self.app.post(
'/cities/add',
data={
'city_name': 'Catal',
'country_id': '3',
'region_id': '3'
},
follow_redirects=True
)
self.assertIn('<h1>Catal</h1>', result.data)
self.assertIn('<td>Catal</td>', result.data)
self.assertIn('<td>3</td>', result.data)
self.assertIn('Edit', result.data)
self.assertIn('Delete', result.data)
def test_city_add_null(self):
result = self.app.post(
'/cities/add',
data={},
follow_redirects=True
)
self.assertIn('Please fill in the city name.', result.data)
self.assertIn('Please choose the country.', result.data)
self.assertIn('Please choose the region.', result.data)
# test adding a city with invalid result
def test_city_add_invalid(self):
# send invalid name and non-number id
result = self.app.post(
'/cities/add',
data={
'city_name': 'Greec1',
'country_id': 'gg',
'region_id': 'gg'
},
follow_redirects=True
)
self.assertIn('Please fill in a city name only with English letters.',
result.data)
self.assertIn('Please choose the country.', result.data)
self.assertIn('Please choose the region.', result.data)
self.assertIn('<option value="1">Cyprus</option>', result.data)
# test editing a city page
def test_city_edit3(self):
result = self.app.get('/cities/edit/3')
self.assertIn('Edit A City', result.data)
self.assertIn('Istanbul', result.data)
self.assertIn('<option value="3" selected>Turkey</option>', result.data)
# test editing a city
def test_city_edit_valid(self):
result = self.app.post(
'/cities/edit/3',
data={
'city_name': 'Istabuli',
'country_id': 3,
'region_id': 3
},
follow_redirects=True
)
self.assertIn('<h1>Istabuli</h1>', result.data)
self.assertIn('<a href="/cities/edit/3">Edit</a>', result.data)
self.assertIn('<a href="/cities/delete/3">Delete</a>', result.data)
# test invalid editing a city
def test_city_edit_invalid(self):
result = self.app.post(
'/cities/edit/3',
data={
'city_name': '88888',
'country_id': '',
'region_id': ''
},
follow_redirects=True
)
self.assertIn('Please fill in a city name only with English letters.',
result.data)
self.assertIn('Please choose the country.', result.data)
self.assertIn('Please choose the region.', result.data)
    # test that the city forms are validating data correctly
def test_city_form_validation1(self):
        # ensure data is being cleansed in ways we haven't covered above
# response
# http://flask.pocoo.org/docs/0.11/api/#flask.Response
result = self.app.post(
'/cities/add',
data={
'city_name': '',
'country_id': 9895,
'region_id': 9899
},
follow_redirects=True
)
# print result.data
self.assertIn('Please fill in the city name completely.', result.data)
# self.assertIn('Please choose a valid country.', result.data)
def test_city_form_validation2(self):
        # ensure data is being cleansed in ways we haven't covered above
# response
# http://flask.pocoo.org/docs/0.11/api/#flask.Response
result = self.app.post(
'/cities/add',
data={
'city_name': 'Hhhhh928(@@*@!!',
'country_id': 99.9999,
'region_id': 99.9999
},
follow_redirects=True
)
# print result.data
self.assertIn('Please fill in a city name only with English letters.',
result.data)
        # this may be passing because it's casting to an int on the backend
# self.assertIn('Please choose a valid country id.', result.data)
# test that the cities are being deleted correctly
def test_city_delete_valid(self):
result = self.app.get(
'/cities/delete/2',
follow_redirects=True
)
# print result.data
self.assertNotIn('Tours', result.data)
# test that site responds correctly to invalid delete requests
def test_city_delete_invalid(self):
result = self.app.get(
'/cities/delete/10',
follow_redirects=True
)
self.assertEqual(400, result.status_code)
self.assertIn('Entry does not exist.', result.data)
if __name__ == '__main__':
unittest.main()
|
|
from south.db import db
from django.db import models
from ietf.community.models import *
class Migration:
def forwards(self, orm):
# Adding model 'Rule'
db.create_table('community_rule', (
('id', orm['community.Rule:id']),
('community_list', orm['community.Rule:community_list']),
('rule_type', orm['community.Rule:rule_type']),
('value', orm['community.Rule:value']),
('last_updated', orm['community.Rule:last_updated']),
))
db.send_create_signal('community', ['Rule'])
# Adding model 'CommunityList'
db.create_table('community_communitylist', (
('id', orm['community.CommunityList:id']),
('user', orm['community.CommunityList:user']),
('group', orm['community.CommunityList:group']),
))
db.send_create_signal('community', ['CommunityList'])
# Adding ManyToManyField 'Rule.cached_ids'
db.create_table('community_rule_cached_ids', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('rule', models.ForeignKey(orm.Rule, null=False)),
('document', models.ForeignKey(orm['doc.Document'], null=False))
))
# Adding ManyToManyField 'CommunityList.added_ids'
db.create_table('community_communitylist_added_ids', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('communitylist', models.ForeignKey(orm.CommunityList, null=False)),
('document', models.ForeignKey(orm['doc.Document'], null=False))
))
def backwards(self, orm):
# Deleting model 'Rule'
db.delete_table('community_rule')
# Deleting model 'CommunityList'
db.delete_table('community_communitylist')
# Dropping ManyToManyField 'Rule.cached_ids'
db.delete_table('community_rule_cached_ids')
# Dropping ManyToManyField 'CommunityList.added_ids'
db.delete_table('community_communitylist_added_ids')
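    # Illustrative usage note (standard South workflow): this migration
    # is applied and reversed with, e.g.,
    #   ./manage.py migrate community
    #   ./manage.py migrate community zero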
models = {
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)"},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'community.communitylist': {
'added_ids': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['doc.Document']"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['group.Group']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'community.rule': {
'cached_ids': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['doc.Document']"}),
'community_list': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['community.CommunityList']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'rule_type': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'doc.docalias': {
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doc.Document']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'doc.document': {
'abstract': ('django.db.models.fields.TextField', [], {}),
'ad': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ad_document_set'", 'null': 'True', 'to': "orm['person.Person']"}),
'authors': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['person.Email']", 'blank': 'True'}),
'external_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['group.Group']", 'null': 'True', 'blank': 'True'}),
'iana_state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.IanaDocStateName']", 'null': 'True', 'blank': 'True'}),
'iesg_state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.IesgDocStateName']", 'null': 'True', 'blank': 'True'}),
'intended_std_level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.IntendedStdLevelName']", 'null': 'True', 'blank': 'True'}),
'internal_comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'primary_key': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'notify': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'pages': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'related': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['doc.DocAlias']", 'blank': 'True'}),
'rev': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'rfc_state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.RfcDocStateName']", 'null': 'True', 'blank': 'True'}),
'shepherd': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'shepherd_document_set'", 'null': 'True', 'to': "orm['person.Person']"}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.DocStateName']", 'null': 'True', 'blank': 'True'}),
'std_level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.StdLevelName']", 'null': 'True', 'blank': 'True'}),
'stream': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.DocStreamName']", 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['name.DocInfoTagName']", 'null': 'True', 'blank': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.DocTypeName']", 'null': 'True', 'blank': 'True'}),
'wg_state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.WgDocStateName']", 'null': 'True', 'blank': 'True'})
},
'group.group': {
'acronym': ('django.db.models.fields.CharField', [], {'max_length': '16', 'db_index': 'True'}),
'ad': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['person.Person']", 'null': 'True', 'blank': 'True'}),
'charter': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'chartered_group'", 'unique': 'True', 'null': 'True', 'to': "orm['doc.Document']"}),
'comments': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'list_archive': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'list_email': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'list_subscribe': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['group.Group']", 'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.GroupStateName']", 'null': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['name.GroupTypeName']", 'null': 'True'})
},
'name.docinfotagname': {
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'})
},
'name.docstatename': {
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'})
},
'name.docstreamname': {
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'})
},
'name.doctypename': {
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'})
},
'name.groupstatename': {
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'})
},
'name.grouptypename': {
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'})
},
'name.ianadocstatename': {
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'})
},
'name.iesgdocstatename': {
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'})
},
'name.intendedstdlevelname': {
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'})
},
'name.rfcdocstatename': {
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'})
},
'name.stdlevelname': {
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'})
},
'name.wgdocstatename': {
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '8', 'primary_key': 'True'}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'})
},
'person.email': {
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'address': ('django.db.models.fields.CharField', [], {'max_length': '64', 'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['person.Person']", 'null': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'person.person': {
'address': ('django.db.models.fields.TextField', [], {'max_length': '255', 'blank': 'True'}),
'ascii': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ascii_short': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['community']
|
|
"""
Line editing functionality.
---------------------------
This provides a UI for a line input, similar to GNU Readline, libedit and
linenoise.
Either call the `prompt` function for every line input, or create an instance
of the :class:`.PromptSession` class and call the `prompt` method from that
class. In the second case, we'll have a 'session' that keeps all the state,
like the history, in between several calls.
There is a lot of overlap between the arguments taken by the `prompt` function
and the `PromptSession` (like `completer`, `style`, etcetera). This gives us
the freedom to decide which settings we want for the whole 'session', and
which we want for an individual `prompt`.
Example::
# Simple `prompt` call.
result = prompt('Say something: ')
# Using a 'session'.
s = PromptSession()
result = s.prompt('Say something: ')
"""
from asyncio import get_event_loop
from enum import Enum
from functools import partial
from typing import (
TYPE_CHECKING,
Callable,
Generic,
List,
Optional,
Tuple,
TypeVar,
Union,
cast,
)
from prompt_toolkit.application import Application
from prompt_toolkit.application.current import get_app
from prompt_toolkit.auto_suggest import AutoSuggest, DynamicAutoSuggest
from prompt_toolkit.buffer import Buffer
from prompt_toolkit.clipboard import Clipboard, DynamicClipboard, InMemoryClipboard
from prompt_toolkit.completion import Completer, DynamicCompleter, ThreadedCompleter
from prompt_toolkit.document import Document
from prompt_toolkit.enums import DEFAULT_BUFFER, SEARCH_BUFFER, EditingMode
from prompt_toolkit.filters import (
Condition,
FilterOrBool,
has_arg,
has_focus,
is_done,
is_true,
renderer_height_is_known,
to_filter,
)
from prompt_toolkit.formatted_text import (
AnyFormattedText,
StyleAndTextTuples,
fragment_list_to_text,
merge_formatted_text,
to_formatted_text,
)
from prompt_toolkit.history import History, InMemoryHistory
from prompt_toolkit.input.base import Input
from prompt_toolkit.key_binding.bindings.auto_suggest import load_auto_suggest_bindings
from prompt_toolkit.key_binding.bindings.completion import (
display_completions_like_readline,
)
from prompt_toolkit.key_binding.bindings.open_in_editor import (
load_open_in_editor_bindings,
)
from prompt_toolkit.key_binding.key_bindings import (
ConditionalKeyBindings,
DynamicKeyBindings,
KeyBindings,
KeyBindingsBase,
merge_key_bindings,
)
from prompt_toolkit.key_binding.key_processor import KeyPressEvent
from prompt_toolkit.keys import Keys
from prompt_toolkit.layout import Float, FloatContainer, HSplit, Window
from prompt_toolkit.layout.containers import ConditionalContainer, WindowAlign
from prompt_toolkit.layout.controls import (
BufferControl,
FormattedTextControl,
SearchBufferControl,
)
from prompt_toolkit.layout.dimension import Dimension
from prompt_toolkit.layout.layout import Layout
from prompt_toolkit.layout.menus import CompletionsMenu, MultiColumnCompletionsMenu
from prompt_toolkit.layout.processors import (
AppendAutoSuggestion,
ConditionalProcessor,
DisplayMultipleCursors,
DynamicProcessor,
HighlightIncrementalSearchProcessor,
HighlightSelectionProcessor,
PasswordProcessor,
Processor,
ReverseSearchProcessor,
merge_processors,
)
from prompt_toolkit.layout.utils import explode_text_fragments
from prompt_toolkit.lexers import DynamicLexer, Lexer
from prompt_toolkit.output import ColorDepth, DummyOutput, Output
from prompt_toolkit.styles import (
BaseStyle,
ConditionalStyleTransformation,
DynamicStyle,
DynamicStyleTransformation,
StyleTransformation,
SwapLightAndDarkStyleTransformation,
merge_style_transformations,
)
from prompt_toolkit.utils import (
get_cwidth,
is_dumb_terminal,
suspend_to_background_supported,
to_str,
)
from prompt_toolkit.validation import DynamicValidator, Validator
from prompt_toolkit.widgets.toolbars import (
SearchToolbar,
SystemToolbar,
ValidationToolbar,
)
if TYPE_CHECKING:
from prompt_toolkit.formatted_text.base import MagicFormattedText
__all__ = [
"PromptSession",
"prompt",
"confirm",
"create_confirm_session", # Used by '_display_completions_like_readline'.
"CompleteStyle",
]
_StyleAndTextTuplesCallable = Callable[[], StyleAndTextTuples]
E = KeyPressEvent
def _split_multiline_prompt(
get_prompt_text: _StyleAndTextTuplesCallable,
) -> Tuple[
Callable[[], bool], _StyleAndTextTuplesCallable, _StyleAndTextTuplesCallable
]:
"""
Take a `get_prompt_text` function and return three new functions instead.
One that tells whether this prompt consists of multiple lines; one that
returns the fragments to be shown on the lines above the input; and another
one with the fragments to be shown at the first line of the input.
"""
def has_before_fragments() -> bool:
for fragment, char, *_ in get_prompt_text():
if "\n" in char:
return True
return False
def before() -> StyleAndTextTuples:
result: StyleAndTextTuples = []
found_nl = False
for fragment, char, *_ in reversed(explode_text_fragments(get_prompt_text())):
if found_nl:
result.insert(0, (fragment, char))
elif char == "\n":
found_nl = True
return result
def first_input_line() -> StyleAndTextTuples:
result: StyleAndTextTuples = []
for fragment, char, *_ in reversed(explode_text_fragments(get_prompt_text())):
if char == "\n":
break
else:
result.insert(0, (fragment, char))
return result
return has_before_fragments, before, first_input_line
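# Illustrative sketch (not part of the original module): for a prompt text of
# "header\n> ", the three returned functions behave roughly as follows.
#
#     has_bf, before, first = _split_multiline_prompt(
#         lambda: [("", "header\n> ")]
#     )
#     has_bf()  # True -- the prompt contains a newline.
#     before()  # The single-character fragments spelling "header".
#     first()   # The single-character fragments spelling "> ".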
class _RPrompt(Window):
"""
The prompt that is displayed on the right side of the Window.
"""
def __init__(self, text: AnyFormattedText) -> None:
super().__init__(
FormattedTextControl(text=text),
align=WindowAlign.RIGHT,
style="class:rprompt",
)
class CompleteStyle(str, Enum):
"""
How to display autocompletions for the prompt.
"""
value: str
COLUMN = "COLUMN"
MULTI_COLUMN = "MULTI_COLUMN"
READLINE_LIKE = "READLINE_LIKE"
# Formatted text for the continuation prompt. It's the same as other
# formatted text, except that, if it's a callable, it takes three arguments.
PromptContinuationText = Union[
str,
"MagicFormattedText",
StyleAndTextTuples,
# (prompt_width, line_number, wrap_count) -> AnyFormattedText.
Callable[[int, int, int], AnyFormattedText],
]
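# A sketch of a callable continuation (hypothetical example, not taken from
# this module): pad with dots up to the width of the original prompt.
#
#     def dots_continuation(width: int, line_number: int, wrap_count: int) -> str:
#         return "." * max(width - 1, 0) + " "
#
#     # Usage: PromptSession(multiline=True, prompt_continuation=dots_continuation)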
_T = TypeVar("_T")
class PromptSession(Generic[_T]):
"""
PromptSession for a prompt application, which can be used as a GNU Readline
replacement.
This is a wrapper around a lot of ``prompt_toolkit`` functionality and can
be a replacement for `raw_input`.
All parameters that expect "formatted text" can take either just plain text
(a unicode object), a list of ``(style_str, text)`` tuples or an HTML object.
Example usage::
s = PromptSession(message='>')
text = s.prompt()
:param message: Plain text or formatted text to be shown before the prompt.
This can also be a callable that returns formatted text.
:param multiline: `bool` or :class:`~prompt_toolkit.filters.Filter`.
When True, prefer a layout that is more adapted for multiline input.
Text after newlines is automatically indented, and search/arg input is
shown below the input, instead of replacing the prompt.
:param wrap_lines: `bool` or :class:`~prompt_toolkit.filters.Filter`.
When True (the default), automatically wrap long lines instead of
scrolling horizontally.
:param is_password: Show asterisks instead of the actual typed characters.
:param editing_mode: ``EditingMode.VI`` or ``EditingMode.EMACS``.
    :param vi_mode: `bool`, if True, identical to ``editing_mode=EditingMode.VI``.
:param complete_while_typing: `bool` or
:class:`~prompt_toolkit.filters.Filter`. Enable autocompletion while
typing.
:param validate_while_typing: `bool` or
:class:`~prompt_toolkit.filters.Filter`. Enable input validation while
typing.
    :param enable_history_search: `bool` or
        :class:`~prompt_toolkit.filters.Filter`. Enable up-arrow partial
        string matching.
:param search_ignore_case:
:class:`~prompt_toolkit.filters.Filter`. Search case insensitive.
:param lexer: :class:`~prompt_toolkit.lexers.Lexer` to be used for the
syntax highlighting.
:param validator: :class:`~prompt_toolkit.validation.Validator` instance
for input validation.
:param completer: :class:`~prompt_toolkit.completion.Completer` instance
for input completion.
:param complete_in_thread: `bool` or
:class:`~prompt_toolkit.filters.Filter`. Run the completer code in a
background thread in order to avoid blocking the user interface.
        For ``CompleteStyle.READLINE_LIKE``, this setting has no effect: there,
        completions always run in the main thread.
:param reserve_space_for_menu: Space to be reserved for displaying the menu.
(0 means that no space needs to be reserved.)
:param auto_suggest: :class:`~prompt_toolkit.auto_suggest.AutoSuggest`
instance for input suggestions.
:param style: :class:`.Style` instance for the color scheme.
:param include_default_pygments_style: `bool` or
:class:`~prompt_toolkit.filters.Filter`. Tell whether the default
styling for Pygments lexers has to be included. By default, this is
true, but it is recommended to be disabled if another Pygments style is
passed as the `style` argument, otherwise, two Pygments styles will be
merged.
:param style_transformation:
:class:`~prompt_toolkit.style.StyleTransformation` instance.
:param swap_light_and_dark_colors: `bool` or
:class:`~prompt_toolkit.filters.Filter`. When enabled, apply
:class:`~prompt_toolkit.style.SwapLightAndDarkStyleTransformation`.
This is useful for switching between dark and light terminal
backgrounds.
:param enable_system_prompt: `bool` or
:class:`~prompt_toolkit.filters.Filter`. Pressing Meta+'!' will show
a system prompt.
:param enable_suspend: `bool` or :class:`~prompt_toolkit.filters.Filter`.
Enable Control-Z style suspension.
:param enable_open_in_editor: `bool` or
:class:`~prompt_toolkit.filters.Filter`. Pressing 'v' in Vi mode or
C-X C-E in emacs mode will open an external editor.
:param history: :class:`~prompt_toolkit.history.History` instance.
:param clipboard: :class:`~prompt_toolkit.clipboard.Clipboard` instance.
(e.g. :class:`~prompt_toolkit.clipboard.InMemoryClipboard`)
:param rprompt: Text or formatted text to be displayed on the right side.
This can also be a callable that returns (formatted) text.
:param bottom_toolbar: Formatted text or callable which is supposed to
return formatted text.
:param prompt_continuation: Text that needs to be displayed for a multiline
prompt continuation. This can either be formatted text or a callable
that takes a `prompt_width`, `line_number` and `wrap_count` as input
and returns formatted text. When this is `None` (the default), then
`prompt_width` spaces will be used.
:param complete_style: ``CompleteStyle.COLUMN``,
``CompleteStyle.MULTI_COLUMN`` or ``CompleteStyle.READLINE_LIKE``.
:param mouse_support: `bool` or :class:`~prompt_toolkit.filters.Filter`
to enable mouse support.
:param refresh_interval: (number; in seconds) When given, refresh the UI
every so many seconds.
:param input: `Input` object. (Note that the preferred way to change the
input/output is by creating an `AppSession`.)
:param output: `Output` object.
"""
_fields = (
"message",
"lexer",
"completer",
"complete_in_thread",
"is_password",
"editing_mode",
"key_bindings",
"is_password",
"bottom_toolbar",
"style",
"style_transformation",
"swap_light_and_dark_colors",
"color_depth",
"include_default_pygments_style",
"rprompt",
"multiline",
"prompt_continuation",
"wrap_lines",
"enable_history_search",
"search_ignore_case",
"complete_while_typing",
"validate_while_typing",
"complete_style",
"mouse_support",
"auto_suggest",
"clipboard",
"validator",
"refresh_interval",
"input_processors",
"enable_system_prompt",
"enable_suspend",
"enable_open_in_editor",
"reserve_space_for_menu",
"tempfile_suffix",
"tempfile",
)
def __init__(
self,
message: AnyFormattedText = "",
*,
multiline: FilterOrBool = False,
wrap_lines: FilterOrBool = True,
is_password: FilterOrBool = False,
vi_mode: bool = False,
editing_mode: EditingMode = EditingMode.EMACS,
complete_while_typing: FilterOrBool = True,
validate_while_typing: FilterOrBool = True,
enable_history_search: FilterOrBool = False,
search_ignore_case: FilterOrBool = False,
lexer: Optional[Lexer] = None,
enable_system_prompt: FilterOrBool = False,
enable_suspend: FilterOrBool = False,
enable_open_in_editor: FilterOrBool = False,
validator: Optional[Validator] = None,
completer: Optional[Completer] = None,
complete_in_thread: bool = False,
reserve_space_for_menu: int = 8,
complete_style: CompleteStyle = CompleteStyle.COLUMN,
auto_suggest: Optional[AutoSuggest] = None,
style: Optional[BaseStyle] = None,
style_transformation: Optional[StyleTransformation] = None,
swap_light_and_dark_colors: FilterOrBool = False,
color_depth: Optional[ColorDepth] = None,
include_default_pygments_style: FilterOrBool = True,
history: Optional[History] = None,
clipboard: Optional[Clipboard] = None,
prompt_continuation: Optional[PromptContinuationText] = None,
rprompt: AnyFormattedText = None,
bottom_toolbar: AnyFormattedText = None,
mouse_support: FilterOrBool = False,
input_processors: Optional[List[Processor]] = None,
key_bindings: Optional[KeyBindingsBase] = None,
erase_when_done: bool = False,
tempfile_suffix: Optional[Union[str, Callable[[], str]]] = ".txt",
tempfile: Optional[Union[str, Callable[[], str]]] = None,
refresh_interval: float = 0,
input: Optional[Input] = None,
output: Optional[Output] = None,
) -> None:
history = history or InMemoryHistory()
clipboard = clipboard or InMemoryClipboard()
# Ensure backwards-compatibility, when `vi_mode` is passed.
if vi_mode:
editing_mode = EditingMode.VI
# Store all settings in this class.
self._input = input
self._output = output
# Store attributes.
# (All except 'editing_mode'.)
self.message = message
self.lexer = lexer
self.completer = completer
self.complete_in_thread = complete_in_thread
self.is_password = is_password
self.key_bindings = key_bindings
self.bottom_toolbar = bottom_toolbar
self.style = style
self.style_transformation = style_transformation
self.swap_light_and_dark_colors = swap_light_and_dark_colors
self.color_depth = color_depth
self.include_default_pygments_style = include_default_pygments_style
self.rprompt = rprompt
self.multiline = multiline
self.prompt_continuation = prompt_continuation
self.wrap_lines = wrap_lines
self.enable_history_search = enable_history_search
self.search_ignore_case = search_ignore_case
self.complete_while_typing = complete_while_typing
self.validate_while_typing = validate_while_typing
self.complete_style = complete_style
self.mouse_support = mouse_support
self.auto_suggest = auto_suggest
self.clipboard = clipboard
self.validator = validator
self.refresh_interval = refresh_interval
self.input_processors = input_processors
self.enable_system_prompt = enable_system_prompt
self.enable_suspend = enable_suspend
self.enable_open_in_editor = enable_open_in_editor
self.reserve_space_for_menu = reserve_space_for_menu
self.tempfile_suffix = tempfile_suffix
self.tempfile = tempfile
# Create buffers, layout and Application.
self.history = history
self.default_buffer = self._create_default_buffer()
self.search_buffer = self._create_search_buffer()
self.layout = self._create_layout()
self.app = self._create_application(editing_mode, erase_when_done)
def _dyncond(self, attr_name: str) -> Condition:
"""
Dynamically take this setting from this 'PromptSession' class.
`attr_name` represents an attribute name of this class. Its value
can either be a boolean or a `Filter`.
        This returns a `Filter` (a `Condition`) that evaluates the attribute's
        current value, whether that value is a `bool` or a `Filter`.
"""
@Condition
def dynamic() -> bool:
value = cast(FilterOrBool, getattr(self, attr_name))
return to_filter(value)()
return dynamic
def _create_default_buffer(self) -> Buffer:
"""
Create and return the default input buffer.
"""
dyncond = self._dyncond
# Create buffers list.
def accept(buff: Buffer) -> bool:
""" Accept the content of the default buffer. This is called when
the validation succeeds. """
cast(Application[str], get_app()).exit(result=buff.document.text)
return True # Keep text, we call 'reset' later on.
return Buffer(
name=DEFAULT_BUFFER,
# Make sure that complete_while_typing is disabled when
# enable_history_search is enabled. (First convert to Filter,
# to avoid doing bitwise operations on bool objects.)
complete_while_typing=Condition(
lambda: is_true(self.complete_while_typing)
and not is_true(self.enable_history_search)
and not self.complete_style == CompleteStyle.READLINE_LIKE
),
validate_while_typing=dyncond("validate_while_typing"),
enable_history_search=dyncond("enable_history_search"),
validator=DynamicValidator(lambda: self.validator),
completer=DynamicCompleter(
lambda: ThreadedCompleter(self.completer)
if self.complete_in_thread and self.completer
else self.completer
),
history=self.history,
auto_suggest=DynamicAutoSuggest(lambda: self.auto_suggest),
accept_handler=accept,
tempfile_suffix=lambda: to_str(self.tempfile_suffix or ""),
tempfile=lambda: to_str(self.tempfile or ""),
)
def _create_search_buffer(self) -> Buffer:
return Buffer(name=SEARCH_BUFFER)
def _create_layout(self) -> Layout:
"""
Create `Layout` for this prompt.
"""
dyncond = self._dyncond
# Create functions that will dynamically split the prompt. (If we have
# a multiline prompt.)
(
has_before_fragments,
get_prompt_text_1,
get_prompt_text_2,
) = _split_multiline_prompt(self._get_prompt)
default_buffer = self.default_buffer
search_buffer = self.search_buffer
# Create processors list.
all_input_processors = [
HighlightIncrementalSearchProcessor(),
HighlightSelectionProcessor(),
ConditionalProcessor(
AppendAutoSuggestion(), has_focus(default_buffer) & ~is_done
),
ConditionalProcessor(PasswordProcessor(), dyncond("is_password")),
DisplayMultipleCursors(),
# Users can insert processors here.
DynamicProcessor(lambda: merge_processors(self.input_processors or [])),
]
# Create bottom toolbars.
bottom_toolbar = ConditionalContainer(
Window(
FormattedTextControl(
lambda: self.bottom_toolbar, style="class:bottom-toolbar.text"
),
style="class:bottom-toolbar",
dont_extend_height=True,
height=Dimension(min=1),
),
filter=~is_done
& renderer_height_is_known
& Condition(lambda: self.bottom_toolbar is not None),
)
search_toolbar = SearchToolbar(
search_buffer, ignore_case=dyncond("search_ignore_case")
)
search_buffer_control = SearchBufferControl(
buffer=search_buffer,
            input_processors=[ReverseSearchProcessor()],
ignore_case=dyncond("search_ignore_case"),
)
system_toolbar = SystemToolbar(
enable_global_bindings=dyncond("enable_system_prompt")
)
def get_search_buffer_control() -> SearchBufferControl:
" Return the UIControl to be focused when searching start. "
if is_true(self.multiline):
return search_toolbar.control
else:
return search_buffer_control
default_buffer_control = BufferControl(
buffer=default_buffer,
search_buffer_control=get_search_buffer_control,
input_processors=all_input_processors,
include_default_input_processors=False,
lexer=DynamicLexer(lambda: self.lexer),
preview_search=True,
)
default_buffer_window = Window(
default_buffer_control,
height=self._get_default_buffer_control_height,
get_line_prefix=partial(
self._get_line_prefix, get_prompt_text_2=get_prompt_text_2
),
wrap_lines=dyncond("wrap_lines"),
)
@Condition
def multi_column_complete_style() -> bool:
return self.complete_style == CompleteStyle.MULTI_COLUMN
# Build the layout.
layout = HSplit(
[
# The main input, with completion menus floating on top of it.
FloatContainer(
HSplit(
[
ConditionalContainer(
Window(
FormattedTextControl(get_prompt_text_1),
dont_extend_height=True,
),
Condition(has_before_fragments),
),
ConditionalContainer(
default_buffer_window,
Condition(
lambda: get_app().layout.current_control
!= search_buffer_control
),
),
ConditionalContainer(
Window(search_buffer_control),
Condition(
lambda: get_app().layout.current_control
== search_buffer_control
),
),
]
),
[
# Completion menus.
# NOTE: Especially the multi-column menu needs to be
# transparent, because the shape is not always
# rectangular due to the meta-text below the menu.
Float(
xcursor=True,
ycursor=True,
transparent=True,
content=CompletionsMenu(
max_height=16,
scroll_offset=1,
extra_filter=has_focus(default_buffer)
& ~multi_column_complete_style,
),
),
Float(
xcursor=True,
ycursor=True,
transparent=True,
content=MultiColumnCompletionsMenu(
show_meta=True,
extra_filter=has_focus(default_buffer)
& multi_column_complete_style,
),
),
# The right prompt.
Float(
right=0,
bottom=0,
hide_when_covering_content=True,
content=_RPrompt(lambda: self.rprompt),
),
],
),
ConditionalContainer(ValidationToolbar(), filter=~is_done),
ConditionalContainer(
system_toolbar, dyncond("enable_system_prompt") & ~is_done
),
# In multiline mode, we use two toolbars for 'arg' and 'search'.
ConditionalContainer(
Window(FormattedTextControl(self._get_arg_text), height=1),
dyncond("multiline") & has_arg,
),
ConditionalContainer(search_toolbar, dyncond("multiline") & ~is_done),
bottom_toolbar,
]
)
return Layout(layout, default_buffer_window)
def _create_application(
self, editing_mode: EditingMode, erase_when_done: bool
) -> Application[_T]:
"""
Create the `Application` object.
"""
dyncond = self._dyncond
# Default key bindings.
auto_suggest_bindings = load_auto_suggest_bindings()
open_in_editor_bindings = load_open_in_editor_bindings()
prompt_bindings = self._create_prompt_bindings()
# Create application
application: Application[_T] = Application(
layout=self.layout,
style=DynamicStyle(lambda: self.style),
style_transformation=merge_style_transformations(
[
DynamicStyleTransformation(lambda: self.style_transformation),
ConditionalStyleTransformation(
SwapLightAndDarkStyleTransformation(),
dyncond("swap_light_and_dark_colors"),
),
]
),
include_default_pygments_style=dyncond("include_default_pygments_style"),
clipboard=DynamicClipboard(lambda: self.clipboard),
key_bindings=merge_key_bindings(
[
merge_key_bindings(
[
auto_suggest_bindings,
ConditionalKeyBindings(
open_in_editor_bindings,
dyncond("enable_open_in_editor")
& has_focus(DEFAULT_BUFFER),
),
prompt_bindings,
]
),
DynamicKeyBindings(lambda: self.key_bindings),
]
),
mouse_support=dyncond("mouse_support"),
editing_mode=editing_mode,
erase_when_done=erase_when_done,
reverse_vi_search_direction=True,
color_depth=lambda: self.color_depth,
refresh_interval=self.refresh_interval,
input=self._input,
output=self._output,
)
# During render time, make sure that we focus the right search control
# (if we are searching). - This could be useful if people make the
# 'multiline' property dynamic.
"""
def on_render(app):
multiline = is_true(self.multiline)
current_control = app.layout.current_control
if multiline:
if current_control == search_buffer_control:
app.layout.current_control = search_toolbar.control
app.invalidate()
else:
if current_control == search_toolbar.control:
app.layout.current_control = search_buffer_control
app.invalidate()
app.on_render += on_render
"""
return application
def _create_prompt_bindings(self) -> KeyBindings:
"""
Create the KeyBindings for a prompt application.
"""
kb = KeyBindings()
handle = kb.add
default_focused = has_focus(DEFAULT_BUFFER)
@Condition
def do_accept() -> bool:
return not is_true(self.multiline) and self.app.layout.has_focus(
DEFAULT_BUFFER
)
@handle("enter", filter=do_accept & default_focused)
def _accept_input(event: E) -> None:
" Accept input when enter has been pressed. "
self.default_buffer.validate_and_handle()
@Condition
def readline_complete_style() -> bool:
return self.complete_style == CompleteStyle.READLINE_LIKE
@handle("tab", filter=readline_complete_style & default_focused)
def _complete_like_readline(event: E) -> None:
" Display completions (like Readline). "
display_completions_like_readline(event)
@handle("c-c", filter=default_focused)
def _keyboard_interrupt(event: E) -> None:
" Abort when Control-C has been pressed. "
event.app.exit(exception=KeyboardInterrupt, style="class:aborting")
@Condition
def ctrl_d_condition() -> bool:
""" Ctrl-D binding is only active when the default buffer is selected
and empty. """
app = get_app()
return (
app.current_buffer.name == DEFAULT_BUFFER
and not app.current_buffer.text
)
@handle("c-d", filter=ctrl_d_condition & default_focused)
def _eof(event: E) -> None:
" Exit when Control-D has been pressed. "
event.app.exit(exception=EOFError, style="class:exiting")
suspend_supported = Condition(suspend_to_background_supported)
@Condition
def enable_suspend() -> bool:
return to_filter(self.enable_suspend)()
@handle("c-z", filter=suspend_supported & enable_suspend)
def _suspend(event: E) -> None:
"""
Suspend process to background.
"""
event.app.suspend_to_background()
return kb
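    # Custom bindings can be layered on top of these defaults through the
    # `key_bindings` argument of the constructor or of `prompt()`. A sketch
    # (illustrative only):
    #
    #     kb = KeyBindings()
    #
    #     @kb.add("c-t")
    #     def _(event: E) -> None:
    #         event.app.current_buffer.insert_text("hello")
    #
    #     session = PromptSession(key_bindings=kb)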
def prompt(
self,
        # When any of these arguments is passed, its value is overwritten
        # in this PromptSession.
message: Optional[AnyFormattedText] = None,
        # `message` should go first, because people call it as a
        # positional argument.
*,
editing_mode: Optional[EditingMode] = None,
refresh_interval: Optional[float] = None,
vi_mode: Optional[bool] = None,
lexer: Optional[Lexer] = None,
completer: Optional[Completer] = None,
complete_in_thread: Optional[bool] = None,
is_password: Optional[bool] = None,
key_bindings: Optional[KeyBindingsBase] = None,
bottom_toolbar: Optional[AnyFormattedText] = None,
style: Optional[BaseStyle] = None,
color_depth: Optional[ColorDepth] = None,
include_default_pygments_style: Optional[FilterOrBool] = None,
style_transformation: Optional[StyleTransformation] = None,
swap_light_and_dark_colors: Optional[FilterOrBool] = None,
rprompt: Optional[AnyFormattedText] = None,
multiline: Optional[FilterOrBool] = None,
prompt_continuation: Optional[PromptContinuationText] = None,
wrap_lines: Optional[FilterOrBool] = None,
enable_history_search: Optional[FilterOrBool] = None,
search_ignore_case: Optional[FilterOrBool] = None,
complete_while_typing: Optional[FilterOrBool] = None,
validate_while_typing: Optional[FilterOrBool] = None,
complete_style: Optional[CompleteStyle] = None,
auto_suggest: Optional[AutoSuggest] = None,
validator: Optional[Validator] = None,
clipboard: Optional[Clipboard] = None,
mouse_support: Optional[FilterOrBool] = None,
input_processors: Optional[List[Processor]] = None,
reserve_space_for_menu: Optional[int] = None,
enable_system_prompt: Optional[FilterOrBool] = None,
enable_suspend: Optional[FilterOrBool] = None,
enable_open_in_editor: Optional[FilterOrBool] = None,
tempfile_suffix: Optional[Union[str, Callable[[], str]]] = None,
tempfile: Optional[Union[str, Callable[[], str]]] = None,
# Following arguments are specific to the current `prompt()` call.
default: Union[str, Document] = "",
accept_default: bool = False,
pre_run: Optional[Callable[[], None]] = None,
set_exception_handler: bool = True,
) -> _T:
"""
Display the prompt.
The first set of arguments is a subset of the :class:`~.PromptSession`
class itself. For these, passing in ``None`` will keep the current
values that are active in the session. Passing in a value will set the
attribute for the session, which means that it applies to the current,
but also to the next prompts.
Note that in order to erase a ``Completer``, ``Validator`` or
``AutoSuggest``, you can't use ``None``. Instead pass in a
``DummyCompleter``, ``DummyValidator`` or ``DummyAutoSuggest`` instance
respectively. For a ``Lexer`` you can pass in an empty ``SimpleLexer``.
        Additional arguments, specific to this prompt:
:param default: The default input text to be shown. (This can be edited
by the user).
:param accept_default: When `True`, automatically accept the default
value without allowing the user to edit the input.
:param pre_run: Callable, called at the start of `Application.run`.
This method will raise ``KeyboardInterrupt`` when control-c has been
pressed (for abort) and ``EOFError`` when control-d has been pressed
(for exit).
"""
# NOTE: We used to create a backup of the PromptSession attributes and
# restore them after exiting the prompt. This code has been
# removed, because it was confusing and didn't really serve a use
# case. (People were changing `Application.editing_mode`
# dynamically and surprised that it was reset after every call.)
        # NOTE 2: YES, this is a lot of repetition below...
        #         However, it is very convenient for a user to accept all
        #         these parameters in this `prompt` method as well. We could
        #         use `locals()` and `setattr` to avoid the repetition, but
        #         then we lose the ability of mypy and pyflakes to verify
        #         the code.
if message is not None:
self.message = message
if editing_mode is not None:
self.editing_mode = editing_mode
if refresh_interval is not None:
self.refresh_interval = refresh_interval
if vi_mode:
self.editing_mode = EditingMode.VI
if lexer is not None:
self.lexer = lexer
if completer is not None:
self.completer = completer
if complete_in_thread is not None:
self.complete_in_thread = complete_in_thread
if is_password is not None:
self.is_password = is_password
if key_bindings is not None:
self.key_bindings = key_bindings
if bottom_toolbar is not None:
self.bottom_toolbar = bottom_toolbar
if style is not None:
self.style = style
if color_depth is not None:
self.color_depth = color_depth
if include_default_pygments_style is not None:
self.include_default_pygments_style = include_default_pygments_style
if style_transformation is not None:
self.style_transformation = style_transformation
if swap_light_and_dark_colors is not None:
self.swap_light_and_dark_colors = swap_light_and_dark_colors
if rprompt is not None:
self.rprompt = rprompt
if multiline is not None:
self.multiline = multiline
if prompt_continuation is not None:
self.prompt_continuation = prompt_continuation
if wrap_lines is not None:
self.wrap_lines = wrap_lines
if enable_history_search is not None:
self.enable_history_search = enable_history_search
if search_ignore_case is not None:
self.search_ignore_case = search_ignore_case
if complete_while_typing is not None:
self.complete_while_typing = complete_while_typing
if validate_while_typing is not None:
self.validate_while_typing = validate_while_typing
if complete_style is not None:
self.complete_style = complete_style
if auto_suggest is not None:
self.auto_suggest = auto_suggest
if validator is not None:
self.validator = validator
if clipboard is not None:
self.clipboard = clipboard
if mouse_support is not None:
self.mouse_support = mouse_support
if input_processors is not None:
self.input_processors = input_processors
if reserve_space_for_menu is not None:
self.reserve_space_for_menu = reserve_space_for_menu
if enable_system_prompt is not None:
self.enable_system_prompt = enable_system_prompt
if enable_suspend is not None:
self.enable_suspend = enable_suspend
if enable_open_in_editor is not None:
self.enable_open_in_editor = enable_open_in_editor
if tempfile_suffix is not None:
self.tempfile_suffix = tempfile_suffix
if tempfile is not None:
self.tempfile = tempfile
self._add_pre_run_callables(pre_run, accept_default)
self.default_buffer.reset(
default if isinstance(default, Document) else Document(default)
)
self.app.refresh_interval = self.refresh_interval # This is not reactive.
        # If we are using the default output and have a dumb terminal, use the
        # dumb prompt.
if self._output is None and is_dumb_terminal():
return get_event_loop().run_until_complete(self._dumb_prompt(self.message))
return self.app.run(set_exception_handler=set_exception_handler)
async def _dumb_prompt(self, message: AnyFormattedText = "") -> _T:
"""
Prompt function for dumb terminals.
Dumb terminals have minimum rendering capabilities. We can only print
text to the screen. We can't use colors, and we can't do cursor
movements. The Emacs inferior shell is an example of a dumb terminal.
We will show the prompt, and wait for the input. We still handle arrow
keys, and all custom key bindings, but we don't really render the
cursor movements. Instead we only print the typed character that's
right before the cursor.
"""
# Send prompt to output.
        self.output.write(fragment_list_to_text(to_formatted_text(message)))
self.output.flush()
# Key bindings for the dumb prompt: mostly the same as the full prompt.
key_bindings: KeyBindingsBase = self._create_prompt_bindings()
if self.key_bindings:
key_bindings = merge_key_bindings([self.key_bindings, key_bindings])
# Create and run application.
application = cast(
Application[_T],
Application(
input=self.input,
output=DummyOutput(),
layout=self.layout,
key_bindings=key_bindings,
),
)
def on_text_changed(_) -> None:
self.output.write(self.default_buffer.document.text_before_cursor[-1:])
self.output.flush()
self.default_buffer.on_text_changed += on_text_changed
result = await application.run_async()
# Render line ending.
self.output.write("\r\n")
self.output.flush()
return result
async def prompt_async(
self,
        # When any of these arguments is passed, its value is overwritten
        # in this PromptSession.
message: Optional[AnyFormattedText] = None,
        # `message` should go first, because people call it as a
        # positional argument.
*,
editing_mode: Optional[EditingMode] = None,
refresh_interval: Optional[float] = None,
vi_mode: Optional[bool] = None,
lexer: Optional[Lexer] = None,
completer: Optional[Completer] = None,
complete_in_thread: Optional[bool] = None,
is_password: Optional[bool] = None,
key_bindings: Optional[KeyBindingsBase] = None,
bottom_toolbar: Optional[AnyFormattedText] = None,
style: Optional[BaseStyle] = None,
color_depth: Optional[ColorDepth] = None,
include_default_pygments_style: Optional[FilterOrBool] = None,
style_transformation: Optional[StyleTransformation] = None,
swap_light_and_dark_colors: Optional[FilterOrBool] = None,
rprompt: Optional[AnyFormattedText] = None,
multiline: Optional[FilterOrBool] = None,
prompt_continuation: Optional[PromptContinuationText] = None,
wrap_lines: Optional[FilterOrBool] = None,
enable_history_search: Optional[FilterOrBool] = None,
search_ignore_case: Optional[FilterOrBool] = None,
complete_while_typing: Optional[FilterOrBool] = None,
validate_while_typing: Optional[FilterOrBool] = None,
complete_style: Optional[CompleteStyle] = None,
auto_suggest: Optional[AutoSuggest] = None,
validator: Optional[Validator] = None,
clipboard: Optional[Clipboard] = None,
mouse_support: Optional[FilterOrBool] = None,
input_processors: Optional[List[Processor]] = None,
reserve_space_for_menu: Optional[int] = None,
enable_system_prompt: Optional[FilterOrBool] = None,
enable_suspend: Optional[FilterOrBool] = None,
enable_open_in_editor: Optional[FilterOrBool] = None,
tempfile_suffix: Optional[Union[str, Callable[[], str]]] = None,
tempfile: Optional[Union[str, Callable[[], str]]] = None,
# Following arguments are specific to the current `prompt()` call.
default: Union[str, Document] = "",
accept_default: bool = False,
pre_run: Optional[Callable[[], None]] = None,
set_exception_handler: bool = True,
) -> _T:
if message is not None:
self.message = message
if editing_mode is not None:
self.editing_mode = editing_mode
if refresh_interval is not None:
self.refresh_interval = refresh_interval
if vi_mode:
self.editing_mode = EditingMode.VI
if lexer is not None:
self.lexer = lexer
if completer is not None:
self.completer = completer
if complete_in_thread is not None:
self.complete_in_thread = complete_in_thread
if is_password is not None:
self.is_password = is_password
if key_bindings is not None:
self.key_bindings = key_bindings
if bottom_toolbar is not None:
self.bottom_toolbar = bottom_toolbar
if style is not None:
self.style = style
if color_depth is not None:
self.color_depth = color_depth
if include_default_pygments_style is not None:
self.include_default_pygments_style = include_default_pygments_style
if style_transformation is not None:
self.style_transformation = style_transformation
if swap_light_and_dark_colors is not None:
self.swap_light_and_dark_colors = swap_light_and_dark_colors
if rprompt is not None:
self.rprompt = rprompt
if multiline is not None:
self.multiline = multiline
if prompt_continuation is not None:
self.prompt_continuation = prompt_continuation
if wrap_lines is not None:
self.wrap_lines = wrap_lines
if enable_history_search is not None:
self.enable_history_search = enable_history_search
if search_ignore_case is not None:
self.search_ignore_case = search_ignore_case
if complete_while_typing is not None:
self.complete_while_typing = complete_while_typing
if validate_while_typing is not None:
self.validate_while_typing = validate_while_typing
if complete_style is not None:
self.complete_style = complete_style
if auto_suggest is not None:
self.auto_suggest = auto_suggest
if validator is not None:
self.validator = validator
if clipboard is not None:
self.clipboard = clipboard
if mouse_support is not None:
self.mouse_support = mouse_support
if input_processors is not None:
self.input_processors = input_processors
if reserve_space_for_menu is not None:
self.reserve_space_for_menu = reserve_space_for_menu
if enable_system_prompt is not None:
self.enable_system_prompt = enable_system_prompt
if enable_suspend is not None:
self.enable_suspend = enable_suspend
if enable_open_in_editor is not None:
self.enable_open_in_editor = enable_open_in_editor
if tempfile_suffix is not None:
self.tempfile_suffix = tempfile_suffix
if tempfile is not None:
self.tempfile = tempfile
self._add_pre_run_callables(pre_run, accept_default)
self.default_buffer.reset(
default if isinstance(default, Document) else Document(default)
)
self.app.refresh_interval = self.refresh_interval # This is not reactive.
        # If we are using the default output and have a dumb terminal, use the
        # dumb prompt.
if self._output is None and is_dumb_terminal():
return await self._dumb_prompt(self.message)
return await self.app.run_async(set_exception_handler=set_exception_handler)
def _add_pre_run_callables(
self, pre_run: Optional[Callable[[], None]], accept_default: bool
) -> None:
def pre_run2() -> None:
if pre_run:
pre_run()
if accept_default:
                # Validate and handle input. We use `call_soon` in order to
                # run it "soon" (during the next iteration of the event
                # loop), instead of right now. Otherwise, it won't display
                # the default value.
get_event_loop().call_soon(self.default_buffer.validate_and_handle)
self.app.pre_run_callables.append(pre_run2)
@property
def editing_mode(self) -> EditingMode:
return self.app.editing_mode
@editing_mode.setter
def editing_mode(self, value: EditingMode) -> None:
self.app.editing_mode = value
def _get_default_buffer_control_height(self) -> Dimension:
# If there is an autocompletion menu to be shown, make sure that our
# layout has at least a minimal height in order to display it.
if (
self.completer is not None
and self.complete_style != CompleteStyle.READLINE_LIKE
):
space = self.reserve_space_for_menu
else:
space = 0
if space and not get_app().is_done:
buff = self.default_buffer
# Reserve the space, either when there are completions, or when
# `complete_while_typing` is true and we expect completions very
# soon.
if buff.complete_while_typing() or buff.complete_state is not None:
return Dimension(min=space)
return Dimension()
def _get_prompt(self) -> StyleAndTextTuples:
return to_formatted_text(self.message, style="class:prompt")
def _get_continuation(
self, width: int, line_number: int, wrap_count: int
) -> StyleAndTextTuples:
"""
Insert the prompt continuation.
        :param width: The width that was used for the prompt. (The continuation
            may use more or less.)
:param line_number:
:param wrap_count: Amount of times that the line has been wrapped.
"""
prompt_continuation = self.prompt_continuation
if callable(prompt_continuation):
continuation: AnyFormattedText = prompt_continuation(
width, line_number, wrap_count
)
else:
continuation = prompt_continuation
# When the continuation prompt is not given, choose the same width as
# the actual prompt.
if continuation is None and is_true(self.multiline):
continuation = " " * width
return to_formatted_text(continuation, style="class:prompt-continuation")
def _get_line_prefix(
self,
line_number: int,
wrap_count: int,
get_prompt_text_2: _StyleAndTextTuplesCallable,
) -> StyleAndTextTuples:
"""
Return whatever needs to be inserted before every line.
(the prompt, or a line continuation.)
"""
# First line: display the "arg" or the prompt.
if line_number == 0 and wrap_count == 0:
if not is_true(self.multiline) and get_app().key_processor.arg is not None:
return self._inline_arg()
else:
return get_prompt_text_2()
# For the next lines, display the appropriate continuation.
prompt_width = get_cwidth(fragment_list_to_text(get_prompt_text_2()))
return self._get_continuation(prompt_width, line_number, wrap_count)
def _get_arg_text(self) -> StyleAndTextTuples:
" 'arg' toolbar, for in multiline mode. "
arg = self.app.key_processor.arg
if arg is None:
# Should not happen because of the `has_arg` filter in the layout.
return []
if arg == "-":
arg = "-1"
return [("class:arg-toolbar", "Repeat: "), ("class:arg-toolbar.text", arg)]
def _inline_arg(self) -> StyleAndTextTuples:
" 'arg' prefix, for in single line mode. "
app = get_app()
if app.key_processor.arg is None:
return []
else:
arg = app.key_processor.arg
return [
("class:prompt.arg", "(arg: "),
("class:prompt.arg.text", str(arg)),
("class:prompt.arg", ") "),
]
# Expose the Input and Output objects as attributes, mainly for
# backward-compatibility.
@property
def input(self) -> Input:
return self.app.input
@property
def output(self) -> Output:
return self.app.output
def prompt(
message: Optional[AnyFormattedText] = None,
*,
history: Optional[History] = None,
editing_mode: Optional[EditingMode] = None,
refresh_interval: Optional[float] = None,
vi_mode: Optional[bool] = None,
lexer: Optional[Lexer] = None,
completer: Optional[Completer] = None,
complete_in_thread: Optional[bool] = None,
is_password: Optional[bool] = None,
key_bindings: Optional[KeyBindingsBase] = None,
bottom_toolbar: Optional[AnyFormattedText] = None,
style: Optional[BaseStyle] = None,
color_depth: Optional[ColorDepth] = None,
include_default_pygments_style: Optional[FilterOrBool] = None,
style_transformation: Optional[StyleTransformation] = None,
swap_light_and_dark_colors: Optional[FilterOrBool] = None,
rprompt: Optional[AnyFormattedText] = None,
multiline: Optional[FilterOrBool] = None,
prompt_continuation: Optional[PromptContinuationText] = None,
wrap_lines: Optional[FilterOrBool] = None,
enable_history_search: Optional[FilterOrBool] = None,
search_ignore_case: Optional[FilterOrBool] = None,
complete_while_typing: Optional[FilterOrBool] = None,
validate_while_typing: Optional[FilterOrBool] = None,
complete_style: Optional[CompleteStyle] = None,
auto_suggest: Optional[AutoSuggest] = None,
validator: Optional[Validator] = None,
clipboard: Optional[Clipboard] = None,
mouse_support: Optional[FilterOrBool] = None,
input_processors: Optional[List[Processor]] = None,
reserve_space_for_menu: Optional[int] = None,
enable_system_prompt: Optional[FilterOrBool] = None,
enable_suspend: Optional[FilterOrBool] = None,
enable_open_in_editor: Optional[FilterOrBool] = None,
tempfile_suffix: Optional[Union[str, Callable[[], str]]] = None,
tempfile: Optional[Union[str, Callable[[], str]]] = None,
# Following arguments are specific to the current `prompt()` call.
default: str = "",
accept_default: bool = False,
pre_run: Optional[Callable[[], None]] = None,
) -> str:
"""
The global `prompt` function. This will create a new `PromptSession`
instance for every call.
"""
    # The history is the only attribute that has to be passed to the
    # `PromptSession`; it can't be passed into the `prompt()` method.
session: PromptSession[str] = PromptSession(history=history)
return session.prompt(
message,
editing_mode=editing_mode,
refresh_interval=refresh_interval,
vi_mode=vi_mode,
lexer=lexer,
completer=completer,
complete_in_thread=complete_in_thread,
is_password=is_password,
key_bindings=key_bindings,
bottom_toolbar=bottom_toolbar,
style=style,
color_depth=color_depth,
include_default_pygments_style=include_default_pygments_style,
style_transformation=style_transformation,
swap_light_and_dark_colors=swap_light_and_dark_colors,
rprompt=rprompt,
multiline=multiline,
prompt_continuation=prompt_continuation,
wrap_lines=wrap_lines,
enable_history_search=enable_history_search,
search_ignore_case=search_ignore_case,
complete_while_typing=complete_while_typing,
validate_while_typing=validate_while_typing,
complete_style=complete_style,
auto_suggest=auto_suggest,
validator=validator,
clipboard=clipboard,
mouse_support=mouse_support,
input_processors=input_processors,
reserve_space_for_menu=reserve_space_for_menu,
enable_system_prompt=enable_system_prompt,
enable_suspend=enable_suspend,
enable_open_in_editor=enable_open_in_editor,
tempfile_suffix=tempfile_suffix,
tempfile=tempfile,
default=default,
accept_default=accept_default,
pre_run=pre_run,
)
prompt.__doc__ = PromptSession.prompt.__doc__
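# Quick usage sketch for the module-level function (illustrative only):
#
#     from prompt_toolkit.history import InMemoryHistory
#
#     history = InMemoryHistory()
#     text = prompt("Say something: ", history=history)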
def create_confirm_session(
message: str, suffix: str = " (y/n) "
) -> PromptSession[bool]:
"""
Create a `PromptSession` object for the 'confirm' function.
"""
bindings = KeyBindings()
@bindings.add("y")
@bindings.add("Y")
def yes(event: E) -> None:
session.default_buffer.text = "y"
event.app.exit(result=True)
@bindings.add("n")
@bindings.add("N")
def no(event: E) -> None:
session.default_buffer.text = "n"
event.app.exit(result=False)
@bindings.add(Keys.Any)
def _(event: E) -> None:
" Disallow inserting other text. "
pass
complete_message = merge_formatted_text([message, suffix])
session: PromptSession[bool] = PromptSession(
complete_message, key_bindings=bindings
)
return session
def confirm(message: str = "Confirm?", suffix: str = " (y/n) ") -> bool:
"""
Display a confirmation prompt that returns True/False.
"""
session = create_confirm_session(message, suffix)
return session.prompt()
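# Example (a sketch): pressing 'y'/'Y' returns True, 'n'/'N' returns False,
# and any other key is ignored.
#
#     if confirm("Continue?"):
#         print("continuing")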
|
|
#
# Copyright (c) 2016 NEC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
from cinder import exception
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.nec import volume_common
from cinder.volume.drivers.nec import volume_helper
xml_out = '''
<REQUEST>
<CMD_REQUEST cmd_name="/opt/iSMCliGateway/impl/query/iSMquery"
arg="-cinder -xml -all "
version="Version 9.4.001">
<CHAPTER name="Disk Array">
<OBJECT name="Disk Array">
<SECTION name="Disk Array Detail Information">
<UNIT name="Product ID">M310</UNIT>
</SECTION>
</OBJECT>
</CHAPTER>
<CHAPTER name="Logical Disk">
<OBJECT name="Logical Disk">
<SECTION name="LD Detail Information">
<UNIT name="LDN(h)">0000</UNIT>
<UNIT name="OS Type">LX</UNIT>
<UNIT name="LD Name">287RbQoP7VdwR1WsPC2fZT</UNIT>
<UNIT name="LD Capacity">1073741824</UNIT>
<UNIT name="Pool No.(h)">0000</UNIT>
<UNIT name="Purpose">---</UNIT>
<UNIT name="RPL Attribute">MV</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Logical Disk">
<SECTION name="LD Detail Information">
<UNIT name="LDN(h)">0001</UNIT>
<UNIT name="OS Type"> </UNIT>
<UNIT name="LD Name">backup_SDV0001</UNIT>
<UNIT name="LD Capacity">5368709120</UNIT>
<UNIT name="Pool No.(h)">0001</UNIT>
<UNIT name="Purpose">(invalid attribute)</UNIT>
<UNIT name="RPL Attribute">IV</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Logical Disk">
<SECTION name="LD Detail Information">
<UNIT name="LDN(h)">0003</UNIT>
<UNIT name="OS Type">LX</UNIT>
<UNIT name="LD Name">31HxzqBiAFTUxxOlcVn3EA</UNIT>
<UNIT name="LD Capacity">1073741824</UNIT>
<UNIT name="Pool No.(h)">0001</UNIT>
<UNIT name="Purpose">---</UNIT>
<UNIT name="RPL Attribute">RV</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Logical Disk">
<SECTION name="LD Detail Information">
<UNIT name="LDN(h)">0004</UNIT>
<UNIT name="OS Type">LX</UNIT>
<UNIT name="LD Name">287RbQoP7VdwR1WsPC2fZT_back</UNIT>
<UNIT name="LD Capacity">1073741824</UNIT>
<UNIT name="Pool No.(h)">0000</UNIT>
<UNIT name="Purpose">---</UNIT>
<UNIT name="RPL Attribute">RV</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Logical Disk">
<SECTION name="LD Detail Information">
<UNIT name="LDN(h)">0005</UNIT>
<UNIT name="OS Type">LX</UNIT>
<UNIT name="LD Name">20000009910200140005</UNIT>
<UNIT name="LD Capacity">10737418240</UNIT>
<UNIT name="Pool No.(h)">0000</UNIT>
<UNIT name="Purpose">---</UNIT>
<UNIT name="RPL Attribute">RV</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Logical Disk">
<SECTION name="LD Detail Information">
<UNIT name="LDN(h)">0006</UNIT>
<UNIT name="OS Type">LX</UNIT>
<UNIT name="LD Name">287RbQoP7VdwR1WsPC2fZT_l</UNIT>
<UNIT name="LD Capacity">10737418240</UNIT>
<UNIT name="Pool No.(h)">0000</UNIT>
<UNIT name="Purpose">---</UNIT>
<UNIT name="RPL Attribute">IV</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Logical Disk">
<SECTION name="LD Detail Information">
<UNIT name="LDN(h)">0007</UNIT>
<UNIT name="OS Type"> </UNIT>
<UNIT name="LD Name">20000009910200140007</UNIT>
<UNIT name="LD Capacity">10737418240</UNIT>
<UNIT name="Pool No.(h)">0000</UNIT>
<UNIT name="Purpose">---</UNIT>
<UNIT name="RPL Attribute">IV</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Logical Disk">
<SECTION name="LD Detail Information">
<UNIT name="LDN(h)">0008</UNIT>
<UNIT name="OS Type"> </UNIT>
<UNIT name="LD Name">20000009910200140008</UNIT>
<UNIT name="LD Capacity">10737418240</UNIT>
<UNIT name="Pool No.(h)">0000</UNIT>
<UNIT name="Purpose">---</UNIT>
<UNIT name="RPL Attribute">IV</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Logical Disk">
<SECTION name="LD Detail Information">
<UNIT name="LDN(h)">0009</UNIT>
<UNIT name="OS Type"> </UNIT>
<UNIT name="LD Name">20000009910200140009</UNIT>
<UNIT name="LD Capacity">10737418240</UNIT>
<UNIT name="Pool No.(h)">0000</UNIT>
<UNIT name="Purpose">---</UNIT>
<UNIT name="RPL Attribute">IV</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Logical Disk">
<SECTION name="LD Detail Information">
<UNIT name="LDN(h)">000a</UNIT>
<UNIT name="OS Type"> </UNIT>
<UNIT name="LD Name">2000000991020012000A</UNIT>
<UNIT name="LD Capacity">6442450944</UNIT>
<UNIT name="Pool No.(h)">0000</UNIT>
<UNIT name="Purpose">---</UNIT>
<UNIT name="RPL Attribute">IV</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Logical Disk">
<SECTION name="LD Detail Information">
<UNIT name="LDN(h)">000b</UNIT>
<UNIT name="OS Type"> </UNIT>
<UNIT name="LD Name">2000000991020012000B</UNIT>
<UNIT name="LD Capacity">6442450944</UNIT>
<UNIT name="Pool No.(h)">0000</UNIT>
<UNIT name="Purpose">---</UNIT>
<UNIT name="RPL Attribute">IV</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Logical Disk">
<SECTION name="LD Detail Information">
<UNIT name="LDN(h)">000c</UNIT>
<UNIT name="OS Type"> </UNIT>
<UNIT name="LD Name">2000000991020012000C</UNIT>
<UNIT name="LD Capacity">6442450944</UNIT>
<UNIT name="Pool No.(h)">0000</UNIT>
<UNIT name="Purpose">---</UNIT>
<UNIT name="RPL Attribute">IV</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Logical Disk">
<SECTION name="LD Detail Information">
<UNIT name="LDN(h)">000d</UNIT>
<UNIT name="OS Type">LX</UNIT>
<UNIT name="LD Name">yEUHrXa5AHMjOZZLb93eP</UNIT>
<UNIT name="LD Capacity">6442450944</UNIT>
<UNIT name="Pool No.(h)">0001</UNIT>
<UNIT name="Purpose">---</UNIT>
<UNIT name="RPL Attribute">IV</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Logical Disk">
<SECTION name="LD Detail Information">
<UNIT name="LDN(h)">000e</UNIT>
<UNIT name="OS Type">LX</UNIT>
<UNIT name="LD Name">4T7JpyqI3UuPlKeT9D3VQF</UNIT>
<UNIT name="LD Capacity">6442450944</UNIT>
<UNIT name="Pool No.(h)">0001</UNIT>
<UNIT name="Purpose">(invalid attribute)</UNIT>
<UNIT name="RPL Attribute">SV</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Logical Disk">
<SECTION name="LD Detail Information">
<UNIT name="LDN(h)">0fff</UNIT>
<UNIT name="OS Type"> </UNIT>
<UNIT name="LD Name">Pool0000_SYV0FFF</UNIT>
<UNIT name="LD Capacity">8589934592</UNIT>
<UNIT name="Pool No.(h)">0000</UNIT>
<UNIT name="Purpose">(invalid attribute)</UNIT>
<UNIT name="RPL Attribute">---</UNIT>
</SECTION>
</OBJECT>
</CHAPTER>
<CHAPTER name="Pool">
<OBJECT name="Pool">
<SECTION name="Pool Detail Information">
<UNIT name="Pool No.(h)">0000</UNIT>
<UNIT name="Pool Capacity">281320357888</UNIT>
<UNIT name="Used Pool Capacity">84020297728</UNIT>
<UNIT name="Free Pool Capacity">197300060160</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Pool">
<SECTION name="Pool Detail Information">
<UNIT name="Pool No.(h)">0001</UNIT>
<UNIT name="Pool Capacity">89657442304</UNIT>
<UNIT name="Used Pool Capacity">6710886400</UNIT>
<UNIT name="Free Pool Capacity">82946555904</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Pool">
<SECTION name="Pool Detail Information">
<UNIT name="Pool No.(h)">0002</UNIT>
<UNIT name="Pool Capacity">1950988894208</UNIT>
<UNIT name="Used Pool Capacity">18446744073441116160</UNIT>
<UNIT name="Free Pool Capacity">1951257329664</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Pool">
<SECTION name="Pool Detail Information">
<UNIT name="Pool No.(h)">0003</UNIT>
<UNIT name="Pool Capacity">1950988894208</UNIT>
<UNIT name="Used Pool Capacity">18446744073441116160</UNIT>
<UNIT name="Free Pool Capacity">1951257329664</UNIT>
</SECTION>
</OBJECT>
</CHAPTER>
<CHAPTER name="Controller">
<OBJECT name="Host Port">
<SECTION name="Host Director/Host Port Information">
<UNIT name="Port No.(h)">00-00</UNIT>
<UNIT name="WWPN">2100000991020012</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Host Port">
<SECTION name="Host Director/Host Port Information">
<UNIT name="Port No.(h)">00-01</UNIT>
<UNIT name="WWPN">2200000991020012</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Host Port">
<SECTION name="Host Director/Host Port Information">
<UNIT name="Port No.(h)">00-02</UNIT>
<UNIT name="IP Address">192.168.1.90</UNIT>
<UNIT name="Link Status">Link Down</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Host Port">
<SECTION name="Host Director/Host Port Information">
<UNIT name="Port No.(h)">00-03</UNIT>
<UNIT name="IP Address">192.168.1.91</UNIT>
<UNIT name="Link Status">Link Down</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Host Port">
<SECTION name="Host Director/Host Port Information">
<UNIT name="Port No.(h)">01-00</UNIT>
<UNIT name="WWPN">2900000991020012</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Host Port">
<SECTION name="Host Director/Host Port Information">
<UNIT name="Port No.(h)">01-01</UNIT>
<UNIT name="WWPN">2A00000991020012</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Host Port">
<SECTION name="Host Director/Host Port Information">
<UNIT name="Port No.(h)">01-02</UNIT>
<UNIT name="IP Address">192.168.2.92</UNIT>
<UNIT name="Link Status">Link Down</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="Host Port">
<SECTION name="Host Director/Host Port Information">
<UNIT name="Port No.(h)">01-03</UNIT>
<UNIT name="IP Address">192.168.2.93</UNIT>
<UNIT name="Link Status">Link Up</UNIT>
</SECTION>
</OBJECT>
</CHAPTER>
<CHAPTER name="Access Control">
<OBJECT name="LD Set(FC)">
<SECTION name="LD Set(FC) Information">
<UNIT name="Platform">LX</UNIT>
<UNIT name="LD Set Name">OpenStack1</UNIT>
</SECTION>
<SECTION name="Path List">
<UNIT name="Path">1000-0090-FAA0-786B</UNIT>
</SECTION>
<SECTION name="Path List">
<UNIT name="Path">1000-0090-FAA0-786A</UNIT>
</SECTION>
<SECTION name="LUN/LD List">
<UNIT name="LUN(h)">0000</UNIT>
<UNIT name="LDN(h)">0005</UNIT>
</SECTION>
<SECTION name="LUN/LD List">
<UNIT name="LUN(h)">0001</UNIT>
<UNIT name="LDN(h)">0006</UNIT>
</SECTION>
</OBJECT>
<OBJECT name="LD Set(iSCSI)">
<SECTION name="LD Set(iSCSI) Information">
<UNIT name="Platform">LX</UNIT>
<UNIT name="LD Set Name">OpenStack0</UNIT>
<UNIT name="Target Mode">Multi-Target</UNIT>
</SECTION>
<SECTION name="Portal">
<UNIT name="Portal">192.168.1.90:3260</UNIT>
</SECTION>
<SECTION name="Portal">
<UNIT name="Portal">192.168.1.91:3260</UNIT>
</SECTION>
<SECTION name="Portal">
<UNIT name="Portal">192.168.2.92:3260</UNIT>
</SECTION>
<SECTION name="Portal">
<UNIT name="Portal">192.168.2.93:3260</UNIT>
</SECTION>
<SECTION name="Initiator List">
<UNIT name="Initiator List">iqn.1994-05.com.redhat:d1d8e8f23255</UNIT>
</SECTION>
<SECTION name="Target Information For Multi-Target Mode">
<UNIT name="Target Name">iqn.2001-03.target0000</UNIT>
<UNIT name="LUN(h)">0000</UNIT>
<UNIT name="LDN(h)">0000</UNIT>
</SECTION>
</OBJECT>
</CHAPTER>
<RETURN_MSG>Command Completed Successfully!!</RETURN_MSG>
<RETURN_CODE>0</RETURN_CODE>
</CMD_REQUEST>
</REQUEST>
'''
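# Test doubles used with mock.patch below: patch_view_all returns the canned
# iSMview XML above instead of querying a live array, and patch_execute
# pretends every iSMCLI command completed successfully.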
def patch_view_all(self, conf_ismview_path=None, delete_ismview=True,
cmd_lock=True):
return xml_out
def patch_execute(self, command, expected_status=[0], raise_exec=True):
return "success", 0, 0
class DummyVolume(object):
def __init__(self):
super(DummyVolume, self).__init__()
self.id = ''
self.size = 0
self.status = ''
self.migration_status = ''
self.volume_id = ''
self.volume_type_id = ''
self.attach_status = ''
self.provider_location = ''
@ddt.ddt
class VolumeIDConvertTest(volume_helper.MStorageDSVDriver, test.TestCase):
@mock.patch('cinder.volume.drivers.nec.volume_common.MStorageVolumeCommon.'
'_create_ismview_dir', new=mock.Mock())
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
'view_all', patch_view_all)
def setUp(self):
super(VolumeIDConvertTest, self).setUp()
self._set_config(conf.Configuration(None), 'dummy', 'dummy')
self.do_setup(None)
self.vol = DummyVolume()
self.xml = self._cli.view_all()
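        # configs() parses the canned XML into the pool, LD, LD-set,
        # used-LDN and host-port tables the driver normally builds from a
        # live array.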
(self.pools,
self.lds,
self.ldsets,
self.used_ldns,
self.hostports,
self.max_ld_count) = self.configs(self.xml)
@ddt.data(("AAAAAAAA", "LX:37mA82"), ("BBBBBBBB", "LX:3R9ZwR"))
@ddt.unpack
def test_volumeid_should_change_62scale(self, volid, ldname):
self.vol.id = volid
actual = self._convert_id2name(self.vol)
self.assertEqual(ldname, actual,
"ID:%(volid)s should be change to %(ldname)s" %
{'volid': volid, 'ldname': ldname})
@ddt.data(("AAAAAAAA", "LX:37mA82"), ("BBBBBBBB", "LX:3R9ZwR"))
@ddt.unpack
def test_snap_volumeid_should_change_62scale_andpostfix(self,
volid,
ldname):
self.vol.id = volid
actual = self._convert_id2snapname(self.vol)
self.assertEqual(ldname, actual,
"ID:%(volid)s should be change to %(ldname)s" %
{'volid': volid, 'ldname': ldname})
@ddt.data(("AAAAAAAA", "LX:37mA82_m"), ("BBBBBBBB", "LX:3R9ZwR_m"))
@ddt.unpack
def test_ddrsnap_volumeid_should_change_62scale_and_m(self,
volid,
ldname):
self.vol.id = volid
actual = self._convert_id2migratename(self.vol)
self.assertEqual(ldname, actual,
"ID:%(volid)s should be change to %(ldname)s" %
{'volid': volid, 'ldname': ldname})
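# A note on the "62scale" naming exercised above: the expected LD names are
# consistent with folding the volume ID's hex digits into a base-62 alphabet
# (digits, then upper case, then lower case) and prepending the 'LX:' OS-type
# prefix. The helper below is a minimal illustrative sketch of that encoding,
# not necessarily the driver's actual implementation:
def _example_base62(value):
    """Encode a non-negative integer as a base-62 string."""
    alphabet = ('0123456789'
                'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                'abcdefghijklmnopqrstuvwxyz')
    if value == 0:
        return alphabet[0]
    digits = []
    while value:
        value, rem = divmod(value, 62)
        digits.append(alphabet[rem])
    return ''.join(reversed(digits))
# e.g. _example_base62(int('AAAAAAAA', 16)) == '37mA82', matching the
# expected 'LX:37mA82' above once the 'LX:' prefix is stripped.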
class NominatePoolLDTest(volume_helper.MStorageDSVDriver, test.TestCase):
@mock.patch('cinder.volume.drivers.nec.volume_common.MStorageVolumeCommon.'
'_create_ismview_dir', new=mock.Mock())
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
'view_all', patch_view_all)
def setUp(self):
super(NominatePoolLDTest, self).setUp()
self._set_config(conf.Configuration(None), 'dummy', 'dummy')
self.do_setup(None)
self.vol = DummyVolume()
self.xml = self._cli.view_all()
self._properties['cli_fip'] = '10.0.0.1'
self._properties['pool_pools'] = {0, 1}
self._properties['pool_backup_pools'] = {2, 3}
(self.pools,
self.lds,
self.ldsets,
self.used_ldns,
self.hostports,
self.max_ld_count) = self.configs(self.xml)
self._numofld_per_pool = 1024
def test_getxml(self):
self.assertIsNotNone(self.xml, "iSMview xml should not be None")
def test_selectldn_for_normalvolume(self):
ldn = self._select_ldnumber(self.used_ldns, self.max_ld_count)
self.assertEqual(2, ldn, "selected ldn should be XXX")
def test_selectpool_for_normalvolume(self):
self.vol.size = 10
pool = self._select_leastused_poolnumber(self.vol,
self.pools,
self.xml)
self.assertEqual(1, pool, "selected pool should be 1")
# config:pool_pools=[1]
self.vol.size = 999999999999
        with self.assertRaisesRegex(exception.VolumeBackendAPIException,
                                    'No available pools found.'):
pool = self._select_leastused_poolnumber(self.vol,
self.pools,
self.xml)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute',
patch_execute)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.view_all',
patch_view_all)
def test_selectpool_for_migratevolume(self):
self.vol.id = "46045673-41e7-44a7-9333-02f07feab04b"
self.vol.size = 10
dummyhost = {}
dummyhost['capabilities'] = self._update_volume_status()
pool = self._select_migrate_poolnumber(self.vol,
self.pools,
self.xml,
dummyhost)
self.assertEqual(1, pool, "selected pool should be 1")
self.vol.id = "1febb976-86d0-42ed-9bc0-4aa3e158f27d"
self.vol.size = 10
pool = self._select_migrate_poolnumber(self.vol,
self.pools,
self.xml,
dummyhost)
self.assertEqual(-1, pool, "selected pool is the same pool(return -1)")
self.vol.size = 999999999999
        with self.assertRaisesRegex(exception.VolumeBackendAPIException,
                                    'No available pools found.'):
pool = self._select_migrate_poolnumber(self.vol,
self.pools,
self.xml,
dummyhost)
def test_selectpool_for_snapvolume(self):
self.vol.size = 10
savePool1 = self.pools[1]['free']
self.pools[1]['free'] = 0
pool = self._select_dsv_poolnumber(self.vol, self.pools)
self.assertEqual(2, pool, "selected pool should be 2")
# config:pool_backup_pools=[2]
self.pools[1]['free'] = savePool1
        if len(self.pools[0]['ld_list']) == 1024:
savePool2 = self.pools[2]['free']
savePool3 = self.pools[3]['free']
self.pools[2]['free'] = 0
self.pools[3]['free'] = 0
            with self.assertRaisesRegex(exception.VolumeBackendAPIException,
                                        'No available pools found.'):
pool = self._select_dsv_poolnumber(self.vol, self.pools)
self.pools[2]['free'] = savePool2
self.pools[3]['free'] = savePool3
self.vol.size = 999999999999
pool = self._select_dsv_poolnumber(self.vol, self.pools)
self.assertEqual(2, pool, "selected pool should be 2")
# config:pool_backup_pools=[2]
def test_selectpool_for_ddrvolume(self):
self.vol.size = 10
pool = self._select_ddr_poolnumber(self.vol,
self.pools,
self.xml,
10)
self.assertEqual(2, pool, "selected pool should be 2")
# config:pool_backup_pools=[2]
savePool2 = self.pools[2]['free']
savePool3 = self.pools[3]['free']
self.pools[2]['free'] = 0
self.pools[3]['free'] = 0
        with self.assertRaisesRegex(exception.VolumeBackendAPIException,
                                    'No available pools found.'):
pool = self._select_ddr_poolnumber(self.vol,
self.pools,
self.xml,
10)
self.pools[2]['free'] = savePool2
self.pools[3]['free'] = savePool3
self.vol.size = 999999999999
        with self.assertRaisesRegex(exception.VolumeBackendAPIException,
                                    'No available pools found.'):
pool = self._select_ddr_poolnumber(self.vol,
self.pools,
self.xml,
999999999999)
def test_selectpool_for_volddrvolume(self):
self.vol.size = 10
pool = self._select_volddr_poolnumber(self.vol,
self.pools,
self.xml,
10)
self.assertEqual(1, pool, "selected pool should be 1")
# config:pool_backup_pools=[2]
savePool0 = self.pools[0]['free']
savePool1 = self.pools[1]['free']
self.pools[0]['free'] = 0
self.pools[1]['free'] = 0
        with self.assertRaisesRegex(exception.VolumeBackendAPIException,
                                    'No available pools found.'):
pool = self._select_volddr_poolnumber(self.vol,
self.pools,
self.xml,
10)
self.pools[0]['free'] = savePool0
self.pools[1]['free'] = savePool1
self.vol.size = 999999999999
        with self.assertRaisesRegex(exception.VolumeBackendAPIException,
                                    'No available pools found.'):
pool = self._select_volddr_poolnumber(self.vol,
self.pools,
self.xml,
999999999999)
class GetInformationTest(volume_helper.MStorageDSVDriver, test.TestCase):
def setUp(self):
super(GetInformationTest, self).setUp()
self._set_config(conf.Configuration(None), 'dummy', 'dummy')
self.do_setup(None)
@mock.patch('cinder.volume.drivers.nec.volume_common.MStorageVolumeCommon.'
'_create_ismview_dir', new=mock.Mock())
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
'view_all', patch_view_all)
def test_get_ldset(self):
self.xml = self._cli.view_all()
(self.pools,
self.lds,
self.ldsets,
self.used_ldns,
self.hostports,
self.max_ld_count) = self.configs(self.xml)
self._properties['ldset_name'] = ''
ldset = self.get_ldset(self.ldsets)
self.assertIsNone(ldset)
self._properties['ldset_name'] = 'LX:OpenStack1'
ldset = self.get_ldset(self.ldsets)
self.assertEqual('LX:OpenStack1', ldset['ldsetname'])
self._properties['ldset_name'] = 'LX:OpenStackX'
        with self.assertRaisesRegex(exception.NotFound,
                                    'Logical Disk Set'
                                    ' `LX:OpenStackX`'
                                    ' could not be found.'):
self.get_ldset(self.ldsets)
class VolumeCreateTest(volume_helper.MStorageDSVDriver, test.TestCase):
@mock.patch('cinder.volume.drivers.nec.volume_common.MStorageVolumeCommon.'
'_create_ismview_dir', new=mock.Mock())
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
'view_all', patch_view_all)
def setUp(self):
super(VolumeCreateTest, self).setUp()
self._set_config(conf.Configuration(None), 'dummy', 'dummy')
self.do_setup(None)
self.vol = DummyVolume()
self.xml = self._cli.view_all()
(self.pools,
self.lds,
self.ldsets,
self.used_ldns,
self.hostports,
self.max_ld_count) = self.configs(self.xml)
def test_validate_migrate_volume(self):
self.vol.id = "46045673-41e7-44a7-9333-02f07feab04b"
self.vol.size = 10
self.vol.status = 'available'
self._validate_migrate_volume(self.vol, self.xml)
self.vol.id = "46045673-41e7-44a7-9333-02f07feab04b"
self.vol.size = 10
self.vol.status = 'creating'
        with self.assertRaisesRegex(exception.VolumeBackendAPIException,
                                    'Specified Logical Disk'
                                    ' LX:287RbQoP7VdwR1WsPC2fZT'
                                    ' is not available.'):
self._validate_migrate_volume(self.vol, self.xml)
self.vol.id = "AAAAAAAA"
self.vol.size = 10
self.vol.status = 'available'
        with self.assertRaisesRegex(exception.NotFound,
                                    'Logical Disk `LX:37mA82`'
                                    ' could not be found.'):
self._validate_migrate_volume(self.vol, self.xml)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute',
patch_execute)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.view_all',
patch_view_all)
def test_extend_volume(self):
self.vol.id = "46045673-41e7-44a7-9333-02f07feab04b" # MV
self.vol.size = 1
self.vol.status = 'available'
self.extend_volume(self.vol, 10)
self.vol.id = "00046058-d38e-7f60-67b7-59ed65e54225" # RV
self.vol.size = 1
self.vol.status = 'available'
        with self.assertRaisesRegex(exception.VolumeBackendAPIException,
                                    'RPL Attribute Error. '
                                    'RPL Attribute = RV.'):
self.extend_volume(self.vol, 10)
class BindLDTest(volume_helper.MStorageDSVDriver, test.TestCase):
@mock.patch('cinder.volume.drivers.nec.volume_common.MStorageVolumeCommon.'
'_create_ismview_dir', new=mock.Mock())
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
'view_all', patch_view_all)
def setUp(self):
super(BindLDTest, self).setUp()
self._set_config(conf.Configuration(None), 'dummy', 'dummy')
self.do_setup(None)
self.vol = DummyVolume()
self.src = DummyVolume()
self.xml = self._cli.view_all()
(self.pools,
self.lds,
self.ldsets,
self.used_ldns,
self.hostports,
self.max_ld_count) = self.configs(self.xml)
mock_bindld = mock.Mock()
self._bind_ld = mock_bindld
self._bind_ld.return_value = 0, 0, 0
def test_bindld_CreateVolume(self):
self.vol.id = "AAAAAAAA"
self.vol.size = 1
self.vol.migration_status = "success"
self.vol.volume_type_id = None
self.create_volume(self.vol)
self._bind_ld.assert_called_once_with(
self.vol, self.vol.size, None,
self._convert_id2name,
self._select_leastused_poolnumber)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute',
patch_execute)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.view_all',
patch_view_all)
def test_bindld_CreateCloneVolume(self):
self.vol.id = "AAAAAAAA"
self.vol.size = 1
self.vol.migration_status = "success"
self.src.id = "46045673-41e7-44a7-9333-02f07feab04b"
self.src.size = 1
self.vol.volume_type_id = None
mock_query_DSV = mock.Mock()
self._cli.query_BV_SV_status = mock_query_DSV
self._cli.query_BV_SV_status.return_value = 'snap/active'
mock_query_DDR = mock.Mock()
self._cli.query_MV_RV_name = mock_query_DDR
self._cli.query_MV_RV_name.return_value = 'separated'
mock_backup = mock.Mock()
self._cli.backup_restore = mock_backup
self.create_cloned_volume(self.vol, self.src)
self._bind_ld.assert_called_once_with(
self.vol, self.vol.size, None,
self._convert_id2name,
self._select_leastused_poolnumber)
class BindLDTest_Snap(volume_helper.MStorageDSVDriver, test.TestCase):
@mock.patch('cinder.volume.drivers.nec.volume_common.MStorageVolumeCommon.'
'_create_ismview_dir', new=mock.Mock())
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
'view_all', patch_view_all)
def setUp(self):
super(BindLDTest_Snap, self).setUp()
self._set_config(conf.Configuration(None), 'dummy', 'dummy')
self.do_setup(None)
self.vol = DummyVolume()
self.snap = DummyVolume()
self.xml = self._cli.view_all()
(self.pools,
self.lds,
self.ldsets,
self.used_ldns,
self.hostports,
self.max_ld_count) = self.configs(self.xml)
mock_bindld = mock.Mock()
self._bind_ld = mock_bindld
self._bind_ld.return_value = 0, 0, 0
mock_bindsnap = mock.Mock()
self._create_snapshot = mock_bindsnap
def test_bindld_CreateSnapshot(self):
self.snap.id = "AAAAAAAA"
self.snap.volume_id = "1febb976-86d0-42ed-9bc0-4aa3e158f27d"
self.snap.size = 10
self.create_snapshot(self.snap)
self._create_snapshot.assert_called_once_with(
self.snap, self._properties['diskarray_name'])
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute',
patch_execute)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.view_all',
patch_view_all)
def test_bindld_CreateFromSnapshot(self):
self.vol.id = "AAAAAAAA"
self.vol.size = 1
self.vol.migration_status = "success"
self.vol.volume_type_id = None
self.snap.id = "63410c76-2f12-4473-873d-74a63dfcd3e2"
self.snap.volume_id = "1febb976-86d0-42ed-9bc0-4aa3e158f27d"
mock_query = mock.Mock()
self._cli.query_BV_SV_status = mock_query
self._cli.query_BV_SV_status.return_value = 'snap/active'
mock_backup = mock.Mock()
self._cli.backup_restore = mock_backup
self.create_volume_from_snapshot(self.vol, self.snap)
self._bind_ld.assert_called_once_with(
self.vol, 1, None,
self._convert_id2name,
self._select_volddr_poolnumber, 1)
class ExportTest(volume_helper.MStorageDSVDriver, test.TestCase):
@mock.patch('cinder.volume.drivers.nec.volume_common.MStorageVolumeCommon.'
'_create_ismview_dir', new=mock.Mock())
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
'view_all', patch_view_all)
def setUp(self):
super(ExportTest, self).setUp()
self._set_config(conf.Configuration(None), 'dummy', 'dummy')
self.do_setup(None)
self.vol = DummyVolume()
self.xml = self._cli.view_all()
(self.pools,
self.lds,
self.ldsets,
self.used_ldns,
self.hostports,
self.max_ld_count) = self.configs(self.xml)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute',
patch_execute)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.view_all',
patch_view_all)
def test_iscsi_portal(self):
self.vol.id = "46045673-41e7-44a7-9333-02f07feab04b"
self.vol.size = 10
self.vol.status = None
self.vol.migration_status = None
connector = {'initiator': "iqn.1994-05.com.redhat:d1d8e8f23255"}
self.iscsi_do_export(None, self.vol, connector)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute',
patch_execute)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.view_all',
patch_view_all)
def test_fc_do_export(self):
self.vol.id = "46045673-41e7-44a7-9333-02f07feab04b"
self.vol.size = 10
self.vol.status = None
self.vol.migration_status = None
connector = {'wwpns': ["10000090FAA0786A", "10000090FAA0786B"]}
self.fc_do_export(None, self.vol, connector)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute',
patch_execute)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.view_all',
patch_view_all)
def test_remove_export(self):
self.vol.id = "46045673-41e7-44a7-9333-02f07feab04b"
self.vol.size = 10
self.vol.status = 'uploading'
self.vol.attach_status = 'attached'
self.vol.migration_status = None
self.vol.volume_type_id = None
context = mock.Mock()
ret = self.remove_export(context, self.vol)
self.assertIsNone(ret)
self.vol.attach_status = None
self.vol.status = 'downloading'
        with self.assertRaisesRegex(exception.VolumeBackendAPIException,
                                    r'Failed to unregister Logical Disk from'
                                    r' Logical Disk Set \(iSM31064\)'):
mock_del = mock.Mock()
self._cli.delldsetld = mock_del
self._cli.delldsetld.return_value = False, 'iSM31064'
self.remove_export(context, self.vol)
def test_iscsi_initialize_connection(self):
self.vol.id = "46045673-41e7-44a7-9333-02f07feab04b"
loc = "127.0.0.1:3260:1 iqn.2010-10.org.openstack:volume-00000001 88"
self.vol.provider_location = loc
connector = {'initiator': "iqn.1994-05.com.redhat:d1d8e8f23255",
'multipath': True}
info = self._iscsi_initialize_connection(self.vol, connector)
self.assertEqual('iscsi', info['driver_volume_type'])
self.assertEqual('iqn.2010-10.org.openstack:volume-00000001',
info['data']['target_iqn'])
self.assertEqual('127.0.0.1:3260', info['data']['target_portal'])
self.assertEqual(88, info['data']['target_lun'])
self.assertEqual('iqn.2010-10.org.openstack:volume-00000001',
info['data']['target_iqns'][0])
self.assertEqual('127.0.0.1:3260', info['data']['target_portals'][0])
self.assertEqual(88, info['data']['target_luns'][0])
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute',
patch_execute)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.view_all',
patch_view_all)
def test_iscsi_terminate_connection(self):
self.vol.id = "46045673-41e7-44a7-9333-02f07feab04b"
connector = {'initiator': "iqn.1994-05.com.redhat:d1d8e8f23255",
'multipath': True}
ret = self._iscsi_terminate_connection(self.vol, connector)
self.assertIsNone(ret)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute',
patch_execute)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.view_all',
patch_view_all)
def test_iscsi_terminate_connection_negative(self):
self.vol.id = "46045673-41e7-44a7-9333-02f07feab04b"
connector = {'initiator': "iqn.1994-05.com.redhat:d1d8e8f23255",
'multipath': True}
        with self.assertRaisesRegex(exception.VolumeBackendAPIException,
                                    r'Failed to unregister Logical Disk from'
                                    r' Logical Disk Set \(iSM31064\)'):
mock_del = mock.Mock()
self._cli.delldsetld = mock_del
self._cli.delldsetld.return_value = False, 'iSM31064'
self._iscsi_terminate_connection(self.vol, connector)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute',
patch_execute)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.view_all',
patch_view_all)
def test_fc_initialize_connection(self):
self.vol.id = "46045673-41e7-44a7-9333-02f07feab04b"
self.vol.migration_status = None
connector = {'wwpns': ["10000090FAA0786A", "10000090FAA0786B"]}
info = self._fc_initialize_connection(self.vol, connector)
self.assertEqual('fibre_channel', info['driver_volume_type'])
self.assertEqual('2100000991020012', info['data']['target_wwn'][0])
self.assertEqual('2200000991020012', info['data']['target_wwn'][1])
self.assertEqual('2900000991020012', info['data']['target_wwn'][2])
self.assertEqual('2A00000991020012', info['data']['target_wwn'][3])
self.assertEqual(
'2100000991020012',
info['data']['initiator_target_map']['10000090FAA0786A'][0])
self.assertEqual(
'2100000991020012',
info['data']['initiator_target_map']['10000090FAA0786B'][0])
self.assertEqual(
'2200000991020012',
info['data']['initiator_target_map']['10000090FAA0786A'][1])
self.assertEqual(
'2200000991020012',
info['data']['initiator_target_map']['10000090FAA0786B'][1])
self.assertEqual(
'2900000991020012',
info['data']['initiator_target_map']['10000090FAA0786A'][2])
self.assertEqual(
'2900000991020012',
info['data']['initiator_target_map']['10000090FAA0786B'][2])
self.assertEqual(
'2A00000991020012',
info['data']['initiator_target_map']['10000090FAA0786A'][3])
self.assertEqual(
'2A00000991020012',
info['data']['initiator_target_map']['10000090FAA0786B'][3])
        with self.assertRaisesRegex(exception.VolumeBackendAPIException,
                                    r'Failed to unregister Logical Disk from'
                                    r' Logical Disk Set \(iSM31064\)'):
mock_del = mock.Mock()
self._cli.delldsetld = mock_del
self._cli.delldsetld.return_value = False, 'iSM31064'
self._fc_terminate_connection(self.vol, connector)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute',
patch_execute)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.view_all',
patch_view_all)
def test_fc_terminate_connection(self):
self.vol.id = "46045673-41e7-44a7-9333-02f07feab04b"
connector = {'wwpns': ["10000090FAA0786A", "10000090FAA0786B"]}
info = self._fc_terminate_connection(self.vol, connector)
self.assertEqual('fibre_channel', info['driver_volume_type'])
self.assertEqual('2100000991020012', info['data']['target_wwn'][0])
self.assertEqual('2200000991020012', info['data']['target_wwn'][1])
self.assertEqual('2900000991020012', info['data']['target_wwn'][2])
self.assertEqual('2A00000991020012', info['data']['target_wwn'][3])
self.assertEqual(
'2100000991020012',
info['data']['initiator_target_map']['10000090FAA0786A'][0])
self.assertEqual(
'2100000991020012',
info['data']['initiator_target_map']['10000090FAA0786B'][0])
self.assertEqual(
'2200000991020012',
info['data']['initiator_target_map']['10000090FAA0786A'][1])
self.assertEqual(
'2200000991020012',
info['data']['initiator_target_map']['10000090FAA0786B'][1])
self.assertEqual(
'2900000991020012',
info['data']['initiator_target_map']['10000090FAA0786A'][2])
self.assertEqual(
'2900000991020012',
info['data']['initiator_target_map']['10000090FAA0786B'][2])
self.assertEqual(
'2A00000991020012',
info['data']['initiator_target_map']['10000090FAA0786A'][3])
self.assertEqual(
'2A00000991020012',
info['data']['initiator_target_map']['10000090FAA0786B'][3])
info = self._fc_terminate_connection(self.vol, None)
self.assertEqual('fibre_channel', info['driver_volume_type'])
self.assertEqual({}, info['data'])
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute',
patch_execute)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.view_all',
patch_view_all)
def test_iscsi_portal_with_controller_node_name(self):
self.vol.id = "46045673-41e7-44a7-9333-02f07feab04b"
self.vol.status = 'downloading'
connector = {'initiator': "iqn.1994-05.com.redhat:d1d8e8f23255"}
self._properties['ldset_controller_node_name'] = 'LX:OpenStack1'
self._properties['portal_number'] = 2
location = self.iscsi_do_export(None, self.vol, connector)
self.assertEqual('192.168.1.90:3260;192.168.1.91:3260;'
'192.168.2.92:3260;192.168.2.93:3260'
',1 iqn.2001-03.target0000 0',
location['provider_location'])
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute',
patch_execute)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.view_all',
patch_view_all)
def test_fc_do_export_with_controller_node_name(self):
self.vol.id = "46045673-41e7-44a7-9333-02f07feab04b"
self.vol.status = 'downloading'
connector = {'wwpns': ["10000090FAA0786A", "10000090FAA0786B"]}
self._properties['ldset_controller_node_name'] = 'LX:OpenStack0'
location = self.fc_do_export(None, self.vol, connector)
self.assertIsNone(location)
class DeleteDSVVolume_test(volume_helper.MStorageDSVDriver,
test.TestCase):
@mock.patch('cinder.volume.drivers.nec.volume_common.MStorageVolumeCommon.'
'_create_ismview_dir', new=mock.Mock())
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
'view_all', patch_view_all)
def setUp(self):
super(DeleteDSVVolume_test, self).setUp()
self._set_config(conf.Configuration(None), 'dummy', 'dummy')
self.do_setup(None)
self.vol = DummyVolume()
self.xml = self._cli.view_all()
(self.pools,
self.lds,
self.ldsets,
self.used_ldns,
self.hostports,
self.max_ld_count) = self.configs(self.xml)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute',
patch_execute)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.view_all',
patch_view_all)
def test_delete_snapshot(self):
self.vol.id = "63410c76-2f12-4473-873d-74a63dfcd3e2"
self.vol.volume_id = "1febb976-86d0-42ed-9bc0-4aa3e158f27d"
mock_query = mock.Mock()
self._cli.query_BV_SV_status = mock_query
self._cli.query_BV_SV_status.return_value = 'snap/active'
ret = self.delete_snapshot(self.vol)
self.assertIsNone(ret)
class NonDisruptiveBackup_test(volume_helper.MStorageDSVDriver,
test.TestCase):
@mock.patch('cinder.volume.drivers.nec.volume_common.MStorageVolumeCommon.'
'_create_ismview_dir', new=mock.Mock())
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
'view_all', patch_view_all)
def setUp(self):
super(NonDisruptiveBackup_test, self).setUp()
self._set_config(conf.Configuration(None), 'dummy', 'dummy')
self.do_setup(None)
self.vol = DummyVolume()
self.vol.id = "46045673-41e7-44a7-9333-02f07feab04b"
self.volvolume_id = "1febb976-86d0-42ed-9bc0-4aa3e158f27d"
self.volsize = 10
self.volstatus = None
self.volmigration_status = None
self.xml = self._cli.view_all()
(self.pools,
self.lds,
self.ldsets,
self.used_ldns,
self.hostports,
self.max_ld_count) = self.configs(self.xml)
def test_validate_ld_exist(self):
ldname = self._validate_ld_exist(
self.lds, self.vol.id, self._properties['ld_name_format'])
self.assertEqual('LX:287RbQoP7VdwR1WsPC2fZT', ldname)
self.vol.id = "00000000-0000-0000-0000-6b6d96553b4b"
        with self.assertRaisesRegex(exception.NotFound,
                                    'Logical Disk `LX:XXXXXXXX`'
                                    ' could not be found.'):
self._validate_ld_exist(
self.lds, self.vol.id, self._properties['ld_name_format'])
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute',
patch_execute)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
'view_all', new=mock.Mock())
def test_validate_iscsildset_exist(self):
connector = {'initiator': "iqn.1994-05.com.redhat:d1d8e8f23255"}
ldset = self._validate_iscsildset_exist(self.ldsets, connector)
self.assertEqual('LX:OpenStack0', ldset['ldsetname'])
connector = {'initiator': "iqn.1994-05.com.redhat:d1d8e8f232XX"}
mock_data = {'ldsetname': 'LX:redhatd1d8e8f23',
'protocol': 'iSCSI',
'portal_list': ['1.1.1.1:3260', '2.2.2.2:3260'],
'lds': {},
'initiator_list':
['iqn.1994-05.com.redhat:d1d8e8f232XX']}
mock_ldset = {}
mock_ldset['LX:redhatd1d8e8f23'] = mock_data
mock_configs = mock.Mock()
self.configs = mock_configs
self.configs.return_value = None, None, mock_ldset, None, None, None
ldset = self._validate_iscsildset_exist(self.ldsets, connector)
self.assertEqual('LX:redhatd1d8e8f23', ldset['ldsetname'])
self.assertEqual('iqn.1994-05.com.redhat:d1d8e8f232XX',
ldset['initiator_list'][0])
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute',
patch_execute)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
'view_all', new=mock.Mock())
def test_validate_fcldset_exist(self):
connector = {'wwpns': ["10000090FAA0786A", "10000090FAA0786B"]}
ldset = self._validate_fcldset_exist(self.ldsets, connector)
self.assertEqual('LX:OpenStack1', ldset['ldsetname'])
connector = {'wwpns': ["10000090FAA0786X", "10000090FAA0786Y"]}
mock_data = {'ldsetname': 'LX:10000090FAA0786X',
'lds': {},
'protocol': 'FC',
'wwpn': ["1000-0090-FAA0-786X", "1000-0090-FAA0-786Y"],
'port': []}
mock_ldset = {}
mock_ldset['LX:10000090FAA0786X'] = mock_data
mock_configs = mock.Mock()
self.configs = mock_configs
self.configs.return_value = None, None, mock_ldset, None, None, None
ldset = self._validate_fcldset_exist(self.ldsets, connector)
self.assertEqual('LX:10000090FAA0786X', ldset['ldsetname'])
self.assertEqual('1000-0090-FAA0-786X', ldset['wwpn'][0])
self.assertEqual('1000-0090-FAA0-786Y', ldset['wwpn'][1])
def test_enumerate_iscsi_portals(self):
connector = {'initiator': "iqn.1994-05.com.redhat:d1d8e8f23255"}
ldset = self._validate_iscsildset_exist(self.ldsets, connector)
self.assertEqual('LX:OpenStack0', ldset['ldsetname'])
self._properties['portal_number'] = 2
portal = self._enumerate_iscsi_portals(self.hostports, ldset)
self.assertEqual('192.168.1.90:3260', portal[0])
self.assertEqual('192.168.1.91:3260', portal[1])
self.assertEqual('192.168.2.92:3260', portal[2])
self.assertEqual('192.168.2.93:3260', portal[3])
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
'view_all', patch_view_all)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute',
patch_execute)
def test_initialize_connection_snapshot(self):
connector = {'initiator': "iqn.1994-05.com.redhat:d1d8e8f23255"}
loc = "127.0.0.1:3260:1 iqn.2010-10.org.openstack:volume-00000001 88"
self.vol.provider_location = loc
ret = self.iscsi_initialize_connection_snapshot(self.vol, connector)
self.assertIsNotNone(ret)
self.assertEqual('iscsi', ret['driver_volume_type'])
connector = {'wwpns': ["10000090FAA0786A", "10000090FAA0786B"]}
ret = self.fc_initialize_connection_snapshot(self.vol, connector)
self.assertIsNotNone(ret)
self.assertEqual('fibre_channel', ret['driver_volume_type'])
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
'view_all', patch_view_all)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute',
patch_execute)
def test_terminate_connection_snapshot(self):
connector = {'initiator': "iqn.1994-05.com.redhat:d1d8e8f23255"}
self.iscsi_terminate_connection_snapshot(self.vol, connector)
connector = {'wwpns': ["10000090FAA0786A", "10000090FAA0786B"]}
ret = self.fc_terminate_connection_snapshot(self.vol, connector)
self.assertEqual('fibre_channel', ret['driver_volume_type'])
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
'view_all', patch_view_all)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute',
patch_execute)
def test_remove_export_snapshot(self):
self.remove_export_snapshot(None, self.vol)
def test_backup_use_temp_snapshot(self):
ret = self.backup_use_temp_snapshot()
self.assertTrue(ret)
class VolumeStats_test(volume_helper.MStorageDSVDriver, test.TestCase):
def setUp(self):
super(VolumeStats_test, self).setUp()
self._set_config(conf.Configuration(None), 'dummy', 'dummy')
self.do_setup(None)
self._properties['cli_fip'] = '10.0.0.1'
self._properties['pool_pools'] = {0, 1}
self._properties['pool_backup_pools'] = {2, 3}
def test_update_volume_status(self):
self.mock_object(volume_common.MStorageVolumeCommon, 'parse_xml',
side_effect=Exception)
stats = self._update_volume_status()
self.assertEqual('dummy', stats.get('volume_backend_name'))
self.assertEqual('NEC', stats.get('vendor_name'))
self.assertEqual(self.VERSION, stats.get('driver_version'))
self.assertEqual('10.0.0.1', stats.get('location_info').split(':')[0])
self.assertEqual('0,1', stats.get('location_info').split(':')[1])
class Migrate_test(volume_helper.MStorageDSVDriver, test.TestCase):
@mock.patch('cinder.volume.drivers.nec.volume_common.MStorageVolumeCommon.'
'_create_ismview_dir', new=mock.Mock())
def setUp(self):
super(Migrate_test, self).setUp()
self._set_config(conf.Configuration(None), 'dummy', 'dummy')
self.do_setup(None)
self.newvol = DummyVolume()
self.newvol.id = "46045673-41e7-44a7-9333-02f07feab04b"
self.sourcevol = DummyVolume()
self.sourcevol.id = "1febb976-86d0-42ed-9bc0-4aa3e158f27d"
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
'view_all', patch_view_all)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI._execute',
patch_execute)
def test_update_migrate_volume(self):
update_data = self.update_migrated_volume(None, self.sourcevol,
self.newvol, 'available')
self.assertIsNone(update_data['_name_id'])
self.assertIsNone(update_data['provider_location'])
class ManageUnmanage_test(volume_helper.MStorageDSVDriver, test.TestCase):
@mock.patch('cinder.volume.drivers.nec.volume_common.MStorageVolumeCommon.'
'_create_ismview_dir', new=mock.Mock())
def setUp(self):
super(ManageUnmanage_test, self).setUp()
self._set_config(conf.Configuration(None), 'dummy', 'dummy')
self.do_setup(None)
self._properties['pool_pools'] = {0}
self._properties['pool_backup_pools'] = {1}
def test_is_manageable_volume(self):
ld_ok_iv = {'pool_num': 0, 'RPL Attribute': 'IV', 'Purpose': '---'}
ld_ok_bv = {'pool_num': 0, 'RPL Attribute': 'BV', 'Purpose': 'INV'}
ld_ng_pool = {'pool_num': 1, 'RPL Attribute': 'IV', 'Purpose': '---'}
ld_ng_rpl1 = {'pool_num': 0, 'RPL Attribute': 'MV', 'Purpose': 'INV'}
ld_ng_rpl2 = {'pool_num': 0, 'RPL Attribute': 'RV', 'Purpose': 'INV'}
ld_ng_rpl3 = {'pool_num': 0, 'RPL Attribute': 'SV', 'Purpose': 'INV'}
ld_ng_purp = {'pool_num': 0, 'RPL Attribute': 'IV', 'Purpose': 'INV'}
self.assertTrue(self._is_manageable_volume(ld_ok_iv))
self.assertTrue(self._is_manageable_volume(ld_ok_bv))
self.assertFalse(self._is_manageable_volume(ld_ng_pool))
self.assertFalse(self._is_manageable_volume(ld_ng_rpl1))
self.assertFalse(self._is_manageable_volume(ld_ng_rpl2))
self.assertFalse(self._is_manageable_volume(ld_ng_rpl3))
self.assertFalse(self._is_manageable_volume(ld_ng_purp))
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
'view_all', patch_view_all)
def test_get_manageable_volumes(self):
current_volumes = []
volumes = self.get_manageable_volumes(current_volumes, None,
100, 0, ['reference'], ['dec'])
self.assertEqual('LX:287RbQoP7VdwR1WsPC2fZT',
volumes[2]['reference']['source-name'])
current_volumes = []
volumes = self.get_manageable_volumes(current_volumes, None,
100, 0, ['reference'], ['asc'])
self.assertEqual(' :2000000991020012000A',
volumes[0]['reference']['source-name'])
self.assertEqual(10, len(volumes))
volume = {'id': '46045673-41e7-44a7-9333-02f07feab04b'}
current_volumes = []
current_volumes.append(volume)
volumes = self.get_manageable_volumes(current_volumes, None,
100, 0, ['reference'], ['dec'])
self.assertFalse(volumes[2]['safe_to_manage'])
self.assertFalse(volumes[3]['safe_to_manage'])
self.assertTrue(volumes[4]['safe_to_manage'])
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
'view_all', patch_view_all)
def test_manage_existing(self):
mock_rename = mock.Mock()
self._cli.changeldname = mock_rename
self.newvol = DummyVolume()
self.newvol.id = "46045673-41e7-44a7-9333-02f07feab04b"
current_volumes = []
volumes = self.get_manageable_volumes(current_volumes, None,
100, 0, ['reference'], ['dec'])
self.manage_existing(self.newvol, volumes[4]['reference'])
self._cli.changeldname.assert_called_once_with(
None,
'LX:287RbQoP7VdwR1WsPC2fZT',
' :20000009910200140009')
with self.assertRaisesRegex(exception.ManageExistingInvalidReference,
'Specified resource is already in-use.'):
self.manage_existing(self.newvol, volumes[3]['reference'])
volume = {'source-name': 'LX:yEUHrXa5AHMjOZZLb93eP'}
with self.assertRaisesRegex(exception.ManageExistingVolumeTypeMismatch,
'Volume type is unmatched.'):
self.manage_existing(self.newvol, volume)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
'view_all', patch_view_all)
def test_manage_existing_get_size(self):
self.newvol = DummyVolume()
self.newvol.id = "46045673-41e7-44a7-9333-02f07feab04b"
current_volumes = []
volumes = self.get_manageable_volumes(current_volumes, None,
100, 0, ['reference'], ['dec'])
size_in_gb = self.manage_existing_get_size(self.newvol,
volumes[3]['reference'])
self.assertEqual(10, size_in_gb)
class ManageUnmanage_Snap_test(volume_helper.MStorageDSVDriver, test.TestCase):
@mock.patch('cinder.volume.drivers.nec.volume_common.MStorageVolumeCommon.'
'_create_ismview_dir', new=mock.Mock())
def setUp(self):
super(ManageUnmanage_Snap_test, self).setUp()
self._set_config(conf.Configuration(None), 'dummy', 'dummy')
self.do_setup(None)
self._properties['pool_pools'] = {0}
self._properties['pool_backup_pools'] = {1}
def test_is_manageable_snapshot(self):
ld_ok_sv1 = {'pool_num': 1, 'RPL Attribute': 'SV', 'Purpose': 'INV'}
ld_ok_sv2 = {'pool_num': 1, 'RPL Attribute': 'SV', 'Purpose': '---'}
ld_ng_pool = {'pool_num': 0, 'RPL Attribute': 'SV', 'Purpose': 'INV'}
ld_ng_rpl1 = {'pool_num': 1, 'RPL Attribute': 'MV', 'Purpose': 'INV'}
ld_ng_rpl2 = {'pool_num': 1, 'RPL Attribute': 'RV', 'Purpose': 'INV'}
ld_ng_rpl3 = {'pool_num': 1, 'RPL Attribute': 'IV', 'Purpose': '---'}
ld_ng_rpl4 = {'pool_num': 1, 'RPL Attribute': 'BV', 'Purpose': 'INV'}
self.assertTrue(self._is_manageable_snapshot(ld_ok_sv1))
self.assertTrue(self._is_manageable_snapshot(ld_ok_sv2))
self.assertFalse(self._is_manageable_snapshot(ld_ng_pool))
self.assertFalse(self._is_manageable_snapshot(ld_ng_rpl1))
self.assertFalse(self._is_manageable_snapshot(ld_ng_rpl2))
self.assertFalse(self._is_manageable_snapshot(ld_ng_rpl3))
self.assertFalse(self._is_manageable_snapshot(ld_ng_rpl4))
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
'view_all', patch_view_all)
def test_get_manageable_snapshots(self):
mock_getbvname = mock.Mock()
self._cli.get_bvname = mock_getbvname
self._cli.get_bvname.return_value = "yEUHrXa5AHMjOZZLb93eP"
current_snapshots = []
volumes = self.get_manageable_snapshots(current_snapshots, None,
100, 0, ['reference'], ['asc'])
self.assertEqual('LX:4T7JpyqI3UuPlKeT9D3VQF',
volumes[0]['reference']['source-name'])
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
'view_all', patch_view_all)
def test_manage_existing_snapshot(self):
mock_rename = mock.Mock()
self._cli.changeldname = mock_rename
self.newsnap = DummyVolume()
self.newsnap.id = "46045673-41e7-44a7-9333-02f07feab04b"
self.newsnap.volume_id = "1febb976-86d0-42ed-9bc0-4aa3e158f27d"
mock_getbvname = mock.Mock()
self._cli.get_bvname = mock_getbvname
self._cli.get_bvname.return_value = "yEUHrXa5AHMjOZZLb93eP"
current_snapshots = []
snaps = self.get_manageable_snapshots(current_snapshots, None,
100, 0, ['reference'], ['asc'])
self.manage_existing_snapshot(self.newsnap, snaps[0]['reference'])
self._cli.changeldname.assert_called_once_with(
None,
'LX:287RbQoP7VdwR1WsPC2fZT',
'LX:4T7JpyqI3UuPlKeT9D3VQF')
self.newsnap.volume_id = "AAAAAAAA"
with self.assertRaisesRegex(exception.ManageExistingInvalidReference,
'Snapshot source is unmatch.'):
self.manage_existing_snapshot(self.newsnap, snaps[0]['reference'])
self._cli.get_bvname.return_value = "2000000991020012000C"
self.newsnap.volume_id = "00046058-d38e-7f60-67b7-59ed6422520c"
snap = {'source-name': ' :2000000991020012000B'}
with self.assertRaisesRegex(exception.ManageExistingVolumeTypeMismatch,
'Volume type is unmatched.'):
self.manage_existing_snapshot(self.newsnap, snap)
@mock.patch('cinder.volume.drivers.nec.cli.MStorageISMCLI.'
'view_all', patch_view_all)
def test_manage_existing_snapshot_get_size(self):
self.newsnap = DummyVolume()
self.newsnap.id = "46045673-41e7-44a7-9333-02f07feab04b"
mock_getbvname = mock.Mock()
self._cli.get_bvname = mock_getbvname
self._cli.get_bvname.return_value = "yEUHrXa5AHMjOZZLb93eP"
current_snapshots = []
snaps = self.get_manageable_snapshots(current_snapshots, None,
100, 0, ['reference'], ['asc'])
size_in_gb = self.manage_existing_snapshot_get_size(
self.newsnap,
snaps[0]['reference'])
self.assertEqual(6, size_in_gb)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class AvailabilitySetsOperations(object):
"""AvailabilitySetsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client Api Version. Constant value: "2016-03-30".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2016-03-30"
self.config = config
def create_or_update(
self, resource_group_name, name, parameters, custom_headers=None, raw=False, **operation_config):
"""Create or update an availability set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param name: The name of the availability set.
:type name: str
:param parameters: Parameters supplied to the Create Availability Set
operation.
:type parameters:
~azure.mgmt.compute.v2016_03_30.models.AvailabilitySet
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: AvailabilitySet or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.compute.v2016_03_30.models.AvailabilitySet or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'availabilitySetName': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'AvailabilitySet')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('AvailabilitySet', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}'}
def delete(
self, resource_group_name, availability_set_name, custom_headers=None, raw=False, **operation_config):
"""Delete an availability set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param availability_set_name: The name of the availability set.
:type availability_set_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: OperationStatusResponse or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.compute.v2016_03_30.models.OperationStatusResponse
or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'availabilitySetName': self._serialize.url("availability_set_name", availability_set_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatusResponse', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}'}
def get(
self, resource_group_name, availability_set_name, custom_headers=None, raw=False, **operation_config):
"""Retrieves information about an availability set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param availability_set_name: The name of the availability set.
:type availability_set_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: AvailabilitySet or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.compute.v2016_03_30.models.AvailabilitySet or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'availabilitySetName': self._serialize.url("availability_set_name", availability_set_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('AvailabilitySet', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}'}
def list(
self, resource_group_name, custom_headers=None, raw=False, **operation_config):
"""Lists all availability sets in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of AvailabilitySet
:rtype:
~azure.mgmt.compute.v2016_03_30.models.AvailabilitySetPaged[~azure.mgmt.compute.v2016_03_30.models.AvailabilitySet]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.AvailabilitySetPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.AvailabilitySetPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets'}
def list_available_sizes(
self, resource_group_name, availability_set_name, custom_headers=None, raw=False, **operation_config):
"""Lists all available virtual machine sizes that can be used to create a
new virtual machine in an existing availability set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param availability_set_name: The name of the availability set.
:type availability_set_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of VirtualMachineSize
:rtype:
~azure.mgmt.compute.v2016_03_30.models.VirtualMachineSizePaged[~azure.mgmt.compute.v2016_03_30.models.VirtualMachineSize]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list_available_sizes.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'availabilitySetName': self._serialize.url("availability_set_name", availability_set_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.VirtualMachineSizePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.VirtualMachineSizePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list_available_sizes.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}/vmSizes'}
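# Usage sketch (assumptions: this class is reached through a configured
# ComputeManagementClient as `compute_client.availability_sets`; the resource
# group and availability set names below are hypothetical):
#
#     from azure.mgmt.compute.v2016_03_30.models import AvailabilitySet
#
#     av_set = compute_client.availability_sets.create_or_update(
#         'my-rg', 'my-avset', AvailabilitySet(location='westus'))
#     for size in compute_client.availability_sets.list_available_sizes(
#             'my-rg', 'my-avset'):
#         print(size.name)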
|
|
# GUI configuration state management
import yaml
import gzip
from PyQt4 import QtGui
def ui_state(ui):
""" Returns the current state of the GUI in an easily usable dict """
state = dict()
ip = str(ui.ipField.text())
f = str(ui.fileField.text())
ip_vna = str(ui.vna_ip_field.text())
file_vna = str(ui.vna_file_field.text())
state["ip"] = ip
state["f"] = f
state["ip_vna"] = ip_vna
state["file_vna"] = file_vna
state["combos"] = list()
for index, combo in enumerate(
[(ui.smu1_combo, ui.smu1_layout),
(ui.smu2_combo, ui.smu2_layout),
(ui.smu3_combo, ui.smu3_layout),
(ui.smu4_combo, ui.smu4_layout)]):
combo_text = combo[0].currentText().toLower()
layout = combo[1]
groupbox = layout.itemAt(2).widget()
if "open" in combo_text:
# Combobox empty, don't save anything
continue
hdict = dict()
hdict["num"] = str(index+1)
if "voltage" in combo_text:
hdict["type"] = "voltage"
if "current" in combo_text:
hdict["type"] = "current"
if "constant" in combo_text:
hdict["value"] = str(groupbox.findChild(QtGui.QLineEdit, "constant_textbox").text())
hdict["compliance"] = str(groupbox.findChild(QtGui.QLineEdit, "compliance_textbox").text())
hdict["sfun"] = "constant"
if "step" in combo_text:
hdict["start"] = str(groupbox.findChild(QtGui.QLineEdit, "start_lineedit").text())
hdict["step"] = str(groupbox.findChild(QtGui.QLineEdit, "step_lineedit").text())
hdict["steps"] = str(groupbox.findChild(QtGui.QLineEdit, "steps_lineedit").text())
hdict["compliance"] = str(groupbox.findChild(QtGui.QLineEdit, "compliance_lineedit").text())
hdict["sfun"] = "step"
if "sweep" in combo_text and "list" not in combo_text:
hdict["stop"] = str(groupbox.findChild(QtGui.QLineEdit, "val_stop_field").text())
hdict["start"] = str(groupbox.findChild(QtGui.QLineEdit, "val_inicio_field").text())
hdict["step"] = str(groupbox.findChild(QtGui.QLineEdit, "step_field").text())
hdict["compliance"] = str(groupbox.findChild(QtGui.QLineEdit, "compliance_field").text())
hdict["sweep_type"] = str(groupbox.findChild(QtGui.QComboBox, "sweep_type_combobox").currentText())
hdict["sfun"] = "sweep"
if "list" in combo_text:
hdict["value_list"] = str(groupbox.findChild(QtGui.QTextEdit, "list_textedit").toPlainText())
hdict["sfun"] = "list"
state["combos"].append(hdict)
# VNA GUI saving from here
    checked = [radio for radio in
               [ui.s11_radio, ui.s12_radio, ui.s21_radio, ui.s22_radio]
               if radio.isChecked()]
    state["spar"] = str(checked[0].objectName()[0:3])
state["format_idx"] = ui.format_combobox.currentIndex()
if ui.center_span_radio.isChecked():
state["vna_type"] = "center_span"
state["center"] = str(ui.centralwidget.findChild(QtGui.QLineEdit, "center_field").text())
state["span"] = str(ui.centralwidget.findChild(QtGui.QLineEdit, "span_field").text())
elif ui.start_stop_radio.isChecked():
state["vna_type"] = "start_stop"
state["start"] = str(ui.centralwidget.findChild(QtGui.QLineEdit, "freqstart_field").text())
state["stop"] = str(ui.centralwidget.findChild(QtGui.QLineEdit, "freqstop_field").text())
state["points"] = str(ui.points_field.text())
state["autoscale"] = bool(ui.autoscale_checkbox.isChecked())
state["all_checked"] = bool(ui.all_checkbox.isChecked())
return state
def save_ui_file(ui, fname):
# Serialize dict to disk
fname = str(fname)
with gzip.open(fname, "wb") as stream:
yaml.dump(ui_state(ui), stream)
def save_ui(ui):
save_ui_file(ui, "ui_state.config")
def restore_ui_file(ui, fname):
fname = str(fname)
mapping = [(ui.smu1_combo, ui.smu1_layout),
(ui.smu2_combo, ui.smu2_layout),
(ui.smu3_combo, ui.smu3_layout),
(ui.smu4_combo, ui.smu4_layout)]
    try:
        with gzip.open(fname, 'rb') as stream:
            # safe_load is sufficient here (the state is plain strings,
            # lists and dicts) and avoids executing arbitrary YAML tags
            state = yaml.safe_load(stream)
    except IOError:
        # Start with an empty interface as there's no saved configuration
        return
ui.ipField.setText(state["ip"])
ui.fileField.setText(state["f"])
ui.vna_ip_field.setText(state["ip_vna"])
ui.vna_file_field.setText(state["file_vna"])
for combo in state["combos"]:
# Get the combo object in the Qt GUI that corresponds to the
# combo object in the yml representation. Do the same with the layout
wcombo = mapping[int(combo["num"])-1][0]
wlayout = mapping[int(combo["num"])-1][1]
groupbox = wlayout.itemAt(2).widget()
if combo["sfun"] == "sweep":
if combo["type"] == "voltage":
wcombo.setCurrentIndex(6)
elif combo["type"] == "current":
wcombo.setCurrentIndex(2)
if combo["sfun"] == "step":
if combo["type"] == "voltage":
wcombo.setCurrentIndex(8)
if combo["type"] == "current":
wcombo.setCurrentIndex(4)
if combo["sfun"] == "constant":
if combo["type"] == "voltage":
wcombo.setCurrentIndex(5)
if combo["type"] == "current":
wcombo.setCurrentIndex(1)
if combo["sfun"] == "list":
if combo["type"] == "voltage":
wcombo.setCurrentIndex(3)
if combo["type"] == "current":
wcombo.setCurrentIndex(7)
for combo in state["combos"]:
# Get the combo object in the Qt GUI that corresponds to the
# combo object in the yml representation. Do the same with the layout
wcombo = mapping[int(combo["num"])-1][0]
wlayout = mapping[int(combo["num"])-1][1]
groupbox = wlayout.itemAt(2).widget()
if combo["sfun"] == "sweep":
groupbox.findChild(QtGui.QLineEdit, "val_stop_field").setText(combo["stop"])
groupbox.findChild(QtGui.QLineEdit, "val_inicio_field").setText(combo["start"])
groupbox.findChild(QtGui.QLineEdit, "step_field").setText(combo["step"])
groupbox.findChild(QtGui.QLineEdit, "compliance_field").setText(combo["compliance"])
if combo["sweep_type"] == "Linear":
groupbox.findChild(QtGui.QComboBox, "sweep_type_combobox").setCurrentIndex(0)
if combo["sweep_type"] == "Log10":
groupbox.findChild(QtGui.QComboBox, "sweep_type_combobox").setCurrentIndex(1)
if combo["sweep_type"] == "Log25":
groupbox.findChild(QtGui.QComboBox, "sweep_type_combobox").setCurrentIndex(2)
if combo["sweep_type"] == "Log50":
groupbox.findChild(QtGui.QComboBox, "sweep_type_combobox").setCurrentIndex(3)
if combo["sfun"] == "step":
groupbox.findChild(QtGui.QLineEdit, "start_lineedit").setText(combo["start"])
groupbox.findChild(QtGui.QLineEdit, "step_lineedit").setText(combo["step"])
groupbox.findChild(QtGui.QLineEdit, "steps_lineedit").setText(combo["steps"])
groupbox.findChild(QtGui.QLineEdit, "compliance_lineedit").setText(combo["compliance"])
if combo["sfun"] == "constant":
groupbox.findChild(QtGui.QLineEdit, "constant_textbox").setText(combo["value"])
groupbox.findChild(QtGui.QLineEdit, "compliance_textbox").setText(combo["compliance"])
if combo["sfun"] == "list":
groupbox.findChild(QtGui.QTextEdit, "list_textedit").setText(combo["value_list"])
# Restore VNA GUI
if state["spar"] == "s11":
ui.s11_radio.setChecked(True)
elif state["spar"] == "s21":
ui.s21_radio.setChecked(True)
elif state["spar"] == "s12":
ui.s12_radio.setChecked(True)
elif state["spar"] == "s22":
ui.s22_radio.setChecked(True)
# Restore data format combobox
ui.format_combobox.setCurrentIndex(int(state["format_idx"]))
# Set center-span or start-stop
if state["vna_type"] == "center_span":
ui.center_span_radio.setChecked(True)
ui.centralwidget.findChild(QtGui.QLineEdit, "center_field").setText(state["center"])
ui.centralwidget.findChild(QtGui.QLineEdit, "span_field").setText(state["span"])
elif state["vna_type"] == "start_stop":
ui.start_stop_radio.setChecked(True)
ui.centralwidget.findChild(QtGui.QLineEdit, "freqstart_field").setText(state["start"])
ui.centralwidget.findChild(QtGui.QLineEdit, "freqstop_field").setText(state["stop"])
ui.points_field.setText(str(state["points"]))
ui.autoscale_checkbox.setChecked(bool(state["autoscale"]))
ui.all_checkbox.setChecked(bool(state["all_checked"]))
def restore_ui(ui):
restore_ui_file(ui, "ui_state.config")
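# Wiring sketch (the button names below are hypothetical; adjust them to the
# widgets actually defined in the .ui file):
#
#   ui.save_state_button.clicked.connect(lambda: save_ui(ui))
#   ui.restore_state_button.clicked.connect(lambda: restore_ui(ui))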
|
|
# -*- coding: utf-8 -*-
'''
Return data to a PostgreSQL server with json data stored in Pg's jsonb data type
:maintainer: Dave Boucha <dave@saltstack.com>, Seth House <shouse@saltstack.com>, C. R. Oldham <cr@saltstack.com>
:maturity: new
:depends: python-psycopg2
:platform: all
To enable this returner, the minion will need the python client for PostgreSQL
installed and the following values configured in the minion or master
config. These are the defaults:
.. code-block:: yaml
returner.pgjsonb.host: 'salt'
returner.pgjsonb.user: 'salt'
returner.pgjsonb.pass: 'salt'
returner.pgjsonb.db: 'salt'
returner.pgjsonb.port: 5432
SSL is optional. The defaults are set to None. If you do not want to use SSL,
either exclude these options or set them to None.
.. code-block:: yaml
returner.pgjsonb.ssl_ca: None
returner.pgjsonb.ssl_cert: None
returner.pgjsonb.ssl_key: None
Alternative configuration values can be used by prefacing the configuration
with `alternative.`. Any values not found in the alternative configuration will
be pulled from the default location. As stated above, SSL configuration is
optional. The following ssl options are simply for illustration purposes:
.. code-block:: yaml
alternative.pgjsonb.host: 'salt'
alternative.pgjsonb.user: 'salt'
alternative.pgjsonb.pass: 'salt'
alternative.pgjsonb.db: 'salt'
alternative.pgjsonb.port: 5432
alternative.pgjsonb.ssl_ca: '/etc/pki/mysql/certs/localhost.pem'
alternative.pgjsonb.ssl_cert: '/etc/pki/mysql/certs/localhost.crt'
alternative.pgjsonb.ssl_key: '/etc/pki/mysql/certs/localhost.key'
Use the following Pg database schema:
.. code-block:: sql
CREATE DATABASE salt
WITH ENCODING 'utf-8';
--
-- Table structure for table `jids`
--
DROP TABLE IF EXISTS jids;
CREATE TABLE jids (
jid varchar(255) NOT NULL primary key,
load jsonb NOT NULL
);
CREATE INDEX idx_jids_jsonb on jids
USING gin (load)
WITH (fastupdate=on);
--
-- Table structure for table `salt_returns`
--
DROP TABLE IF EXISTS salt_returns;
CREATE TABLE salt_returns (
fun varchar(50) NOT NULL,
jid varchar(255) NOT NULL,
return jsonb NOT NULL,
id varchar(255) NOT NULL,
success varchar(10) NOT NULL,
full_ret jsonb NOT NULL,
alter_time TIMESTAMP WITH TIME ZONE DEFAULT NOW());
CREATE INDEX idx_salt_returns_id ON salt_returns (id);
CREATE INDEX idx_salt_returns_jid ON salt_returns (jid);
CREATE INDEX idx_salt_returns_fun ON salt_returns (fun);
CREATE INDEX idx_salt_returns_return ON salt_returns
USING gin (return) with (fastupdate=on);
CREATE INDEX idx_salt_returns_full_ret ON salt_returns
USING gin (full_ret) with (fastupdate=on);
--
-- Table structure for table `salt_events`
--
DROP TABLE IF EXISTS salt_events;
DROP SEQUENCE IF EXISTS seq_salt_events_id;
CREATE SEQUENCE seq_salt_events_id;
CREATE TABLE salt_events (
id BIGINT NOT NULL UNIQUE DEFAULT nextval('seq_salt_events_id'),
tag varchar(255) NOT NULL,
data jsonb NOT NULL,
alter_time TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
master_id varchar(255) NOT NULL);
CREATE INDEX idx_salt_events_tag on
salt_events (tag);
CREATE INDEX idx_salt_events_data ON salt_events
USING gin (data) with (fastupdate=on);
Required python modules: Psycopg2
To use this returner, append '--return pgjsonb' to the salt command.
.. code-block:: bash
salt '*' test.ping --return pgjsonb
To use the alternative configuration, append '--return_config alternative' to the salt command.
.. versionadded:: 2015.5.0
.. code-block:: bash
salt '*' test.ping --return pgjsonb --return_config alternative
To override individual configuration items, append --return_kwargs '{"key": "value"}' to the salt command.
.. versionadded:: 2016.3.0
.. code-block:: bash
salt '*' test.ping --return pgjsonb --return_kwargs '{"db": "another-salt"}'
'''
from __future__ import absolute_import
# Let's not let PyLint complain about string substitution
# pylint: disable=W1321,E1321
# Import python libs
from contextlib import contextmanager
import sys
import time
import logging
# Import salt libs
import salt.returners
import salt.utils.jid
import salt.exceptions
# Import third party libs
try:
import psycopg2
import psycopg2.extras
HAS_PG = True
except ImportError:
HAS_PG = False
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'pgjsonb'
def __virtual__():
    if not HAS_PG:
        return False
    return __virtualname__
def _get_options(ret=None):
'''
    Returns options used for the PostgreSQL connection.
'''
defaults = {'host': 'localhost',
'user': 'salt',
'pass': 'salt',
'db': 'salt',
'port': 5432}
attrs = {'host': 'host',
'user': 'user',
'pass': 'pass',
'db': 'db',
'port': 'port'}
_options = salt.returners.get_returner_options('returner.{0}'.format(__virtualname__),
ret,
attrs,
__salt__=__salt__,
__opts__=__opts__,
defaults=defaults)
# Ensure port is an int
if 'port' in _options:
_options['port'] = int(_options['port'])
return _options
@contextmanager
def _get_serv(ret=None, commit=False):
'''
Return a Pg cursor
'''
_options = _get_options(ret)
try:
        # Build the SSL options dict. Note: this block was carried over from
        # the MySQL returner, where an empty ssl dict passed to
        # MySQLdb.connect effectively connects w/o SSL. psycopg2.connect
        # does not accept an ``ssl`` dict; it takes sslmode, sslrootcert,
        # sslcert and sslkey keyword arguments instead, so these values are
        # collected but not yet passed to the server.
        ssl_options = {}
        if _options.get('ssl_ca'):
            ssl_options['ca'] = _options.get('ssl_ca')
        if _options.get('ssl_cert'):
            ssl_options['cert'] = _options.get('ssl_cert')
        if _options.get('ssl_key'):
            ssl_options['key'] = _options.get('ssl_key')
        conn = psycopg2.connect(host=_options.get('host'),
                                user=_options.get('user'),
                                password=_options.get('pass'),
                                database=_options.get('db'),
                                port=_options.get('port'))
except psycopg2.OperationalError as exc:
raise salt.exceptions.SaltMasterError('pgjsonb returner could not connect to database: {exc}'.format(exc=exc))
cursor = conn.cursor()
try:
yield cursor
except psycopg2.DatabaseError as err:
error = err.args
sys.stderr.write(str(error))
cursor.execute("ROLLBACK")
raise err
else:
if commit:
cursor.execute("COMMIT")
else:
cursor.execute("ROLLBACK")
finally:
conn.close()
def returner(ret):
'''
Return data to a Pg server
'''
try:
with _get_serv(ret, commit=True) as cur:
sql = '''INSERT INTO salt_returns
(fun, jid, return, id, success, full_ret, alter_time)
VALUES (%s, %s, %s, %s, %s, %s, %s)'''
cur.execute(sql, (ret['fun'], ret['jid'],
psycopg2.extras.Json(ret['return']),
ret['id'],
ret.get('success', False),
psycopg2.extras.Json(ret),
time.strftime('%Y-%m-%d %H:%M:%S %z', time.localtime())))
except salt.exceptions.SaltMasterError:
log.critical('Could not store return with pgjsonb returner. PostgreSQL server unavailable.')
def event_return(events):
'''
Return event to Pg server
Requires that configuration be enabled via 'event_return'
option in master config.
'''
with _get_serv(events, commit=True) as cur:
for event in events:
tag = event.get('tag', '')
data = event.get('data', '')
sql = '''INSERT INTO salt_events (tag, data, master_id, alter_time)
VALUES (%s, %s, %s, %s)'''
cur.execute(sql, (tag, psycopg2.extras.Json(data),
__opts__['id'], time.strftime('%Y-%m-%d %H:%M:%S %z', time.localtime())))
def save_load(jid, load, minions=None):
'''
Save the load to the specified jid id
'''
with _get_serv(commit=True) as cur:
sql = '''INSERT INTO jids
(jid, load)
VALUES (%s, %s)'''
try:
cur.execute(sql, (jid, psycopg2.extras.Json(load)))
except psycopg2.IntegrityError:
# https://github.com/saltstack/salt/issues/22171
# Without this try:except: we get tons of duplicate entry errors
# which result in job returns not being stored properly
pass
def save_minions(jid, minions): # pylint: disable=unused-argument
'''
Included for API consistency
'''
pass
def get_load(jid):
'''
Return the load data that marks a specified jid
'''
with _get_serv(ret=None, commit=True) as cur:
sql = '''SELECT load FROM jids WHERE jid = %s;'''
cur.execute(sql, (jid,))
data = cur.fetchone()
if data:
return data[0]
return {}
def get_jid(jid):
'''
Return the information returned when the specified job id was executed
'''
with _get_serv(ret=None, commit=True) as cur:
sql = '''SELECT id, full_ret FROM salt_returns
WHERE jid = %s'''
cur.execute(sql, (jid,))
data = cur.fetchall()
ret = {}
if data:
for minion, full_ret in data:
ret[minion] = full_ret
return ret
def get_fun(fun):
'''
Return a dict of the last function called for all minions
'''
with _get_serv(ret=None, commit=True) as cur:
sql = '''SELECT s.id,s.jid, s.full_ret
FROM salt_returns s
                JOIN ( SELECT MAX(jid) as jid
from salt_returns GROUP BY fun, id) max
ON s.jid = max.jid
WHERE s.fun = %s
'''
cur.execute(sql, (fun,))
data = cur.fetchall()
ret = {}
if data:
for minion, _, full_ret in data:
ret[minion] = full_ret
return ret
def get_jids():
'''
    Return a dict mapping each job id to its formatted jid info
'''
with _get_serv(ret=None, commit=True) as cur:
sql = '''SELECT jid, load
FROM jids'''
cur.execute(sql)
data = cur.fetchall()
ret = {}
for jid, load in data:
ret[jid] = salt.utils.jid.format_jid_instance(jid, load)
return ret
def get_minions():
'''
Return a list of minions
'''
with _get_serv(ret=None, commit=True) as cur:
sql = '''SELECT DISTINCT id
FROM salt_returns'''
cur.execute(sql)
data = cur.fetchall()
ret = []
for minion in data:
ret.append(minion[0])
return ret
def prep_jid(nocache=False, passed_jid=None): # pylint: disable=unused-argument
'''
Do any work necessary to prepare a JID, including sending a custom id
'''
return passed_jid if passed_jid is not None else salt.utils.jid.gen_jid()
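# Ad-hoc verification sketch (not part of the returner): query stored returns
# directly with psycopg2 against the schema documented above, assuming the
# default connection values from the module docstring.
#
#   conn = psycopg2.connect(host='salt', user='salt', password='salt',
#                           database='salt', port=5432)
#   cur = conn.cursor()
#   cur.execute("SELECT id, return FROM salt_returns WHERE fun = %s",
#               ('test.ping',))
#   for minion_id, ret in cur.fetchall():
#       print(minion_id, ret)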
|
|
#
# ff7.data - Final Fantasy VII translation-related data tables
#
# Copyright (C) 2014 Christian Bauer <www.cebix.net>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
import ff7
#
# Kernel string list information (KERNEL.BIN archive)
#
kernelStringData = [
# index, numStrings, compressed, transDir, transFileName
( 0, 32, True, "kernel", "command_help.txt"),
( 1, 256, True, "kernel", "ability_help.txt"),
( 2, 128, True, "kernel", "item_help.txt"),
( 3, 128, True, "kernel", "weapon_help.txt"),
( 4, 32, True, "kernel", "armor_help.txt"),
( 5, 32, True, "kernel", "accessory_help.txt"),
( 6, 96, True, "kernel", "materia_help.txt"),
( 7, 64, True, "kernel", "key_item_help.txt"),
( 8, 32, False, "kernel", "command.txt"),
( 9, 256, False, "kernel", "ability.txt"),
(10, 128, False, "kernel", "item.txt"),
(11, 128, False, "kernel", "weapon.txt"),
(12, 32, False, "kernel", "armor.txt"),
(13, 32, False, "kernel", "accessory.txt"),
(14, 96, False, "kernel", "materia.txt"),
(15, 64, False, "kernel", "key_item.txt"),
(16, 128, True, "kernel", "battle.txt"),
(17, 16, False, "kernel", "summon.txt"),
]
#
# Translatable strings embedded in executable files
#
# English PAL version
execFileData_EN = [
# discDir, discFileName, offsetList
("", "<EXEC>", [
# offset, stringSize, numStrings, jpEnc, transDir, transFileName
(0x39914, 0x0c, 23, False, "menu", "main.txt"),
(0x39a5c, 0x08, 3, True, "menu", "main2.txt"), # "HP"/"MP"/"LV" texts use the 8x8 font with Japanese encoding
]),
("MENU", "CNFGMENU.MNU", [
(0x1ae8, 0x30, 51, False, "menu", "config.txt"),
]),
("MENU", "EQIPMENU.MNU", [
(0x82a4, 0x0c, 23, False, "menu", "equip_attr_short.txt"),
(0x83f8, 0x0a, 9, False, "menu", "element.txt"),
(0x8454, 0x24, 4, False, "menu", "remove.txt"),
(0x84e4, 0x14, 6, False, "menu", "materia.txt"),
(0x8570, 0x14, 35, False, "menu", "materia2.txt"),
]),
("MENU", "FORMMENU.MNU", [
(0x1cfc, 0x26, 6, False, "menu", "form1.txt"),
(0x1de0, 0x16, 26, False, "menu", "form2.txt"),
]),
("MENU", "ITEMMENU.MNU", [
(0x3260, 0x22, 25, False, "menu", "lv4_limit.txt"),
(0x3cd4, 0x0c, 11, False, "menu", "item.txt"),
]),
("MENU", "LIMTMENU.MNU", [
(0x2128, 0x24, 14, False, "menu", "limit.txt"),
]),
("MENU", "MGICMENU.MNU", [
(0x2934, 0x14, 14, False, "menu", "magic.txt"),
]),
("MENU", "NAMEMENU.MNU", [
(0x8e6c, 0x0c, 10, False, "menu", "default_name.txt"),
(0x8ee4, 0x08, 8, False, "menu", "name.txt"),
]),
("MENU", "SAVEMENU.MNU", [
( 0xeedc, 0x08, 1, False, "menu", "save1.txt"),
(0x12cfc, 0x24, 38, False, "menu", "save2.txt"),
(0x13260, 0x30, 20, False, "menu", "save3.txt"),
(0x13684, 0x06, 1, False, "menu", "save4.txt"),
]),
("MENU", "SHOPMENU.MNU", [
(0x42d4, 0x14, 9, False, "menu", "shop_type.txt"),
(0x4388, 0x2e, 8, False, "menu", "shop_greeting1.txt"),
(0x4554, 0x2e, 8, False, "menu", "shop_greeting2.txt"),
(0x6160, 0x0a, 9, False, "menu", "element.txt"),
(0x61bc, 0x24, 4, False, "menu", "remove.txt"),
(0x624c, 0x14, 6, False, "menu", "materia.txt"),
(0x62d8, 0x14, 35, False, "menu", "materia2.txt"),
(0x6598, 0x16, 23, False, "menu", "equip_attr.txt"),
(0x6798, 0x14, 10, False, "menu", "shop.txt"),
]),
("MENU", "STATMENU.MNU", [
(0x196c, 0x0a, 9, False, "menu", "element.txt"),
(0x19c8, 0x14, 27, False, "menu", "status.txt"),
(0x1be4, 0x0f, 27, False, "menu", "stat.txt"),
]),
("BATTLE", "BATTLE.X", [
(0x5315c, 0x08, 1, False, "battle", "pause.txt"),
(0x53198, 0x0a, 32, False, "battle", "status.txt"),
(0x5330c, 0x1c, 1, False, "battle", "gil1.txt"),
(0x53328, 0x04, 1, False, "battle", "gil2.txt"),
(0x5332c, 0x08, 1, False, "battle", "gil3.txt"),
(0x53334, 0x0c, 1, False, "battle", "gil4.txt"),
(0x53480, 0x0c, 1, False, "battle", "arena1.txt"),
(0x5348c, 0x18, 1, False, "battle", "arena2.txt"),
(0x534a4, 0x16, 5, False, "battle", "arena3.txt"),
(0x5351c, 0x20, 24, False, "battle", "arena_handicap.txt"),
(0x5383c, 0x22, 3, False, "battle", "worried.txt"),
]),
("MINI", "CHOCOBO.BIN", [
(0x122dc, 0x06, 1, False, "chocobo", "black.txt"),
(0x12400, 0x10, 24, False, "chocobo", "prices.txt"),
(0x12580, 0x08, 44, False, "chocobo", "names.txt"),
]),
]
# French PAL version
execFileData_FR = [
# discDir, discFileName, offsetList
("", "<EXEC>", [
# offset, stringSize, numStrings, jpEnc, transDir, transFileName
(0x39944, 0x14, 23, False, "menu", "main.txt"),
(0x39b44, 0x08, 3, True, "menu", "main2.txt"), # "HP"/"MP"/"LV" texts use the 8x8 font with Japanese encoding
]),
("MENU", "CNFGMENU.MNU", [
(0x1b2c, 0x44, 51, False, "menu", "config.txt"),
]),
("MENU", "EQIPMENU.MNU", [
(0x8354, 0x14, 23, False, "menu", "equip_attr.txt"),
(0x8568, 0x0a, 9, False, "menu", "element.txt"),
(0x85c4, 0x22, 4, False, "menu", "remove.txt"),
(0x864c, 0x1a, 42, False, "menu", "materia.txt"),
]),
("MENU", "FORMMENU.MNU", [
(0x1cfc, 0x26, 6, False, "menu", "form1.txt"),
(0x1de0, 0x16, 26, False, "menu", "form2.txt"),
]),
("MENU", "ITEMMENU.MNU", [
(0x325c, 0x3c, 25, False, "menu", "lv4_limit.txt"),
(0x3f58, 0x0e, 11, False, "menu", "item.txt"),
]),
("MENU", "LIMTMENU.MNU", [
(0x2128, 0x28, 14, False, "menu", "limit.txt"),
]),
("MENU", "MGICMENU.MNU", [
(0x293c, 0x1a, 14, False, "menu", "magic.txt"),
]),
("MENU", "NAMEMENU.MNU", [
(0x8e6c, 0x0c, 10, False, "menu", "default_name.txt"),
(0x8ee4, 0x0c, 8, False, "menu", "name.txt"),
]),
("MENU", "SAVEMENU.MNU", [
( 0xef1c, 0x0c, 1, False, "menu", "save1.txt"),
(0x12d48, 0x32, 37, False, "menu", "save2.txt"),
(0x13490, 0x40, 20, False, "menu", "save3.txt"),
(0x13a04, 0x08, 1, False, "menu", "save4.txt"),
]),
("MENU", "SHOPMENU.MNU", [
(0x4398, 0x14, 9, False, "menu", "shop_type.txt"),
(0x444c, 0x2e, 8, False, "menu", "shop_greeting1.txt"),
(0x4ac4, 0x2e, 8, False, "menu", "shop_greeting2.txt"),
(0x6b7c, 0x0a, 9, False, "menu", "element.txt"),
(0x6bd8, 0x22, 4, False, "menu", "remove.txt"),
(0x6c60, 0x1a, 42, False, "menu", "materia.txt"),
(0x70a8, 0x1e, 5, False, "menu", "shop_menu.txt"),
(0x7140, 0x18, 10, False, "menu", "shop.txt"),
(0x7230, 0x16, 23, False, "menu", "equip_attr.txt"),
]),
("MENU", "STATMENU.MNU", [
(0x1960, 0x0a, 9, False, "menu", "element.txt"),
(0x19bc, 0x14, 27, False, "menu", "status.txt"),
(0x1bd8, 0x1a, 27, False, "menu", "stat.txt"),
]),
("BATTLE", "BATTLE.X", [
(0x5315c, 0x08, 1, False, "battle", "pause.txt"),
(0x53198, 0x14, 32, False, "battle", "status.txt"),
(0x5344c, 0x18, 1, False, "battle", "gil1.txt"),
(0x53464, 0x04, 1, False, "battle", "gil2.txt"),
(0x53468, 0x08, 1, False, "battle", "gil3.txt"),
(0x53470, 0x10, 1, False, "battle", "gil4.txt"),
(0x535c0, 0x0c, 1, False, "battle", "arena1.txt"),
(0x535cc, 0x20, 1, False, "battle", "arena2.txt"),
(0x535ec, 0x24, 5, False, "battle", "arena3.txt"),
(0x5369c, 0x20, 24, False, "battle", "arena_handicap.txt"),
(0x539bc, 0x22, 3, False, "battle", "worried.txt"),
]),
("MINI", "CHOCOBO.BIN", [
(0x12ba8, 0x06, 1, False, "chocobo", "black.txt"),
(0x12ccc, 0x12, 24, False, "chocobo", "prices.txt"),
(0x12e7c, 0x0a, 39, False, "chocobo", "names.txt"),
]),
]
# German PAL version
execFileData_DE = [
# discDir, discFileName, offsetList
("", "<EXEC>", [
# offset, stringSize, numStrings, jpEnc, transDir, transFileName
(0x39930, 0x12, 23, False, "menu", "main.txt"),
(0x39b04, 0x08, 3, True, "menu", "main2.txt"), # "HP"/"MP"/"LV" texts use the 8x8 font with Japanese encoding
]),
("MENU", "CNFGMENU.MNU", [
(0x1b3c, 0x36, 51, False, "menu", "config.txt"),
]),
("MENU", "EQIPMENU.MNU", [
(0x8354, 0x16, 23, False, "menu", "equip_attr.txt"),
(0x8598, 0x0c, 9, False, "menu", "element.txt"),
(0x8604, 0x24, 4, False, "menu", "remove.txt"),
(0x8694, 0x1c, 42, False, "menu", "materia.txt"),
]),
("MENU", "FORMMENU.MNU", [
(0x1cfc, 0x26, 6, False, "menu", "form1.txt"),
(0x1de0, 0x16, 26, False, "menu", "form2.txt"),
]),
("MENU", "ITEMMENU.MNU", [
(0x3250, 0x28, 25, False, "menu", "lv4_limit.txt"),
(0x3d58, 0x12, 11, False, "menu", "item.txt"),
]),
("MENU", "LIMTMENU.MNU", [
(0x2120, 0x20, 14, False, "menu", "limit.txt"),
]),
("MENU", "MGICMENU.MNU", [
(0x293c, 0x16, 14, False, "menu", "magic.txt"),
]),
("MENU", "NAMEMENU.MNU", [
(0x8e94, 0x0c, 10, False, "menu", "default_name.txt"),
(0x8f0c, 0x0a, 8, False, "menu", "name.txt"),
]),
("MENU", "SAVEMENU.MNU", [
( 0xef6c, 0x0a, 1, False, "menu", "save1.txt"),
(0x12d90, 0x28, 37, False, "menu", "save2.txt"),
(0x13372, 0x42, 20, False, "menu", "save3.txt"),
(0x13910, 0x06, 1, False, "menu", "save4.txt"),
]),
("MENU", "SHOPMENU.MNU", [
(0x4390, 0x28, 9, False, "menu", "shop_type.txt"),
(0x44f8, 0x2e, 8, False, "menu", "shop_greeting1.txt"),
(0x46c4, 0x2e, 8, False, "menu", "shop_greeting2.txt"),
(0x62d0, 0x0c, 9, False, "menu", "element.txt"),
(0x633c, 0x24, 4, False, "menu", "remove.txt"),
(0x63cc, 0x1c, 42, False, "menu", "materia.txt"),
(0x6868, 0x1e, 5, False, "menu", "shop_menu.txt"),
(0x6900, 0x20, 10, False, "menu", "shop.txt"),
(0x6a40, 0x16, 23, False, "menu", "equip_attr.txt"),
]),
("MENU", "STATMENU.MNU", [
(0x1960, 0x0c, 9, False, "menu", "element.txt"),
(0x19cc, 0x14, 27, False, "menu", "status.txt"),
(0x1be8, 0x14, 27, False, "menu", "stat.txt"),
]),
("BATTLE", "BATTLE.X", [
(0x53160, 0x08, 1, False, "battle", "pause.txt"),
(0x5319c, 0x0e, 32, False, "battle", "status.txt"),
(0x53390, 0x1c, 1, False, "battle", "gil1.txt"),
(0x533ac, 0x04, 1, False, "battle", "gil2.txt"),
(0x533b0, 0x08, 1, False, "battle", "gil3.txt"),
(0x533b8, 0x14, 1, False, "battle", "gil4.txt"),
(0x5350c, 0x08, 1, False, "battle", "arena1.txt"),
(0x53514, 0x0c, 1, False, "battle", "arena2.txt"),
(0x53520, 0x1a, 5, False, "battle", "arena3.txt"),
(0x535a7, 0x1f, 24, False, "battle", "arena_handicap.txt"),
(0x538b0, 0x23, 3, False, "battle", "worried.txt"),
]),
("MINI", "CHOCOBO.BIN", [
(0x12b84, 0x06, 1, False, "chocobo", "black.txt"),
(0x12ca8, 0x11, 24, False, "chocobo", "prices.txt"),
(0x12e40, 0x08, 41, False, "chocobo", "names.txt"),
]),
]
# Spanish PAL version
execFileData_ES = [
# discDir, discFileName, offsetList
("", "<EXEC>", [
# offset, stringSize, numStrings, jpEnc, transDir, transFileName
(0x398f8, 0x14, 23, False, "menu", "main.txt"),
(0x39af8, 0x08, 3, True, "menu", "main2.txt"), # "HP"/"MP"/"LV" texts use the 8x8 font with Japanese encoding
]),
("MENU", "CNFGMENU.MNU", [
(0x1b3c, 0x46, 51, False, "menu", "config.txt"),
]),
("MENU", "EQIPMENU.MNU", [
(0x8354, 0x1a, 23, False, "menu", "equip_attr.txt"),
(0x85e0, 0x0a, 9, False, "menu", "element.txt"),
(0x863c, 0x28, 4, False, "menu", "remove.txt"),
(0x86dc, 0x1e, 42, False, "menu", "materia.txt"),
]),
("MENU", "FORMMENU.MNU", [
(0x1cf4, 0x28, 6, False, "menu", "form1.txt"),
(0x1de4, 0x28, 26, False, "menu", "form2.txt"),
]),
("MENU", "ITEMMENU.MNU", [
(0x3220, 0x28, 25, False, "menu", "lv4_limit.txt"),
(0x3d28, 0x14, 11, False, "menu", "item.txt"),
]),
("MENU", "LIMTMENU.MNU", [
(0x2128, 0x28, 14, False, "menu", "limit.txt"),
]),
("MENU", "MGICMENU.MNU", [
(0x29cc, 0x28, 14, False, "menu", "magic.txt"),
]),
("MENU", "NAMEMENU.MNU", [
(0x8e88, 0x0c, 10, False, "menu", "default_name.txt"),
(0x8f00, 0x0c, 8, False, "menu", "name.txt"),
]),
("MENU", "SAVEMENU.MNU", [
( 0xef6c, 0x08, 1, False, "menu", "save1.txt"),
(0x12d8c, 0x24, 37, False, "menu", "save2.txt"),
(0x13304, 0x44, 20, False, "menu", "save3.txt"),
(0x138cc, 0x06, 1, False, "menu", "save4.txt"),
]),
("MENU", "SHOPMENU.MNU", [
(0x4388, 0x28, 9, False, "menu", "shop_type.txt"),
(0x44f0, 0x2e, 8, False, "menu", "shop_greeting1.txt"),
(0x46bc, 0x2e, 8, False, "menu", "shop_greeting2.txt"),
(0x62c8, 0x0a, 9, False, "menu", "element.txt"),
(0x6324, 0x28, 4, False, "menu", "remove.txt"),
(0x63c4, 0x1e, 42, False, "menu", "materia.txt"),
(0x68b4, 0x1e, 5, False, "menu", "shop_menu.txt"),
(0x694c, 0x1e, 10, False, "menu", "shop.txt"),
(0x6a78, 0x14, 23, False, "menu", "equip_attr.txt"),
]),
("MENU", "STATMENU.MNU", [
(0x1960, 0x0a, 9, False, "menu", "element.txt"),
(0x19bc, 0x14, 26, False, "menu", "status.txt"),
(0x1bc4, 0x1e, 27, False, "menu", "stat.txt"),
]),
("BATTLE", "BATTLE.X", [
(0x53170, 0x08, 1, False, "battle", "pause.txt"),
(0x531ac, 0x0c, 27, False, "battle", "status.txt"),
(0x53324, 0x18, 1, False, "battle", "gil1.txt"),
(0x5333c, 0x04, 1, False, "battle", "gil2.txt"),
(0x53340, 0x08, 1, False, "battle", "gil3.txt"),
(0x53348, 0x10, 1, False, "battle", "gil4.txt"),
(0x53498, 0x08, 1, False, "battle", "arena1.txt"),
(0x534a0, 0x20, 1, False, "battle", "arena2.txt"),
(0x534c0, 0x1b, 5, False, "battle", "arena3.txt"),
(0x53553, 0x27, 24, False, "battle", "arena_handicap.txt"),
(0x5391c, 0x24, 3, False, "battle", "worried.txt"),
]),
("MINI", "CHOCOBO.BIN", [
(0x12b84, 0x06, 1, False, "chocobo", "black.txt"),
(0x12ca8, 0x11, 24, False, "chocobo", "prices.txt"),
(0x12e40, 0x08, 41, False, "chocobo", "names.txt"),
]),
]
# US version
execFileData_US = [
# discDir, discFileName, offsetList
("", "<EXEC>", [
# offset, stringSize, numStrings, jpEnc, transDir, transFileName
(0x39a48, 0x0c, 23, False, "menu", "main.txt"),
(0x39b90, 0x08, 3, True, "menu", "main2.txt"), # "HP"/"MP"/"LV" texts use the 8x8 font with Japanese encoding
]),
("MENU", "CNFGMENU.MNU", [
(0x1ae8, 0x30, 51, False, "menu", "config.txt"),
]),
("MENU", "EQIPMENU.MNU", [
(0x82a4, 0x0c, 23, False, "menu", "equip_attr_short.txt"),
(0x83f8, 0x0a, 9, False, "menu", "element.txt"),
(0x8454, 0x24, 4, False, "menu", "remove.txt"),
(0x84e4, 0x14, 6, False, "menu", "materia.txt"),
(0x8570, 0x14, 35, False, "menu", "materia2.txt"),
]),
("MENU", "FORMMENU.MNU", [
(0x1cfc, 0x26, 6, False, "menu", "form1.txt"),
(0x1de0, 0x16, 26, False, "menu", "form2.txt"),
]),
("MENU", "ITEMMENU.MNU", [
(0x3260, 0x22, 25, False, "menu", "lv4_limit.txt"),
(0x3cd4, 0x0c, 11, False, "menu", "item.txt"),
]),
("MENU", "LIMTMENU.MNU", [
(0x2128, 0x24, 14, False, "menu", "limit.txt"),
]),
("MENU", "MGICMENU.MNU", [
(0x2934, 0x14, 14, False, "menu", "magic.txt"),
]),
("MENU", "NAMEMENU.MNU", [
(0x8e6c, 0x0c, 10, False, "menu", "default_name.txt"),
(0x8ee4, 0x08, 8, False, "menu", "name.txt"),
]),
("MENU", "SAVEMENU.MNU", [
( 0xeedc, 0x08, 1, False, "menu", "save1.txt"),
(0x12cfc, 0x24, 38, False, "menu", "save2.txt"),
(0x13260, 0x30, 20, False, "menu", "save3.txt"),
(0x13684, 0x06, 1, False, "menu", "save4.txt"),
]),
("MENU", "SHOPMENU.MNU", [
(0x42c8, 0x14, 9, False, "menu", "shop_type.txt"),
(0x437c, 0x2e, 8, False, "menu", "shop_greeting1.txt"),
(0x4548, 0x2e, 8, False, "menu", "shop_greeting2.txt"),
(0x6154, 0x0a, 9, False, "menu", "element.txt"),
(0x61b0, 0x24, 4, False, "menu", "remove.txt"),
(0x6240, 0x14, 6, False, "menu", "materia.txt"),
(0x62cc, 0x14, 35, False, "menu", "materia2.txt"),
(0x658c, 0x16, 23, False, "menu", "equip_attr.txt"),
(0x678c, 0x14, 10, False, "menu", "shop.txt"),
]),
("MENU", "STATMENU.MNU", [
(0x196c, 0x0a, 9, False, "menu", "element.txt"),
(0x19c8, 0x14, 27, False, "menu", "status.txt"),
(0x1be4, 0x0f, 27, False, "menu", "stat.txt"),
]),
("BATTLE", "BATTLE.X", [
(0x53148, 0x08, 1, False, "battle", "pause.txt"),
(0x53184, 0x0a, 32, False, "battle", "status.txt"),
(0x532f8, 0x1c, 1, False, "battle", "gil1.txt"),
(0x53314, 0x04, 1, False, "battle", "gil2.txt"),
(0x53318, 0x08, 1, False, "battle", "gil3.txt"),
(0x53320, 0x0c, 1, False, "battle", "gil4.txt"),
(0x5346c, 0x0c, 1, False, "battle", "arena1.txt"),
(0x53478, 0x18, 1, False, "battle", "arena2.txt"),
(0x53490, 0x16, 5, False, "battle", "arena3.txt"),
(0x53508, 0x20, 24, False, "battle", "arena_handicap.txt"),
(0x53828, 0x22, 3, False, "battle", "worried.txt"),
]),
("MINI", "CHOCOBO.BIN", [
(0x122c8, 0x06, 1, False, "chocobo", "black.txt"),
(0x123ec, 0x10, 24, False, "chocobo", "prices.txt"),
(0x1256c, 0x08, 44, False, "chocobo", "names.txt"),
]),
]
# Japanese International version
execFileData_JP = [
# discDir, discFileName, offsetList
("", "<EXEC>", [
# offset, stringSize, numStrings, jpEnc, transDir, transFileName
(0x39880, 0x0c, 25, True, "menu", "main.txt"),
(0x399e0, 0x08, 3, True, "menu", "main2.txt"), # "HP"/"MP"/"LV" texts
]),
("MENU", "CNFGMENU.MNU", [
(0x1a60, 0x1c, 51, True, "menu", "config.txt"),
]),
("MENU", "EQIPMENU.MNU", [
(0x81d0, 0x0e, 24, True, "menu", "equip_attr.txt"),
(0x8354, 0x10, 4, True, "menu", "remove.txt"),
(0x8394, 0x10, 42, True, "menu", "materia.txt"),
]),
("MENU", "FORMMENU.MNU", [
(0x1c98, 0x14, 6, True, "menu", "form1.txt"),
(0x1d10, 0x0f, 26, True, "menu", "form2.txt"),
]),
("MENU", "ITEMMENU.MNU", [
(0x33d0, 0x28, 25, True, "menu", "lv4_limit.txt"),
(0x3ed8, 0x08, 11, True, "menu", "item.txt"),
]),
("MENU", "LIMTMENU.MNU", [
(0x214c, 0x19, 14, True, "menu", "limit.txt"),
]),
("MENU", "MGICMENU.MNU", [
(0x28d0, 0x14, 14, True, "menu", "magic.txt"),
]),
("MENU", "NAMEMENU.MNU", [
(0x8e34, 0x0c, 10, True, "menu", "default_name.txt"),
(0x8eac, 0x08, 8, True, "menu", "name.txt"),
]),
("MENU", "SAVEMENU.MNU", [
( 0xf070, 0x08, 1, True, "menu", "save1.txt"),
(0x12e98, 0x22, 39, True, "menu", "save2.txt"),
(0x133c2, 0x1e, 20, True, "menu", "save3.txt"),
(0x13650, 0x20, 5, True, "menu", "save4.txt"),
]),
("MENU", "SHOPMENU.MNU", [
(0x40d8, 0x14, 9, True, "menu", "shop_type.txt"),
(0x418c, 0x14, 8, True, "menu", "shop_greeting1.txt"),
(0x4254, 0x14, 8, True, "menu", "shop_greeting2.txt"),
(0x431c, 0x14, 8, True, "menu", "shop_greeting3.txt"),
(0x43e4, 0x14, 8, True, "menu", "shop_greeting4.txt"),
(0x44ac, 0x14, 8, True, "menu", "shop_greeting5.txt"),
(0x5fb4, 0x10, 4, True, "menu", "remove.txt"),
(0x5ff4, 0x10, 42, True, "menu", "materia.txt"),
(0x6298, 0x0e, 24, True, "menu", "equip_attr.txt"),
(0x63f8, 0x10, 10, True, "menu", "shop.txt"),
]),
("MENU", "STATMENU.MNU", [
(0x166c, 0x0a, 27, True, "menu", "stat.txt"),
]),
("BATTLE", "BATTLE.X", [
(0x536d0, 0x08, 1, True, "battle", "pause.txt"),
(0x5370c, 0x0c, 32, True, "battle", "status.txt"),
(0x538b8, 0x18, 1, True, "battle", "gil1.txt"),
(0x538d0, 0x04, 1, True, "battle", "gil2.txt"),
(0x538d4, 0x08, 1, True, "battle", "gil3.txt"),
(0x538dc, 0x08, 1, True, "battle", "gil4.txt"),
(0x53a24, 0x10, 1, True, "battle", "arena1.txt"),
(0x53a34, 0x10, 1, True, "battle", "arena2.txt"),
(0x53a44, 0x10, 5, True, "battle", "arena3.txt"),
(0x53a95, 0x11, 24, True, "battle", "arena_handicap.txt"),
(0x53c50, 0x14, 3, True, "battle", "worried.txt"),
]),
("MINI", "CHOCOBO.BIN", [
(0x12c80, 0x08, 1, True, "chocobo", "black.txt"),
(0x12da0, 0x10, 24, True, "chocobo", "prices.txt"),
# TODO: In the Japanese version, each name has 6 characters, followed by 2 data bytes.
# The translation tools cannot currently handle this.
# (0x12f20, 0x108, 1, True, "chocobo", "names.txt"),
]),
]
# Original Japanese version
execFileData_JO = [
# discDir, discFileName, offsetList
("", "<EXEC>", [
# offset, stringSize, numStrings, jpEnc, transDir, transFileName
(0x397f8, 0x0c, 25, True, "menu", "main.txt"),
(0x39958, 0x08, 3, True, "menu", "main2.txt"), # "HP"/"MP"/"LV" texts
]),
("MENU", "CNFGMENU.MNU", [
(0x1a60, 0x1c, 51, True, "menu", "config.txt"),
]),
("MENU", "EQIPMENU.MNU", [
(0x6310, 0x0e, 24, True, "menu", "equip_attr.txt"),
(0x6494, 0x10, 4, True, "menu", "remove.txt"),
(0x64d4, 0x10, 42, True, "menu", "materia.txt"),
]),
("MENU", "FORMMENU.MNU", [
(0x1c98, 0x14, 6, True, "menu", "form1.txt"),
(0x1d10, 0x0f, 26, True, "menu", "form2.txt"),
]),
("MENU", "ITEMMENU.MNU", [
(0x33d0, 0x28, 25, True, "menu", "lv4_limit.txt"),
(0x3ed8, 0x08, 11, True, "menu", "item.txt"),
]),
("MENU", "LIMTMENU.MNU", [
(0x214c, 0x19, 14, True, "menu", "limit.txt"),
]),
("MENU", "MGICMENU.MNU", [
(0x28d0, 0x14, 14, True, "menu", "magic.txt"),
]),
("MENU", "NAMEMENU.MNU", [
(0x8e00, 0x10, 6, True, "menu", "name2.txt"),
(0x8e60, 0x0c, 10, True, "menu", "default_name.txt"),
(0x8ed8, 0x08, 8, True, "menu", "name.txt"),
]),
("MENU", "SAVEMENU.MNU", [
( 0xecf8, 0x08, 1, True, "menu", "save1.txt"),
(0x13004, 0x18, 38, True, "menu", "save2.txt"),
(0x1339a, 0x1e, 20, True, "menu", "save3.txt"),
(0x13628, 0x20, 5, True, "menu", "save4.txt"),
]),
("MENU", "SHOPMENU.MNU", [
(0x40d8, 0x14, 8, True, "menu", "shop_type.txt"),
(0x4178, 0x14, 8, True, "menu", "shop_greeting1.txt"),
(0x4240, 0x14, 8, True, "menu", "shop_greeting2.txt"),
(0x4308, 0x14, 8, True, "menu", "shop_greeting3.txt"),
(0x43d0, 0x14, 8, True, "menu", "shop_greeting4.txt"),
(0x4498, 0x14, 8, True, "menu", "shop_greeting5.txt"),
(0x5fa0, 0x10, 4, True, "menu", "remove.txt"),
(0x5fe0, 0x10, 42, True, "menu", "materia.txt"),
(0x6284, 0x0e, 24, True, "menu", "equip_attr.txt"),
(0x63e4, 0x10, 10, True, "menu", "shop.txt"),
]),
("MENU", "STATMENU.MNU", [
(0x166c, 0x0a, 27, True, "menu", "stat.txt"),
]),
("BATTLE", "BATTLE.X", [
(0x536bc, 0x05, 32, True, "battle", "status.txt"),
(0x53788, 0x18, 1, True, "battle", "gil1.txt"),
(0x537a0, 0x04, 1, True, "battle", "gil2.txt"),
(0x537a4, 0x08, 1, True, "battle", "gil3.txt"),
(0x537ac, 0x08, 1, True, "battle", "gil4.txt"),
(0x538f4, 0x10, 1, True, "battle", "arena1.txt"),
(0x53904, 0x10, 1, True, "battle", "arena2.txt"),
(0x53914, 0x10, 5, True, "battle", "arena3.txt"),
(0x53965, 0x11, 24, True, "battle", "arena_handicap.txt"),
(0x53b20, 0x14, 3, True, "battle", "worried.txt"),
]),
("FIELD", "CHOCOBO.BIN", [
(0x12318, 0x08, 1, True, "chocobo", "black.txt"),
(0x12438, 0x10, 24, True, "chocobo", "prices.txt"),
# TODO: In the Japanese version, each name has 6 characters, followed by 2 data bytes.
# The translation tools cannot currently handle this.
# (0x125b0, 0x108, 1, True, "chocobo", "names.txt"),
]),
]
def execFileData(version):
if version == ff7.Version.EN:
return execFileData_EN
elif version == ff7.Version.FR:
return execFileData_FR
elif version == ff7.Version.DE:
return execFileData_DE
elif version == ff7.Version.ES:
return execFileData_ES
elif version == ff7.Version.US:
return execFileData_US
elif version == ff7.Version.JP:
return execFileData_JP
elif version == ff7.Version.JO:
return execFileData_JO
else:
return []
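# Layout sketch: how an offset table entry above is consumed. The
# read_fixed_strings helper is hypothetical and only illustrates the
# (offset, stringSize, numStrings, jpEnc) layout documented in the comments.
#
#   for discDir, discFileName, offsetList in execFileData(ff7.Version.US):
#       for offset, stringSize, numStrings, jpEnc, transDir, transFileName in offsetList:
#           strings = read_fixed_strings(discDir, discFileName, offset,
#                                        stringSize, numStrings, jpEnc)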
#
# Translatable strings embedded in snowboard minigame (SNOBO2)
#
# English and Japanese International versions
snobo2Data_EN = [
# offset, stringSize
(0x354, 8),
(0x35c, 12),
(0x368, 8),
(0x370, 12),
(0x37c, 8),
(0x384, 8),
(0x38c, 8),
(0x394, 8),
(0x39c, 4),
(0x3a0, 8),
(0x3a8, 8),
(0x3b0, 8),
(0x3b8, 8),
(0x3c0, 4),
(0x3c4, 8),
(0x3cc, 8),
(0x3d4, 8),
(0x3dc, 8),
(0x3e4, 12),
(0x3f0, 8),
(0x3f8, 8),
(0x400, 8),
(0x408, 8),
(0x410, 8),
(0x418, 8),
(0x420, 8),
(0x428, 8),
(0x430, 8),
(0x438, 8),
(0x440, 5),
]
# French European version
snobo2Data_FR = [
# offset, stringSize
(0x350, 12),
(0x35c, 8),
(0x364, 12),
(0x370, 12),
(0x37c, 8),
(0x384, 8),
(0x38c, 12),
(0x398, 8),
(0x3a0, 12),
(0x3ac, 8),
(0x3b4, 8),
(0x3bc, 8),
(0x3c4, 12),
(0x3d0, 4),
(0x3d4, 8),
(0x3dc, 8),
(0x3e4, 8),
(0x3ec, 12),
(0x3f8, 8),
(0x400, 8),
(0x408, 8),
(0x410, 8),
(0x418, 8),
(0x420, 8),
(0x428, 8),
(0x430, 8),
(0x438, 8),
(0x440, 8),
(0x448, 6),
]
# German European version
snobo2Data_DE = [
# offset, stringSize
(0x350, 8),
(0x358, 16),
(0x368, 8),
(0x370, 8),
(0x378, 12),
(0x384, 8),
(0x38c, 8),
(0x394, 12),
(0x3a0, 8),
(0x3a8, 4),
(0x3ac, 8),
(0x3b4, 8),
(0x3bc, 12),
(0x3c8, 8),
(0x3d0, 4),
(0x3d4, 8),
(0x3dc, 8),
(0x3e4, 8),
(0x3ec, 8),
(0x3f4, 8),
(0x3fc, 8),
(0x404, 8),
(0x40c, 8),
(0x414, 8),
(0x41c, 8),
(0x424, 4),
(0x428, 4),
(0x42c, 4),
(0x430, 4),
(0x434, 3),
]
# Spanish European version
snobo2Data_ES = [
# offset, stringSize
(0x350, 8),
(0x358, 8),
(0x360, 12),
(0x36c, 12),
(0x378, 8),
(0x380, 8),
(0x388, 8),
(0x390, 4),
(0x394, 8),
(0x39c, 8),
(0x3a4, 8),
(0x3ac, 12),
(0x3b8, 12),
(0x3c4, 4),
(0x3c8, 8),
(0x3d0, 8),
(0x3d8, 8),
(0x3e0, 12),
(0x3ec, 8),
(0x3f4, 8),
(0x3fc, 8),
(0x404, 8),
(0x40c, 8),
(0x414, 8),
(0x41c, 8),
(0x424, 8),
(0x42c, 8),
(0x434, 8),
(0x43c, 8),
]
def snobo2Data(version):
if version in [ff7.Version.EN, ff7.Version.US, ff7.Version.JP]:
return snobo2Data_EN
elif version == ff7.Version.FR:
return snobo2Data_FR
elif version == ff7.Version.DE:
return snobo2Data_DE
elif version == ff7.Version.ES:
return snobo2Data_ES
else:
return None
#
# World module string list offset and size
#
def worldStringListOffset(version):
if version == ff7.Version.EN:
return 0x1e5b4 # English European version
elif version == ff7.Version.FR:
return 0x1e5b4 # French European version
elif version == ff7.Version.DE:
return 0x1e5b4 # German European version
elif version == ff7.Version.ES:
return 0x1e5b4 # Spanish European version
elif version == ff7.Version.US:
        return 0x1e5f0 # US version
elif version == ff7.Version.JP:
return 0x1e5c0 # Japanese International version
elif version == ff7.Version.JO:
return 0x1e430 # Original Japanese version
else:
return None
worldStringListSize = 0x1000
#
# Offset of sorting table in ITEMMENU.MNU
#
def itemTableOffset(version):
if version == ff7.Version.EN:
return 0x35b4 # English European version
elif version == ff7.Version.FR:
return 0x3838 # French European version
elif version == ff7.Version.DE:
return 0x3638 # German European version
elif version == ff7.Version.ES:
return 0x3608 # Spanish European version
elif version == ff7.Version.US:
return 0x35b4 # US version
elif version == ff7.Version.JP:
        return 0x37b8 # Japanese International version
else:
return None
#
# List of field maps, excluding the following ones which are dummied out or
# contain no text in any version of the game:
# - BLACKBGD
# - BLACKBGF
# - BLACKBGG
# - BLIN69_2
# - CONVIL_3
# - DUMMY
# - FSHIP_26
# - HYOU14
# - JUNMON
# - LAS4_42
# - M_ENDO
# - NIVGATE3
# - NIVINN_3
# - NIVL_4
# - PASS
# - Q_5
# - SUBIN_4
# - TRAP
# - WM*
# - XMVTES
#
mapNames = [
"4SBWY_1", "4SBWY_22", "4SBWY_2", "4SBWY_3", "4SBWY_4", "4SBWY_5",
"4SBWY_6", "5MIN1_1", "5MIN1_2", "5TOWER", "7MIN1", "ANCNT1", "ANCNT2",
"ANCNT3", "ANCNT4", "ANFRST_1", "ANFRST_2", "ANFRST_3", "ANFRST_4",
"ANFRST_5", "ASTAGE_A", "ASTAGE_B", "BIGWHEEL", "BLACKBG1", "BLACKBG2",
"BLACKBG3", "BLACKBG4", "BLACKBG5", "BLACKBG6", "BLACKBG7", "BLACKBG8",
"BLACKBG9", "BLACKBGA", "BLACKBGB", "BLACKBGC", "BLACKBGE", "BLACKBGH",
"BLACKBGI", "BLACKBGJ", "BLACKBGK", "BLIN1", "BLIN2", "BLIN2_I",
"BLIN3_1", "BLIN59", "BLIN60_1", "BLIN60_2", "BLIN61", "BLIN62_1",
"BLIN62_2", "BLIN62_3", "BLIN63_1", "BLIN63_T", "BLIN64", "BLIN65_1",
"BLIN65_2", "BLIN66_1", "BLIN66_2", "BLIN66_3", "BLIN66_4", "BLIN66_5",
"BLIN66_6", "BLIN671B", "BLIN67_1", "BLIN67_2", "BLIN673B", "BLIN67_3",
"BLIN67_4", "BLIN68_1", "BLIN68_2", "BLIN69_1", "BLIN70_1", "BLIN70_2",
"BLIN70_3", "BLIN70_4", "BLINELE", "BLINST_1", "BLINST_2", "BLINST_3",
"BLUE_1", "BLUE_2", "BONEVIL2", "BONEVIL", "BUGIN1A", "BUGIN1B",
"BUGIN1C", "BUGIN2", "BUGIN3", "BWHLIN2", "BWHLIN", "CANON_1",
"CANON_2", "CARGOIN", "CHORACE2", "CHORACE", "CHRIN_1A", "CHRIN_1B",
"CHRIN_2", "CHRIN_3A", "CHRIN_3B", "CHURCH", "CLSIN2_1", "CLSIN2_2",
"CLSIN2_3", "COLNE_1", "COLNE_2", "COLNE_3", "COLNE_4", "COLNE_5",
"COLNE_6", "COLNE_B1", "COLNE_B3", "COLOIN1", "COLOIN2", "COLOSS",
"CONDOR1", "CONDOR2", "CONVIL_1", "CONVIL_2", "CONVIL_4", "COREL1",
"COREL2", "COREL3", "CORELIN", "COS_BTM2", "COS_BTM", "COSIN1_1",
"COSIN1", "COSIN2", "COSIN3", "COSIN4", "COSIN5", "COSMIN2", "COSMIN3",
"COSMIN4", "COSMIN6", "COSMIN7", "COSMO2", "COSMO", "COS_TOP",
"CRATER_1", "CRATER_2", "CRCIN_1", "CRCIN_2", "DATIAO_1", "DATIAO_2",
"DATIAO_3", "DATIAO_4", "DATIAO_5", "DATIAO_6", "DATIAO_7", "DATIAO_8",
"DEL12", "DEL1", "DEL2", "DEL3", "DELINN", "DELMIN12", "DELMIN1",
"DELMIN2", "DELPB", "DESERT1", "DESERT2", "DYNE", "EALIN_12", "EALIN_1",
"EALIN_2", "EALS_1", "ELEOUT", "ELEVTR1", "ELM", "ELM_I", "ELMIN1_1",
"ELMIN1_2", "ELMIN2_1", "ELMIN2_2", "ELMIN3_1", "ELMIN3_2", "ELMIN4_1",
"ELMIN4_2", "ELMINN_1", "ELMINN_2", "ELMPB", "ELMTOW", "ELM_WA",
"FALLP", "FARM", "FRCYO2", "FRCYO", "FR_E", "FRMIN", "FSHIP_12",
"FSHIP_1", "FSHIP_22", "FSHIP_23", "FSHIP_24", "FSHIP_25", "FSHIP_2",
"FSHIP_3", "FSHIP_42", "FSHIP_4", "FSHIP_5", "GAIA_1", "GAIA_2",
"GAIA_31", "GAIA_32", "GAIAFOOT", "GAIIN_1", "GAIIN_2", "GAIIN_3",
"GAIIN_4", "GAIIN_5", "GAIIN_6", "GAIIN_7", "GAMES_1", "GAMES_2",
"GAMES", "GHOTEL", "GHOTIN_1", "GHOTIN_2", "GHOTIN_3", "GHOTIN_4",
"GIDUN_1", "GIDUN_2", "GIDUN_3", "GIDUN_4", "GLDELEV", "GLDGATE",
"GLDINFO", "GLDST", "GNINN", "GNMK", "GNMKF", "GOMIN", "GONGAGA",
"GON_I", "GONJUN1", "GONJUN2", "GON_WA1", "GON_WA2", "GOSON", "HEKIGA",
"HIDEWAY1", "HIDEWAY2", "HIDEWAY3", "HILL2", "HILL", "HOLU_1", "HOLU_2",
"HYOU10", "HYOU11", "HYOU12", "HYOU13_1", "HYOU13_2", "HYOU1", "HYOU2",
"HYOU3", "HYOU4", "HYOU5_1", "HYOU5_2", "HYOU5_3", "HYOU5_4", "HYOU6",
"HYOU7", "HYOU8_1", "HYOU8_2", "HYOU9", "HYOUMAP", "ICEDUN_1",
"ICEDUN_2", "ITHILL", "ITHOS", "ITMIN1", "ITMIN2", "ITOWN12", "ITOWN1A",
"ITOWN1B", "ITOWN2", "ITOWN_I", "ITOWN_M", "ITOWN_W", "JAIL1", "JAIL2",
"JAIL3", "JAIL4", "JAILIN1", "JAILIN2", "JAILIN3", "JAILIN4", "JAILPB",
"JET", "JETIN1", "JTEMPLB", "JTEMPLC", "JTEMPL", "JTMPIN1", "JTMPIN2",
"JUMIN", "JUN_A", "JUNAIR2", "JUNAIR", "JUNBIN12", "JUNBIN1",
"JUNBIN21", "JUNBIN22", "JUNBIN3", "JUNBIN4", "JUNBIN5", "JUNDOC1A",
"JUNDOC1B", "JUNELE1", "JUNELE2", "JUN_I1", "JUN_I2", "JUNIN1A",
"JUNIN1", "JUNIN2", "JUNIN3", "JUNIN4", "JUNIN5", "JUNIN6", "JUNIN7",
"JUNINN", "JUN_M", "JUNMIN1", "JUNMIN2", "JUNMIN3", "JUNMIN4",
"JUNMIN5", "JUNON", "JUNONE22", "JUNONE2", "JUNONE3", "JUNONE4",
"JUNONE5", "JUNONE6", "JUNONE7", "JUNONL1", "JUNONL2", "JUNONL3",
"JUNONR1", "JUNONR2", "JUNONR3", "JUNONR4", "JUNPB_1", "JUNPB_2",
"JUNPB_3", "JUNSBD1", "JUN_WA", "JUN_W", "KURO_10", "KURO_11",
"KURO_12", "KURO_1", "KURO_2", "KURO_3", "KURO_4", "KURO_5", "KURO_6",
"KURO_7", "KURO_82", "KURO_8", "KURO_9", "LAS0_1", "LAS0_2", "LAS0_3",
"LAS0_4", "LAS0_5", "LAS0_6", "LAS0_7", "LAS0_8", "LAS1_1", "LAS1_2",
"LAS1_3", "LAS1_4", "LAS2_1", "LAS2_2", "LAS2_3", "LAS2_4", "LAS3_1",
"LAS3_2", "LAS3_3", "LAS4_0", "LAS4_1", "LAS4_2", "LAS4_3", "LAS4_4",
"LASTMAP", "LIFE2", "LIFE", "LOSIN1", "LOSIN2", "LOSIN3", "LOSINN",
"LOSLAKE1", "LOSLAKE2", "LOSLAKE3", "LOST1", "LOST2", "LOST3", "MD0",
"MD1_1", "MD1_2", "MD1STIN", "MD8_1", "MD8_2", "MD8_32", "MD8_3",
"MD8_4", "MD8_52", "MD8_5", "MD8_6", "MD8_B1", "MD8_B2", "MD8BRDG2",
"MD8BRDG", "MD_E1", "MDS5_1", "MDS5_2", "MDS5_3", "MDS5_4", "MDS5_5",
"MDS5_DK", "MDS5_I", "MDS5_M", "MDS5_W", "MDS6_1", "MDS6_22", "MDS6_2",
"MDS6_3", "MDS7", "MDS7_IM", "MDS7PB_1", "MDS7PB_2", "MDS7PLR1",
"MDS7PLR2", "MDS7ST1", "MDS7ST2", "MDS7ST32", "MDS7ST33", "MDS7ST3",
"MDS7_W1", "MDS7_W2", "MDS7_W3", "MIDGAL", "MKT_IA", "MKTINN", "MKT_M",
"MKT_MENS", "MKTPB", "MKT_S1", "MKT_S2", "MKT_S3", "MKT_W", "MOGU_1",
"MOVE_D", "MOVE_F", "MOVE_I", "MOVE_R", "MOVE_S", "MOVE_U", "MRKT1",
"MRKT2", "MRKT3", "MRKT4", "MTCRL_0", "MTCRL_1", "MTCRL_2", "MTCRL_3",
"MTCRL_4", "MTCRL_5", "MTCRL_6", "MTCRL_7", "MTCRL_8", "MTCRL_9",
"MTNVL2", "MTNVL3", "MTNVL4", "MTNVL5", "MTNVL6B", "MTNVL6", "NCOIN1",
"NCOIN2", "NCOIN3", "NCOINN", "NCOREL2", "NCOREL3", "NCOREL", "NIV_CL",
"NIVGATE2", "NIVGATE4", "NIVGATE", "NIVINN_1", "NIVINN_2", "NIVL_2",
"NIVL_3", "NIVL_B12", "NIVL_B1", "NIVL_B22", "NIVL_B2", "NIVL",
"NIVL_E1", "NIVL_E2", "NIVL_E3", "NIV_TI1", "NIV_TI2", "NIV_TI3",
"NIV_TI4", "NIV_W", "NMKIN_1", "NMKIN_2", "NMKIN_3", "NMKIN_4",
"NMKIN_5", "NRTHMK", "NVDUN1", "NVDUN2", "NVDUN31", "NVDUN3", "NVDUN4",
"NVMIN1_1", "NVMIN1_2", "NVMKIN1", "NVMKIN21", "NVMKIN22", "NVMKIN23",
"NVMKIN31", "NVMKIN32", "ONNA_1", "ONNA_2", "ONNA_3", "ONNA_4",
"ONNA_52", "ONNA_5", "ONNA_6", "PILLAR_1", "PILLAR_2", "PILLAR_3",
"PRISILA", "PSDUN_1", "PSDUN_2", "PSDUN_3", "PSDUN_4", "Q_1", "Q_2",
"Q_3", "Q_4", "RCKT2", "RCKT32", "RCKT3", "RCKTBAS1", "RCKTBAS2",
"RCKT", "RCKTIN1", "RCKTIN2", "RCKTIN3", "RCKTIN4", "RCKTIN5",
"RCKTIN6", "RCKTIN7", "RCKTIN8", "RKT_I", "RKTINN1", "RKTINN2",
"RKTMIN1", "RKTMIN2", "RKTSID", "RKT_W", "ROADEND", "ROOTMAP", "ROPEST",
"SANDUN_1", "SANDUN_2", "SANGO1", "SANGO2", "SANGO3", "SEA", "SEMKIN_1",
"SEMKIN_2", "SEMKIN_3", "SEMKIN_4", "SEMKIN_5", "SEMKIN_6", "SEMKIN_7",
"SEMKIN_8", "SETO1", "SHIP_1", "SHIP_2", "SHPIN_22", "SHPIN_2",
"SHPIN_3", "SICHI", "SINBIL_1", "SINBIL_2", "SININ1_1", "SININ1_2",
"SININ2_1", "SININ2_2", "SININ3", "SININB1", "SININB2", "SININB31",
"SININB32", "SININB33", "SININB34", "SININB35", "SININB36", "SININB41",
"SININB42", "SININB51", "SININB52", "SKY", "SLFRST_1", "SLFRST_2",
"SMKIN_1", "SMKIN_2", "SMKIN_3", "SMKIN_4", "SMKIN_5", "SNINN_1",
"SNINN_2", "SNINN_B1", "SNMAYOR", "SNMIN1", "SNMIN2", "SNOW", "SNW_W",
"SOUTHMK1", "SOUTHMK2", "SPGATE", "SPIPE_1", "SPIPE_2", "STARTMAP",
"SUBIN_1A", "SUBIN_1B", "SUBIN_2A", "SUBIN_2B", "SUBIN_3", "TIN_1",
"TIN_2", "TIN_3", "TIN_4", "TRACKIN2", "TRACKIN", "TRNAD_1", "TRNAD_2",
"TRNAD_3", "TRNAD_4", "TRNAD_51", "TRNAD_52", "TRNAD_53", "TUNNEL_1",
"TUNNEL_2", "TUNNEL_3", "TUNNEL_4", "TUNNEL_5", "TUNNEL_6", "UJUNON1",
"UJUNON2", "UJUNON3", "UJUNON4", "UJUNON5", "UJUN_W", "UTA_IM", "UTAPB",
"UTA_WA", "UTMIN1", "UTMIN2", "UTTMPIN1", "UTTMPIN2", "UTTMPIN3",
"UTTMPIN4", "UUTAI1", "UUTAI2", "WCRIMB_1", "WCRIMB_2", "WHITE1",
"WHITE2", "WHITEBG1", "WHITEBG2", "WHITEBG3", "WHITEIN", "WOA_1",
"WOA_2", "WOA_3", "YOUGAN2", "YOUGAN3", "YOUGAN", "YUFY1", "YUFY2",
"ZCOAL_1", "ZCOAL_2", "ZCOAL_3", "ZMIND1", "ZMIND2", "ZMIND3", "ZTRUCK",
"ZZ1", "ZZ2", "ZZ3", "ZZ4", "ZZ5", "ZZ6", "ZZ7", "ZZ8",
]
def fieldMaps(version):
    # Copy the list so repeated calls don't mutate the module-level mapNames
    maps = list(mapNames)
# The FRCYO2 map (Chocobo stable disc 2/3) is only present in European
# versions of the game.
if not ff7.isEuropean(version):
maps.remove("FRCYO2")
# Some maps from the original Japanese release were dummied out in later
# versions, others were added
if version == ff7.Version.JO:
maps.remove("MDS7ST33")
maps.remove("MIDGAL")
maps.remove("NIVGATE4")
maps.remove("SININB34")
maps.remove("SININB35")
maps.remove("SININB36")
maps.remove("ZTRUCK")
else:
maps.remove("BLACKBGA")
maps.remove("FALLP")
maps.remove("ONNA_1")
maps.remove("ONNA_3")
maps.remove("ONNA_6")
maps.remove("WHITEBG1")
maps.remove("WHITEBG2")
return maps
|
|
import random
class Gen(object):
'''
    A set of values, which may either be randomly drawn from or
    traversed serially. For uncountable sets (or those which are a pain
    to exhaustively enumerate) the series method should return None
'''
def series(self, size):
'''
An iterator over all elements of the set smaller than `size`
'''
raise NotImplementedError()
def coseries(self, range_series):
'''
When range_series is a function (int -> [B]) a la series, this
returns a function int -> (A -> B) where A is the set represented by self
'''
raise NotImplementedError()
def random(self, size):
'''
Draw one random sample. TODO: take other configuration values
'''
raise NotImplementedError()
def map(self, fn):
'''
Maps a function over a set
'''
return Map(fn, self)
def flat_map(self, fn):
'''
FlatMaps a function over a set (i.e. monadic bind)
'''
return FlatMap(fn, self)
class Map(Gen):
'''
    The set of values fn(x) where x is drawn from gen
'''
def __init__(self, fn, gen):
self.gen = gen
self.fn = fn
def series(self, size):
for elem in self.gen.series(size):
yield self.fn(elem)
def random(self, size):
return self.fn(self.gen.random(size))
class FlatMap(Gen):
    '''
    The set of values drawn from any Gen returned by fn(x) for any x drawn from gen
    '''
    def __init__(self, fn, gen):
        self.gen = gen
        self.fn = fn
    def series(self, size):
        for elem in self.gen.series(size):
            subgen = self.fn(elem)
            for subelem in subgen.series(size):
                yield subelem
    def random(self, size):
        # Draw an element, then sample from the Gen it maps to
        return self.fn(self.gen.random(size)).random(size)
class Empty(Gen):
'''
The empty set
'''
def __init__(self):
pass
def series(self, size):
return []
class Just(Gen):
'''
    The set containing just its argument
'''
def __init__(self, val):
self.val = val
def series(self, size):
return [self.val]
def random(self, size):
return self.val
class Ints(Gen):
'''
    All integers; both random and series are bounded by `size`
'''
def random(self, size):
return random.randint(-size, size)
def series(self, size):
yield 0
for i in xrange(1, size):
yield i
yield -i
class PosInts(Gen):
'''
All positive integers
'''
def random(self, size):
return random.randint(1, size)
def series(self, size):
for i in xrange(1, size):
yield i
class OneOf(Gen):
'''
The set of inputs drawn from a provided list
'''
def __init__(self, values):
self.values = values
def random(self, size):
return random.choice(self.values)
def series(self, size):
return self.values
class OneOfEach(Gen):
'''
OneOfEach(gens) is the set of lists l where l[i] is drawn from gens[i]
'''
def __init__(self, subgens):
self.subgens = subgens
def random(self, size):
return [gen.random(size) for gen in self.subgens]
def series(self, size):
if len(self.subgens) < 1:
yield []
else:
for head in self.subgens[0].series(size):
for tail in OneOfEach(self.subgens[1:]).series(size):
yield [head] + tail
class Dict(Gen):
'''
Dict(gendict) is the set of dictionaries where d[key] is drawn from gendict[key]
'''
def __init__(self, gendict):
self.one_of_each = OneOfEach([OneOfEach([Just(key), gen]) for key, gen in gendict.items()])
def random(self, size):
return dict(self.one_of_each.random(size))
def series(self, size):
for list_repr in self.one_of_each.series(size):
yield dict(list_repr)
class ResultOf(Gen):
'''
    The set of values fn(*args, **kwargs) where args is drawn from OneOfEach(arg_gens) and kwargs is drawn from Dict(kwarg_gens)
'''
def __init__(self, fn, *arg_gens, **kwarg_gens):
self.fn = fn
self.args_gen = OneOfEach(arg_gens)
self.kwargs_gen = Dict(kwarg_gens)
def series(self, size):
for args in self.args_gen.series(size-1):
for kwargs in self.kwargs_gen.series(size-1):
                yield self.fn(*args, **kwargs)
def random(self, size):
args = self.args_gen.random(size-1)
kwargs = self.kwargs_gen.random(size-1)
return self.fn(*args, **kwargs)
class ListOfN(Gen):
'''
ListOfN(n, gen) is the set of lists of length n where each element is drawn from gen
'''
def __init__(self, n, gen):
self.n = n
self.gen = gen
def random(self, size):
return [self.gen.random(size-1) for __ in xrange(0, self.n)]
def series(self, size):
if self.n < 1:
yield []
else:
for head in self.gen.series(size-1):
for tail in ListOfN(self.n-1, self.gen).series(size-1):
yield [head] + tail
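if __name__ == '__main__':
    # Usage sketch: enumerate a set serially, map over it, or sample from it
    ints = Ints()
    print(list(ints.series(3)))                # [0, 1, -1, 2, -2]
    evens = ints.map(lambda x: 2 * x)
    print(list(evens.series(3)))               # [0, 2, -2, 4, -4]
    pair = OneOfEach([PosInts(), OneOf(['a', 'b'])])
    print(pair.random(10))                     # e.g. [7, 'b']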
|
|
import sys
sys.path += ['../']
import torch
from torch import nn
from transformers import (
RobertaConfig,
RobertaModel,
RobertaForSequenceClassification,
RobertaTokenizer,
BertModel,
BertTokenizer,
BertConfig
)
import torch.nn.functional as F
from data.process_fn import triple_process_fn, triple2dual_process_fn
class EmbeddingMixin:
"""
    Mixin for common functions in most embedding models. Each model should define its own bert-like backbone and forward.
    Concrete subclasses inherit from a HuggingFace model (e.g. RobertaForSequenceClassification) so that from_pretrained works.
"""
def __init__(self, model_argobj):
if model_argobj is None:
self.use_mean = False
else:
self.use_mean = model_argobj.use_mean
print("Using mean:", self.use_mean)
def _init_weights(self, module):
""" Initialize the weights """
if isinstance(module, (nn.Linear, nn.Embedding, nn.Conv1d)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=0.02)
def masked_mean(self, t, mask):
s = torch.sum(t * mask.unsqueeze(-1).float(), axis=1)
d = mask.sum(axis=1, keepdim=True).float()
return s / d
def masked_mean_or_first(self, emb_all, mask):
# emb_all is a tuple from bert - sequence output, pooler
assert isinstance(emb_all, tuple)
if self.use_mean:
return self.masked_mean(emb_all[0], mask)
else:
return emb_all[0][:, 0]
def query_emb(self, input_ids, attention_mask):
raise NotImplementedError("Please Implement this method")
def body_emb(self, input_ids, attention_mask):
raise NotImplementedError("Please Implement this method")
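# Toy check of masked_mean above (standalone sketch; mirrors the method body):
#
#   t = torch.ones(2, 4, 3)                          # [batch, seq, dim]
#   m = torch.tensor([[1, 1, 0, 0], [1, 1, 1, 1]])   # attention mask
#   s = torch.sum(t * m.unsqueeze(-1).float(), axis=1)
#   d = m.sum(axis=1, keepdim=True).float()
#   s / d                                            # all ones, shape [2, 3]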
class NLL(EmbeddingMixin):
def forward(
self,
query_ids,
attention_mask_q,
input_ids_a=None,
attention_mask_a=None,
input_ids_b=None,
attention_mask_b=None,
is_query=True):
if input_ids_b is None and is_query:
return self.query_emb(query_ids, attention_mask_q)
elif input_ids_b is None:
return self.body_emb(query_ids, attention_mask_q)
q_embs = self.query_emb(query_ids, attention_mask_q)
a_embs = self.body_emb(input_ids_a, attention_mask_a)
b_embs = self.body_emb(input_ids_b, attention_mask_b)
logit_matrix = torch.cat([(q_embs * a_embs).sum(-1).unsqueeze(1),
(q_embs * b_embs).sum(-1).unsqueeze(1)], dim=1) # [B, 2]
lsm = F.log_softmax(logit_matrix, dim=1)
loss = -1.0 * lsm[:, 0]
return (loss.mean(),)
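# Shape sketch of the loss above: each query scores its positive passage `a`
# against one negative `b`, and the loss is the NLL of the positive column.
#
#   q = torch.randn(4, 8); a = torch.randn(4, 8); b = torch.randn(4, 8)
#   logits = torch.cat([(q * a).sum(-1).unsqueeze(1),
#                       (q * b).sum(-1).unsqueeze(1)], dim=1)   # [4, 2]
#   loss = (-F.log_softmax(logits, dim=1)[:, 0]).mean()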
class NLL_MultiChunk(EmbeddingMixin):
def forward(
self,
query_ids,
attention_mask_q,
input_ids_a=None,
attention_mask_a=None,
input_ids_b=None,
attention_mask_b=None,
is_query=True):
if input_ids_b is None and is_query:
return self.query_emb(query_ids, attention_mask_q)
elif input_ids_b is None:
return self.body_emb(query_ids, attention_mask_q)
q_embs = self.query_emb(query_ids, attention_mask_q)
a_embs = self.body_emb(input_ids_a, attention_mask_a)
b_embs = self.body_emb(input_ids_b, attention_mask_b)
[batchS, full_length] = input_ids_a.size()
chunk_factor = full_length // self.base_len
        # special handling of the attention mask -----
attention_mask_body = attention_mask_a.reshape(
batchS, chunk_factor, -1)[:, :, 0] # [batchS, chunk_factor]
inverted_bias = ((1 - attention_mask_body) * (-9999)).float()
        a12 = torch.matmul(q_embs.unsqueeze(1),
                           a_embs.transpose(1, 2))  # [batch, 1, chunk_factor]
        logits_a = (a12[:, 0, :] + inverted_bias).max(dim=-1).values  # [batch]
# -------------------------------------
        # special handling of the attention mask -----
attention_mask_body = attention_mask_b.reshape(
batchS, chunk_factor, -1)[:, :, 0] # [batchS, chunk_factor]
inverted_bias = ((1 - attention_mask_body) * (-9999)).float()
a12 = torch.matmul(
q_embs.unsqueeze(1), b_embs.transpose(
1, 2)) # [batch, 1, chunk_factor]
        logits_b = (a12[:, 0, :] + inverted_bias).max(
            dim=-1, keepdim=False).values  # [batch]
# -------------------------------------
logit_matrix = torch.cat(
[logits_a.unsqueeze(1), logits_b.unsqueeze(1)], dim=1) # [B, 2]
lsm = F.log_softmax(logit_matrix, dim=1)
loss = -1.0 * lsm[:, 0]
return (loss.mean(),)
class RobertaDot_NLL_LN(NLL, RobertaForSequenceClassification):
"""None
Compress embedding to 200d, then computes NLL loss.
"""
def __init__(self, config, model_argobj=None):
NLL.__init__(self, model_argobj)
RobertaForSequenceClassification.__init__(self, config)
self.embeddingHead = nn.Linear(config.hidden_size, 768)
self.norm = nn.LayerNorm(768)
self.apply(self._init_weights)
def query_emb(self, input_ids, attention_mask):
outputs1 = self.roberta(input_ids=input_ids,
attention_mask=attention_mask)
full_emb = self.masked_mean_or_first(outputs1, attention_mask)
query1 = self.norm(self.embeddingHead(full_emb))
return query1
def body_emb(self, input_ids, attention_mask):
return self.query_emb(input_ids, attention_mask)
class RobertaDot_CLF_ANN_NLL_MultiChunk(NLL_MultiChunk, RobertaDot_NLL_LN):
def __init__(self, config):
RobertaDot_NLL_LN.__init__(self, config)
self.base_len = 512
def body_emb(self, input_ids, attention_mask):
[batchS, full_length] = input_ids.size()
chunk_factor = full_length // self.base_len
        input_seq = input_ids.reshape(
            batchS, chunk_factor, full_length // chunk_factor).reshape(
                batchS * chunk_factor, full_length // chunk_factor)
        attention_mask_seq = attention_mask.reshape(
            batchS, chunk_factor, full_length // chunk_factor).reshape(
                batchS * chunk_factor, full_length // chunk_factor)
outputs_k = self.roberta(input_ids=input_seq,
attention_mask=attention_mask_seq)
compressed_output_k = self.embeddingHead(
outputs_k[0]) # [batch, len, dim]
compressed_output_k = self.norm(compressed_output_k[:, 0, :])
[batch_expand, embeddingS] = compressed_output_k.size()
complex_emb_k = compressed_output_k.reshape(
batchS, chunk_factor, embeddingS)
return complex_emb_k # size [batchS, chunk_factor, embeddingS]
class HFBertEncoder(BertModel):
def __init__(self, config):
BertModel.__init__(self, config)
assert config.hidden_size > 0, 'Encoder hidden_size can\'t be zero'
self.init_weights()
@classmethod
def init_encoder(cls, args, dropout: float = 0.1):
cfg = BertConfig.from_pretrained("bert-base-uncased")
if dropout != 0:
cfg.attention_probs_dropout_prob = dropout
cfg.hidden_dropout_prob = dropout
return cls.from_pretrained("bert-base-uncased", config=cfg)
def forward(self, input_ids, attention_mask):
hidden_states = None
sequence_output, pooled_output = super().forward(input_ids=input_ids,
attention_mask=attention_mask)
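        # Assumes the tuple return of older transformers releases; the tanh
        # pooler output is discarded below in favor of the raw [CLS] state.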
pooled_output = sequence_output[:, 0, :]
return sequence_output, pooled_output, hidden_states
    def get_out_size(self):
        # encode_proj is optional and not defined on this class; fall back to
        # the config hidden size when it is absent.
        if getattr(self, 'encode_proj', None):
            return self.encode_proj.out_features
        return self.config.hidden_size
class BiEncoder(nn.Module):
""" Bi-Encoder model component. Encapsulates query/question and context/passage encoders.
"""
def __init__(self, args):
super(BiEncoder, self).__init__()
self.question_model = HFBertEncoder.init_encoder(args)
self.ctx_model = HFBertEncoder.init_encoder(args)
def query_emb(self, input_ids, attention_mask):
sequence_output, pooled_output, hidden_states = self.question_model(input_ids, attention_mask)
return pooled_output
def body_emb(self, input_ids, attention_mask):
sequence_output, pooled_output, hidden_states = self.ctx_model(input_ids, attention_mask)
return pooled_output
def forward(self, query_ids, attention_mask_q, input_ids_a = None, attention_mask_a = None, input_ids_b = None, attention_mask_b = None):
if input_ids_b is None:
q_embs = self.query_emb(query_ids, attention_mask_q)
a_embs = self.body_emb(input_ids_a, attention_mask_a)
return (q_embs, a_embs)
q_embs = self.query_emb(query_ids, attention_mask_q)
a_embs = self.body_emb(input_ids_a, attention_mask_a)
b_embs = self.body_emb(input_ids_b, attention_mask_b)
logit_matrix = torch.cat([(q_embs*a_embs).sum(-1).unsqueeze(1), (q_embs*b_embs).sum(-1).unsqueeze(1)], dim=1) #[B, 2]
lsm = F.log_softmax(logit_matrix, dim=1)
loss = -1.0*lsm[:,0]
return (loss.mean(),)
# --------------------------------------------------
ALL_MODELS = sum(
(
tuple(conf.pretrained_config_archive_map.keys())
for conf in (
RobertaConfig,
)
),
(),
)
default_process_fn = triple_process_fn
class MSMarcoConfig:
def __init__(self, name, model, process_fn=default_process_fn, use_mean=True, tokenizer_class=RobertaTokenizer, config_class=RobertaConfig):
self.name = name
self.process_fn = process_fn
self.model_class = model
self.use_mean = use_mean
self.tokenizer_class = tokenizer_class
self.config_class = config_class
configs = [
MSMarcoConfig(name="rdot_nll",
model=RobertaDot_NLL_LN,
use_mean=False,
),
MSMarcoConfig(name="rdot_nll_multi_chunk",
model=RobertaDot_CLF_ANN_NLL_MultiChunk,
use_mean=False,
),
MSMarcoConfig(name="dpr",
model=BiEncoder,
tokenizer_class=BertTokenizer,
config_class=BertConfig,
use_mean=False,
),
]
MSMarcoConfigDict = {cfg.name: cfg for cfg in configs}
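# Hedged usage sketch: building a tokenizer and model from a config entry.
# The 'roberta-base' checkpoint name is an assumption for illustration; the
# 'dpr' entry uses BiEncoder, which is constructed from args rather than
# from_pretrained, so this helper only covers the Roberta-based entries.
def _load_msmarco_model(name='rdot_nll', checkpoint='roberta-base'):
    cfg = MSMarcoConfigDict[name]
    config = cfg.config_class.from_pretrained(checkpoint)
    tokenizer = cfg.tokenizer_class.from_pretrained(checkpoint)
    model = cfg.model_class.from_pretrained(checkpoint, config=config)
    return config, tokenizer, model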
|
|
"""
../wps.py?request=execute
&service=wps
&version=1.0.0
    &identifier=esmvaltool-portrait
&status=true
&storeExecuteResponse=true
"""
import datetime
import glob
import logging
import os
from pywps.Process import WPSProcess
from jinja2 import FileSystemLoader, Environment, select_autoescape
class Process(WPSProcess):
def __init__(self):
# init process
WPSProcess.__init__(self,
identifier="esmvaltool-portrait", # the same as the file name
version="1.0",
title="Portrait Grading Diagram",
storeSupported="True",
statusSupported="True",
abstract="Create a portrait grading diagram using ESMValTool (takes about 10 minutes).",
grassLocation=False)
model_names = ['None', 'ACCESS1-0', 'ACCESS1-3', 'bcc-csm1-1', 'EC-EARTH', 'MIROC5']
self.model1 = self.addLiteralInput(identifier="model1",
title="Model 1",
type="String",
abstract="Model name",
minOccurs=1,
maxOccurs=1,
default='ACCESS1-0',
allowedValues=model_names)
self.model2 = self.addLiteralInput(identifier="model2",
title="Model 2",
type="String",
abstract="Model name",
minOccurs=1,
maxOccurs=1,
default='ACCESS1-3',
allowedValues=model_names)
self.model3 = self.addLiteralInput(identifier="model3",
title="Model 3",
type="String",
abstract="Model name",
minOccurs=1,
maxOccurs=1,
allowedValues=model_names)
self.model4 = self.addLiteralInput(identifier="model4",
title="Model 4",
type="String",
abstract="Model name",
minOccurs=1,
maxOccurs=1,
allowedValues=model_names)
self.variable = self.addLiteralInput(identifier="variable",
title="Variable",
type="String",
default="tas",
minOccurs=1,
maxOccurs=1)
self.period = self.addLiteralInput(identifier="period",
title="Period of the data",
type="String",
default="Amon",
minOccurs=1,
maxOccurs=1)
self.experiment = self.addLiteralInput(identifier="experiment",
title="Experiment of the data",
type="String",
default="historical",
minOccurs=1,
maxOccurs=1)
self.ensemble_member = self.addLiteralInput(identifier="ensemble_member",
title="Ensemble member of the data",
type="String",
default="r1i1p1",
minOccurs=1,
maxOccurs=1)
self.startYear = self.addLiteralInput(identifier="startYear",
title="First year data used in plot",
type="Integer",
default=2003,
minOccurs=1,
maxOccurs=1)
self.endYear = self.addLiteralInput(identifier="endYear",
title="Last year data used in plot",
type="Integer",
default=2005,
minOccurs=1,
maxOccurs=1)
self.opendapURL = self.addLiteralOutput(identifier="opendapURL",
title="opendapURL",
type="String", )
self.plot = self.addComplexOutput(identifier = "plot",
title = "TimeseriesPlot",
formats = [
{"mimeType":"image/png"}
])
def execute(self):
self.status.set("starting", 0)
        #gather selected model names and log the inputs for debugging
models = []
model_values = [self.model1.getValue(), self.model2.getValue(), self.model3.getValue(), self.model4.getValue()]
for value in model_values:
if value != 'None':
models.append(value)
variable = self.variable.getValue()
period = self.period.getValue()
experiment = self.experiment.getValue()
start_year = self.startYear.getValue()
end_year = self.endYear.getValue()
ensemble_member = self.ensemble_member.getValue()
logging.debug("models %s, variable %s, period %s, experiment %s, ensemble_member %s, start_year %s, end_year %s" % (models, variable, period, experiment, ensemble_member, start_year, end_year))
        # Set the current working directory to the user's HOME dir so the
        # NetCDF library can find the user's credentials (X509 cert).
        # Note: this does not work at the moment.
os.chdir(os.environ['HOME'])
# Create output folder name
output_folder_name = "WPS_" + self.identifier + "_" + datetime.datetime.now().strftime("%Y%m%dT%H%M%SZ")
logging.debug(os.environ['POF_OUTPUT_PATH'])
#OpenDAP Url prefix (hosted by portal)
output_folder_url = os.environ['POF_OUTPUT_URL'] + output_folder_name
#Filesystem output path
output_folder_path = os.path.join(os.environ['POF_OUTPUT_PATH'], output_folder_name)
logging.debug("output folder path is %s" % output_folder_path)
#Create output directory
if not os.path.exists(output_folder_path):
os.makedirs(output_folder_path)
#copy input files to scratch (in correct folders for esmvaltool)
#next, copy input netcdf to a location esmvaltool expects
# example cmpi5 esgf link
# http://esgf-data1.ceda.ac.uk/thredds/dodsC/esg_dataroot/cmip5/output1/CSIRO-BOM/ACCESS1-0/historical/mon/atmos/Amon/r1i1p1/v1/tas/tas_Amon_ACCESS1-0_historical_r1i1p1_185001-200512.nc
# esmvaltool data folder example
# ETHZ_CMIP5/historical/Amon/ta/bcc-csm1-1/r1i1p1/ta_Amon_bcc-csm1-1_historical_r1i1p1_200001-200212.nc
#description = <model> SOME DESCRIPTION FIELDS HERE </model>
model_descriptions = []
for model in models:
model_descriptions.append('CMIP5_ETHZ %s %s %s %s %s %s @{MODELPATH}/ETHZ_CMIP5/' % ( model, period, experiment, ensemble_member, start_year, end_year))
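        # each entry renders like:
        # 'CMIP5_ETHZ ACCESS1-0 Amon historical r1i1p1 2003 2005 @{MODELPATH}/ETHZ_CMIP5/'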
self.status.set("setting up namelist for esmvaltool", 10)
logging.debug("model descriptions now %s" % model_descriptions)
logging.debug("variable %s" % variable)
#create esmvaltool config (using template)
environment = Environment(loader=FileSystemLoader('/namelists'))
#autoescape=select_autoescape(['html', 'xml']))
template = environment.get_template('namelist_portrait.xml')
generated_namelist = template.render(models=model_descriptions, variable=variable, work_dir=output_folder_path)
logging.debug("template output = %s" % generated_namelist)
#write generated namelist to file
namelist_path = output_folder_path + "/" + 'namelist.xml'
        with open(namelist_path, 'w') as namelist_fd:
            namelist_fd.write(generated_namelist)
#run esmvaltool command
self.status.set("running esmvaltool", 20)
os.chdir('/src/ESMValTool')
self.cmd(['python', 'main.py', namelist_path])
#grep output from output folder
self.status.set("processing output", 90)
output_image = glob.glob(output_folder_path + "/perfmetrics_grading/*.png").pop()
logging.debug("output image path is %s" % output_image)
# rel_output_image = os.path.relpath(output_image, output_folder_path)
# plot_url = output_folder_url + "/" + rel_output_image
self.plot.setValue(output_image)
#KNMI WPS Specific Set output
output_nc = glob.glob(output_folder_path + "/perfmetrics_grading/*.nc").pop()
rel_output_nc = os.path.relpath(output_nc, output_folder_path)
url = output_folder_url + "/" + rel_output_nc
        self.opendapURL.setValue(url)
        self.status.set("ready", 100)
|
|
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf.cc import uccsd
from pyscf.cc import ccsd_lambda
einsum = lib.einsum
def kernel(mycc, eris=None, t1=None, t2=None, l1=None, l2=None,
max_cycle=50, tol=1e-8, verbose=logger.INFO):
if eris is None: eris = mycc.ao2mo()
return ccsd_lambda.kernel(mycc, eris, t1, t2, l1, l2, max_cycle, tol,
verbose, make_intermediates, update_lambda)
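# Hedged usage sketch: given a converged UCCSD object `mycc`,
#     conv, l1, l2 = kernel(mycc, mycc.ao2mo(), mycc.t1, mycc.t2)
# solves the lambda equations (see the __main__ block below for a full run).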
# Index convention: the l2 and t2 amplitudes are stored as (i, j, a, b).
def make_intermediates(mycc, t1, t2, eris):
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
nocca, nvira = t1a.shape
noccb, nvirb = t1b.shape
fooa = eris.focka[:nocca,:nocca]
fova = eris.focka[:nocca,nocca:]
fvoa = eris.focka[nocca:,:nocca]
fvva = eris.focka[nocca:,nocca:]
foob = eris.fockb[:noccb,:noccb]
fovb = eris.fockb[:noccb,noccb:]
fvob = eris.fockb[noccb:,:noccb]
fvvb = eris.fockb[noccb:,noccb:]
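    # tau packs the effective doubles amplitudes t2 + t1*t1 (antisymmetrized
    # for the same-spin blocks); it feeds the one- and two-body intermediates.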
tauaa, tauab, taubb = uccsd.make_tau(t2, t1, t1)
ovov = numpy.asarray(eris.ovov)
ovov = ovov - ovov.transpose(0,3,2,1)
OVOV = numpy.asarray(eris.OVOV)
OVOV = OVOV - OVOV.transpose(0,3,2,1)
ovOV = numpy.asarray(eris.ovOV)
v1a = fvva - einsum('ja,jb->ba', fova, t1a)
v1b = fvvb - einsum('ja,jb->ba', fovb, t1b)
v1a += einsum('jcka,jkbc->ba', ovov, tauaa) * .5
v1a -= einsum('jaKC,jKbC->ba', ovOV, tauab) * .5
v1a -= einsum('kaJC,kJbC->ba', ovOV, tauab) * .5
v1b += einsum('jcka,jkbc->ba', OVOV, taubb) * .5
v1b -= einsum('kcJA,kJcB->BA', ovOV, tauab) * .5
v1b -= einsum('jcKA,jKcB->BA', ovOV, tauab) * .5
v2a = fooa + einsum('ib,jb->ij', fova, t1a)
v2b = foob + einsum('ib,jb->ij', fovb, t1b)
v2a += einsum('ibkc,jkbc->ij', ovov, tauaa) * .5
v2a += einsum('ibKC,jKbC->ij', ovOV, tauab)
v2b += einsum('ibkc,jkbc->ij', OVOV, taubb) * .5
v2b += einsum('kcIB,kJcB->IJ', ovOV, tauab)
ovoo = numpy.asarray(eris.ovoo)
ovoo = ovoo - ovoo.transpose(2,1,0,3)
OVOO = numpy.asarray(eris.OVOO)
OVOO = OVOO - OVOO.transpose(2,1,0,3)
OVoo = numpy.asarray(eris.OVoo)
ovOO = numpy.asarray(eris.ovOO)
v2a -= numpy.einsum('ibkj,kb->ij', ovoo, t1a)
v2a += numpy.einsum('KBij,KB->ij', OVoo, t1b)
v2b -= numpy.einsum('ibkj,kb->ij', OVOO, t1b)
v2b += numpy.einsum('kbIJ,kb->IJ', ovOO, t1a)
v5a = fvoa + numpy.einsum('kc,jkbc->bj', fova, t2aa)
v5a += numpy.einsum('KC,jKbC->bj', fovb, t2ab)
v5b = fvob + numpy.einsum('kc,jkbc->bj', fovb, t2bb)
v5b += numpy.einsum('kc,kJcB->BJ', fova, t2ab)
tmp = fova - numpy.einsum('kdlc,ld->kc', ovov, t1a)
tmp += numpy.einsum('kcLD,LD->kc', ovOV, t1b)
v5a += einsum('kc,kb,jc->bj', tmp, t1a, t1a)
tmp = fovb - numpy.einsum('kdlc,ld->kc', OVOV, t1b)
tmp += numpy.einsum('ldKC,ld->KC', ovOV, t1a)
v5b += einsum('kc,kb,jc->bj', tmp, t1b, t1b)
v5a -= einsum('lckj,klbc->bj', ovoo, t2aa) * .5
v5a -= einsum('LCkj,kLbC->bj', OVoo, t2ab)
v5b -= einsum('LCKJ,KLBC->BJ', OVOO, t2bb) * .5
v5b -= einsum('lcKJ,lKcB->BJ', ovOO, t2ab)
oooo = numpy.asarray(eris.oooo)
OOOO = numpy.asarray(eris.OOOO)
ooOO = numpy.asarray(eris.ooOO)
woooo = einsum('icjl,kc->ikjl', ovoo, t1a)
wOOOO = einsum('icjl,kc->ikjl', OVOO, t1b)
wooOO = einsum('icJL,kc->ikJL', ovOO, t1a)
wooOO += einsum('JCil,KC->ilJK', OVoo, t1b)
woooo += (oooo - oooo.transpose(0,3,2,1)) * .5
wOOOO += (OOOO - OOOO.transpose(0,3,2,1)) * .5
wooOO += ooOO.copy()
woooo += einsum('icjd,klcd->ikjl', ovov, tauaa) * .25
wOOOO += einsum('icjd,klcd->ikjl', OVOV, taubb) * .25
wooOO += einsum('icJD,kLcD->ikJL', ovOV, tauab)
v4ovvo = einsum('jbld,klcd->jbck', ovov, t2aa)
v4ovvo += einsum('jbLD,kLcD->jbck', ovOV, t2ab)
v4ovvo += numpy.asarray(eris.ovvo)
v4ovvo -= numpy.asarray(eris.oovv).transpose(0,3,2,1)
v4OVVO = einsum('jbld,klcd->jbck', OVOV, t2bb)
v4OVVO += einsum('ldJB,lKdC->JBCK', ovOV, t2ab)
v4OVVO += numpy.asarray(eris.OVVO)
v4OVVO -= numpy.asarray(eris.OOVV).transpose(0,3,2,1)
v4OVvo = einsum('ldJB,klcd->JBck', ovOV, t2aa)
v4OVvo += einsum('JBLD,kLcD->JBck', OVOV, t2ab)
v4OVvo += numpy.asarray(eris.OVvo)
v4ovVO = einsum('jbLD,KLCD->jbCK', ovOV, t2bb)
v4ovVO += einsum('jbld,lKdC->jbCK', ovov, t2ab)
v4ovVO += numpy.asarray(eris.ovVO)
v4oVVo = einsum('jdLB,kLdC->jBCk', ovOV, t2ab)
v4oVVo -= numpy.asarray(eris.ooVV).transpose(0,3,2,1)
v4OvvO = einsum('lbJD,lKcD->JbcK', ovOV, t2ab)
v4OvvO -= numpy.asarray(eris.OOvv).transpose(0,3,2,1)
woovo = einsum('ibck,jb->ijck', v4ovvo, t1a)
wOOVO = einsum('ibck,jb->ijck', v4OVVO, t1b)
wOOvo = einsum('IBck,JB->IJck', v4OVvo, t1b)
wOOvo -= einsum('IbcK,jb->IKcj', v4OvvO, t1a)
wooVO = einsum('ibCK,jb->ijCK', v4ovVO, t1a)
wooVO -= einsum('iBCk,JB->ikCJ', v4oVVo, t1b)
woovo += ovoo.conj().transpose(3,2,1,0) * .5
wOOVO += OVOO.conj().transpose(3,2,1,0) * .5
wooVO += OVoo.conj().transpose(3,2,1,0)
wOOvo += ovOO.conj().transpose(3,2,1,0)
woovo -= einsum('iclk,jlbc->ikbj', ovoo, t2aa)
woovo += einsum('LCik,jLbC->ikbj', OVoo, t2ab)
wOOVO -= einsum('iclk,jlbc->ikbj', OVOO, t2bb)
wOOVO += einsum('lcIK,lJcB->IKBJ', ovOO, t2ab)
wooVO -= einsum('iclk,lJcB->ikBJ', ovoo, t2ab)
wooVO += einsum('LCik,JLBC->ikBJ', OVoo, t2bb)
wooVO -= einsum('icLK,jLcB->ijBK', ovOO, t2ab)
wOOvo -= einsum('ICLK,jLbC->IKbj', OVOO, t2ab)
wOOvo += einsum('lcIK,jlbc->IKbj', ovOO, t2aa)
wOOvo -= einsum('IClk,lJbC->IJbk', OVoo, t2ab)
wvvvo = einsum('jack,jb->back', v4ovvo, t1a)
wVVVO = einsum('jack,jb->back', v4OVVO, t1b)
wVVvo = einsum('JAck,JB->BAck', v4OVvo, t1b)
wVVvo -= einsum('jACk,jb->CAbk', v4oVVo, t1a)
wvvVO = einsum('jaCK,jb->baCK', v4ovVO, t1a)
wvvVO -= einsum('JacK,JB->caBK', v4OvvO, t1b)
wvvvo += einsum('lajk,jlbc->back', .25*ovoo, tauaa)
wVVVO += einsum('lajk,jlbc->back', .25*OVOO, taubb)
wVVvo -= einsum('LAjk,jLcB->BAck', OVoo, tauab)
wvvVO -= einsum('laJK,lJbC->baCK', ovOO, tauab)
w3a = numpy.einsum('jbck,jb->ck', v4ovvo, t1a)
w3a += numpy.einsum('JBck,JB->ck', v4OVvo, t1b)
w3b = numpy.einsum('jbck,jb->ck', v4OVVO, t1b)
w3b += numpy.einsum('jbCK,jb->CK', v4ovVO, t1a)
wovvo = v4ovvo
wOVVO = v4OVVO
wovVO = v4ovVO
wOVvo = v4OVvo
woVVo = v4oVVo
wOvvO = v4OvvO
wovvo += lib.einsum('jbld,kd,lc->jbck', ovov, t1a, -t1a)
wOVVO += lib.einsum('jbld,kd,lc->jbck', OVOV, t1b, -t1b)
wovVO += lib.einsum('jbLD,KD,LC->jbCK', ovOV, t1b, -t1b)
wOVvo += lib.einsum('ldJB,kd,lc->JBck', ovOV, t1a, -t1a)
woVVo += lib.einsum('jdLB,kd,LC->jBCk', ovOV, t1a, t1b)
wOvvO += lib.einsum('lbJD,KD,lc->JbcK', ovOV, t1b, t1a)
wovvo -= einsum('jblk,lc->jbck', ovoo, t1a)
wOVVO -= einsum('jblk,lc->jbck', OVOO, t1b)
wovVO -= einsum('jbLK,LC->jbCK', ovOO, t1b)
wOVvo -= einsum('JBlk,lc->JBck', OVoo, t1a)
woVVo += einsum('LBjk,LC->jBCk', OVoo, t1b)
wOvvO += einsum('lbJK,lc->JbcK', ovOO, t1a)
if nvira > 0 and nocca > 0:
ovvv = numpy.asarray(eris.get_ovvv())
ovvv = ovvv - ovvv.transpose(0,3,2,1)
v1a -= numpy.einsum('jabc,jc->ba', ovvv, t1a)
v5a += einsum('kdbc,jkcd->bj', ovvv, t2aa) * .5
woovo += einsum('idcb,kjbd->ijck', ovvv, tauaa) * .25
wovvo += einsum('jbcd,kd->jbck', ovvv, t1a)
wvvvo -= ovvv.conj().transpose(3,2,1,0) * .5
wvvvo += einsum('jacd,kjbd->cabk', ovvv, t2aa)
wvvVO += einsum('jacd,jKdB->caBK', ovvv, t2ab)
ovvv = tmp = None
if nvirb > 0 and noccb > 0:
OVVV = numpy.asarray(eris.get_OVVV())
OVVV = OVVV - OVVV.transpose(0,3,2,1)
v1b -= numpy.einsum('jabc,jc->ba', OVVV, t1b)
v5b += einsum('KDBC,JKCD->BJ', OVVV, t2bb) * .5
wOOVO += einsum('idcb,kjbd->ijck', OVVV, taubb) * .25
wOVVO += einsum('jbcd,kd->jbck', OVVV, t1b)
wVVVO -= OVVV.conj().transpose(3,2,1,0) * .5
wVVVO += einsum('jacd,kjbd->cabk', OVVV, t2bb)
wVVvo += einsum('JACD,kJbD->CAbk', OVVV, t2ab)
OVVV = tmp = None
if nvirb > 0 and nocca > 0:
OVvv = numpy.asarray(eris.get_OVvv())
v1a += numpy.einsum('JCba,JC->ba', OVvv, t1b)
v5a += einsum('KDbc,jKcD->bj', OVvv, t2ab)
wOOvo += einsum('IDcb,kJbD->IJck', OVvv, tauab)
wOVvo += einsum('JBcd,kd->JBck', OVvv, t1a)
wOvvO -= einsum('JDcb,KD->JbcK', OVvv, t1b)
wvvVO -= OVvv.conj().transpose(3,2,1,0)
wvvvo -= einsum('KDca,jKbD->cabj', OVvv, t2ab)
wvvVO -= einsum('KDca,JKBD->caBJ', OVvv, t2bb)
wVVvo += einsum('KAcd,jKdB->BAcj', OVvv, t2ab)
OVvv = tmp = None
if nvira > 0 and noccb > 0:
ovVV = numpy.asarray(eris.get_ovVV())
v1b += numpy.einsum('jcBA,jc->BA', ovVV, t1a)
v5b += einsum('kdBC,kJdC->BJ', ovVV, t2ab)
wooVO += einsum('idCB,jKdB->ijCK', ovVV, tauab)
wovVO += einsum('jbCD,KD->jbCK', ovVV, t1b)
woVVo -= einsum('jdCB,kd->jBCk', ovVV, t1a)
wVVvo -= ovVV.conj().transpose(3,2,1,0)
wVVVO -= einsum('kdCA,kJdB->CABJ', ovVV, t2ab)
wVVvo -= einsum('kdCA,jkbd->CAbj', ovVV, t2aa)
wvvVO += einsum('kaCD,kJbD->baCJ', ovVV, t2ab)
ovVV = tmp = None
w3a += v5a
w3b += v5b
w3a += lib.einsum('cb,jb->cj', v1a, t1a)
w3b += lib.einsum('cb,jb->cj', v1b, t1b)
w3a -= lib.einsum('jk,jb->bk', v2a, t1a)
w3b -= lib.einsum('jk,jb->bk', v2b, t1b)
class _IMDS: pass
imds = _IMDS()
imds.ftmp = lib.H5TmpFile()
dtype = numpy.result_type(t2ab, eris.vvvv).char
imds.woooo = imds.ftmp.create_dataset('woooo', (nocca,nocca,nocca,nocca), dtype)
imds.wooOO = imds.ftmp.create_dataset('wooOO', (nocca,nocca,noccb,noccb), dtype)
imds.wOOOO = imds.ftmp.create_dataset('wOOOO', (noccb,noccb,noccb,noccb), dtype)
imds.wovvo = imds.ftmp.create_dataset('wovvo', (nocca,nvira,nvira,nocca), dtype)
imds.wOVVO = imds.ftmp.create_dataset('wOVVO', (noccb,nvirb,nvirb,noccb), dtype)
imds.wovVO = imds.ftmp.create_dataset('wovVO', (nocca,nvira,nvirb,noccb), dtype)
imds.wOVvo = imds.ftmp.create_dataset('wOVvo', (noccb,nvirb,nvira,nocca), dtype)
imds.woVVo = imds.ftmp.create_dataset('woVVo', (nocca,nvirb,nvirb,nocca), dtype)
imds.wOvvO = imds.ftmp.create_dataset('wOvvO', (noccb,nvira,nvira,noccb), dtype)
imds.woovo = imds.ftmp.create_dataset('woovo', (nocca,nocca,nvira,nocca), dtype)
imds.wOOVO = imds.ftmp.create_dataset('wOOVO', (noccb,noccb,nvirb,noccb), dtype)
imds.wOOvo = imds.ftmp.create_dataset('wOOvo', (noccb,noccb,nvira,nocca), dtype)
imds.wooVO = imds.ftmp.create_dataset('wooVO', (nocca,nocca,nvirb,noccb), dtype)
imds.wvvvo = imds.ftmp.create_dataset('wvvvo', (nvira,nvira,nvira,nocca), dtype)
imds.wVVVO = imds.ftmp.create_dataset('wVVVO', (nvirb,nvirb,nvirb,noccb), dtype)
imds.wVVvo = imds.ftmp.create_dataset('wVVvo', (nvirb,nvirb,nvira,nocca), dtype)
imds.wvvVO = imds.ftmp.create_dataset('wvvVO', (nvira,nvira,nvirb,noccb), dtype)
imds.woooo[:] = woooo
imds.wOOOO[:] = wOOOO
imds.wooOO[:] = wooOO
imds.wovvo[:] = wovvo
imds.wOVVO[:] = wOVVO
imds.wovVO[:] = wovVO
imds.wOVvo[:] = wOVvo
imds.woVVo[:] = woVVo
imds.wOvvO[:] = wOvvO
imds.woovo[:] = woovo
imds.wOOVO[:] = wOOVO
imds.wOOvo[:] = wOOvo
imds.wooVO[:] = wooVO
imds.wvvvo[:] = wvvvo
imds.wVVVO[:] = wVVVO
imds.wVVvo[:] = wVVvo
imds.wvvVO[:] = wvvVO
imds.v1a = v1a
imds.v1b = v1b
imds.v2a = v2a
imds.v2b = v2b
imds.w3a = w3a
imds.w3b = w3b
imds.ftmp.flush()
return imds
# update L1, L2
def update_lambda(mycc, t1, t2, l1, l2, eris, imds):
time0 = logger.process_clock(), logger.perf_counter()
log = logger.Logger(mycc.stdout, mycc.verbose)
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
l1a, l1b = l1
l2aa, l2ab, l2bb = l2
nocca, nvira = t1a.shape
noccb, nvirb = t1b.shape
u1a = numpy.zeros_like(l1a)
u1b = numpy.zeros_like(l1b)
u2aa = numpy.zeros_like(l2aa)
u2ab = numpy.zeros_like(l2ab)
u2bb = numpy.zeros_like(l2bb)
mo_ea_o = eris.mo_energy[0][:nocca]
mo_ea_v = eris.mo_energy[0][nocca:] + mycc.level_shift
mo_eb_o = eris.mo_energy[1][:noccb]
mo_eb_v = eris.mo_energy[1][noccb:] + mycc.level_shift
fova = eris.focka[:nocca,nocca:]
fovb = eris.fockb[:noccb,noccb:]
v1a = imds.v1a - numpy.diag(mo_ea_v)
v1b = imds.v1b - numpy.diag(mo_eb_v)
v2a = imds.v2a - numpy.diag(mo_ea_o)
v2b = imds.v2b - numpy.diag(mo_eb_o)
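    # Subtracting the orbital-energy diagonal from v1/v2 leaves only the
    # off-diagonal part, so dividing by eia/eIA at the end of this function
    # acts as the Jacobi-style update denominator.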
mvv = einsum('klca,klcb->ba', l2aa, t2aa) * .5
mvv+= einsum('lKaC,lKbC->ba', l2ab, t2ab)
mVV = einsum('klca,klcb->ba', l2bb, t2bb) * .5
mVV+= einsum('kLcA,kLcB->BA', l2ab, t2ab)
moo = einsum('kicd,kjcd->ij', l2aa, t2aa) * .5
moo+= einsum('iKdC,jKdC->ij', l2ab, t2ab)
mOO = einsum('kicd,kjcd->ij', l2bb, t2bb) * .5
mOO+= einsum('kIcD,kJcD->IJ', l2ab, t2ab)
#m3 = lib.einsum('ijcd,cdab->ijab', l2, eris.vvvv) * .5
m3aa, m3ab, m3bb = mycc._add_vvvv(None, (l2aa.conj(),l2ab.conj(),l2bb.conj()), eris)
m3aa = m3aa.conj()
m3ab = m3ab.conj()
m3bb = m3bb.conj()
m3aa += lib.einsum('klab,ikjl->ijab', l2aa, numpy.asarray(imds.woooo))
m3bb += lib.einsum('klab,ikjl->ijab', l2bb, numpy.asarray(imds.wOOOO))
m3ab += lib.einsum('kLaB,ikJL->iJaB', l2ab, numpy.asarray(imds.wooOO))
ovov = numpy.asarray(eris.ovov)
ovov = ovov - ovov.transpose(0,3,2,1)
OVOV = numpy.asarray(eris.OVOV)
OVOV = OVOV - OVOV.transpose(0,3,2,1)
ovOV = numpy.asarray(eris.ovOV)
mvv1 = einsum('jc,jb->bc', l1a, t1a) + mvv
mVV1 = einsum('jc,jb->bc', l1b, t1b) + mVV
moo1 = einsum('ic,kc->ik', l1a, t1a) + moo
mOO1 = einsum('ic,kc->ik', l1b, t1b) + mOO
if nvira > 0 and nocca > 0:
ovvv = numpy.asarray(eris.get_ovvv())
ovvv = ovvv - ovvv.transpose(0,3,2,1)
tmp = lib.einsum('ijcd,kd->ijck', l2aa, t1a)
m3aa -= lib.einsum('kbca,ijck->ijab', ovvv, tmp)
tmp = einsum('ic,jbca->jiba', l1a, ovvv)
tmp+= einsum('kiab,jk->ijab', l2aa, v2a)
tmp-= einsum('ik,kajb->ijab', moo1, ovov)
u2aa += tmp - tmp.transpose(1,0,2,3)
u1a += numpy.einsum('iacb,bc->ia', ovvv, mvv1)
ovvv = tmp = None
if nvirb > 0 and noccb > 0:
OVVV = numpy.asarray(eris.get_OVVV())
OVVV = OVVV - OVVV.transpose(0,3,2,1)
tmp = lib.einsum('ijcd,kd->ijck', l2bb, t1b)
m3bb -= lib.einsum('kbca,ijck->ijab', OVVV, tmp)
tmp = einsum('ic,jbca->jiba', l1b, OVVV)
tmp+= einsum('kiab,jk->ijab', l2bb, v2b)
tmp-= einsum('ik,kajb->ijab', mOO1, OVOV)
u2bb += tmp - tmp.transpose(1,0,2,3)
u1b += numpy.einsum('iaCB,BC->ia', OVVV, mVV1)
OVVV = tmp = None
if nvirb > 0 and nocca > 0:
OVvv = numpy.asarray(eris.get_OVvv())
tmp = lib.einsum('iJcD,KD->iJcK', l2ab, t1b)
m3ab -= lib.einsum('KBca,iJcK->iJaB', OVvv, tmp)
tmp = einsum('ic,JAcb->JibA', l1a, OVvv)
tmp-= einsum('kIaB,jk->IjaB', l2ab, v2a)
tmp-= einsum('IK,jaKB->IjaB', mOO1, ovOV)
u2ab += tmp.transpose(1,0,2,3)
u1b += numpy.einsum('iacb,bc->ia', OVvv, mvv1)
OVvv = tmp = None
if nvira > 0 and noccb > 0:
ovVV = numpy.asarray(eris.get_ovVV())
tmp = lib.einsum('iJdC,kd->iJCk', l2ab, t1a)
m3ab -= lib.einsum('kaCB,iJCk->iJaB', ovVV, tmp)
tmp = einsum('IC,jbCA->jIbA', l1b, ovVV)
tmp-= einsum('iKaB,JK->iJaB', l2ab, v2b)
tmp-= einsum('ik,kaJB->iJaB', moo1, ovOV)
u2ab += tmp
u1a += numpy.einsum('iaCB,BC->ia', ovVV, mVV1)
ovVV = tmp = None
tauaa, tauab, taubb = uccsd.make_tau(t2, t1, t1)
tmp = lib.einsum('ijcd,klcd->ijkl', l2aa, tauaa)
ovov = numpy.asarray(eris.ovov)
ovov = ovov - ovov.transpose(0,3,2,1)
m3aa += lib.einsum('kalb,ijkl->ijab', ovov, tmp) * .25
tmp = lib.einsum('ijcd,klcd->ijkl', l2bb, taubb)
OVOV = numpy.asarray(eris.OVOV)
OVOV = OVOV - OVOV.transpose(0,3,2,1)
m3bb += lib.einsum('kalb,ijkl->ijab', OVOV, tmp) * .25
tmp = lib.einsum('iJcD,kLcD->iJkL', l2ab, tauab)
ovOV = numpy.asarray(eris.ovOV)
m3ab += lib.einsum('kaLB,iJkL->iJaB', ovOV, tmp) * .5
tmp = lib.einsum('iJdC,lKdC->iJKl', l2ab, tauab)
m3ab += lib.einsum('laKB,iJKl->iJaB', ovOV, tmp) * .5
u1a += numpy.einsum('ijab,jb->ia', m3aa, t1a)
u1a += numpy.einsum('iJaB,JB->ia', m3ab, t1b)
u1b += numpy.einsum('IJAB,JB->IA', m3bb, t1b)
u1b += numpy.einsum('jIbA,jb->IA', m3ab, t1a)
u2aa += m3aa
u2bb += m3bb
u2ab += m3ab
u2aa += ovov.transpose(0,2,1,3)
u2bb += OVOV.transpose(0,2,1,3)
u2ab += ovOV.transpose(0,2,1,3)
fov1 = fova + numpy.einsum('kcjb,kc->jb', ovov, t1a)
fov1+= numpy.einsum('jbKC,KC->jb', ovOV, t1b)
tmp = numpy.einsum('ia,jb->ijab', l1a, fov1)
tmp+= einsum('kica,jbck->ijab', l2aa, imds.wovvo)
tmp+= einsum('iKaC,jbCK->ijab', l2ab, imds.wovVO)
tmp = tmp - tmp.transpose(1,0,2,3)
u2aa += tmp - tmp.transpose(0,1,3,2)
fov1 = fovb + numpy.einsum('kcjb,kc->jb', OVOV, t1b)
fov1+= numpy.einsum('kcJB,kc->JB', ovOV, t1a)
tmp = numpy.einsum('ia,jb->ijab', l1b, fov1)
tmp+= einsum('kica,jbck->ijab', l2bb, imds.wOVVO)
tmp+= einsum('kIcA,JBck->IJAB', l2ab, imds.wOVvo)
tmp = tmp - tmp.transpose(1,0,2,3)
u2bb += tmp - tmp.transpose(0,1,3,2)
fov1 = fovb + numpy.einsum('kcjb,kc->jb', OVOV, t1b)
fov1+= numpy.einsum('kcJB,kc->JB', ovOV, t1a)
u2ab += numpy.einsum('ia,JB->iJaB', l1a, fov1)
u2ab += einsum('iKaC,JBCK->iJaB', l2ab, imds.wOVVO)
u2ab += einsum('kica,JBck->iJaB', l2aa, imds.wOVvo)
u2ab += einsum('kIaC,jBCk->jIaB', l2ab, imds.woVVo)
u2ab += einsum('iKcA,JbcK->iJbA', l2ab, imds.wOvvO)
fov1 = fova + numpy.einsum('kcjb,kc->jb', ovov, t1a)
fov1+= numpy.einsum('jbKC,KC->jb', ovOV, t1b)
u2ab += numpy.einsum('ia,jb->jiba', l1b, fov1)
u2ab += einsum('kIcA,jbck->jIbA', l2ab, imds.wovvo)
u2ab += einsum('KICA,jbCK->jIbA', l2bb, imds.wovVO)
ovoo = numpy.asarray(eris.ovoo)
ovoo = ovoo - ovoo.transpose(2,1,0,3)
OVOO = numpy.asarray(eris.OVOO)
OVOO = OVOO - OVOO.transpose(2,1,0,3)
OVoo = numpy.asarray(eris.OVoo)
ovOO = numpy.asarray(eris.ovOO)
tmp = einsum('ka,jbik->ijab', l1a, ovoo)
tmp+= einsum('ijca,cb->ijab', l2aa, v1a)
tmp+= einsum('ca,icjb->ijab', mvv1, ovov)
u2aa -= tmp - tmp.transpose(0,1,3,2)
tmp = einsum('ka,jbik->ijab', l1b, OVOO)
tmp+= einsum('ijca,cb->ijab', l2bb, v1b)
tmp+= einsum('ca,icjb->ijab', mVV1, OVOV)
u2bb -= tmp - tmp.transpose(0,1,3,2)
u2ab -= einsum('ka,JBik->iJaB', l1a, OVoo)
u2ab += einsum('iJaC,CB->iJaB', l2ab, v1b)
u2ab -= einsum('ca,icJB->iJaB', mvv1, ovOV)
u2ab -= einsum('KA,ibJK->iJbA', l1b, ovOO)
u2ab += einsum('iJcA,cb->iJbA', l2ab, v1a)
u2ab -= einsum('CA,ibJC->iJbA', mVV1, ovOV)
u1a += fova
u1b += fovb
u1a += einsum('ib,ba->ia', l1a, v1a)
u1a -= einsum('ja,ij->ia', l1a, v2a)
u1b += einsum('ib,ba->ia', l1b, v1b)
u1b -= einsum('ja,ij->ia', l1b, v2b)
u1a += numpy.einsum('jb,iabj->ia', l1a, eris.ovvo)
u1a -= numpy.einsum('jb,ijba->ia', l1a, eris.oovv)
u1a += numpy.einsum('JB,iaBJ->ia', l1b, eris.ovVO)
u1b += numpy.einsum('jb,iabj->ia', l1b, eris.OVVO)
u1b -= numpy.einsum('jb,ijba->ia', l1b, eris.OOVV)
u1b += numpy.einsum('jb,iabj->ia', l1a, eris.OVvo)
u1a -= einsum('kjca,ijck->ia', l2aa, imds.woovo)
u1a -= einsum('jKaC,ijCK->ia', l2ab, imds.wooVO)
u1b -= einsum('kjca,ijck->ia', l2bb, imds.wOOVO)
u1b -= einsum('kJcA,IJck->IA', l2ab, imds.wOOvo)
u1a -= einsum('ikbc,back->ia', l2aa, imds.wvvvo)
u1a -= einsum('iKbC,baCK->ia', l2ab, imds.wvvVO)
u1b -= einsum('IKBC,BACK->IA', l2bb, imds.wVVVO)
u1b -= einsum('kIcB,BAck->IA', l2ab, imds.wVVvo)
u1a += numpy.einsum('jiba,bj->ia', l2aa, imds.w3a)
u1a += numpy.einsum('iJaB,BJ->ia', l2ab, imds.w3b)
u1b += numpy.einsum('JIBA,BJ->IA', l2bb, imds.w3b)
u1b += numpy.einsum('jIbA,bj->IA', l2ab, imds.w3a)
tmpa = t1a + numpy.einsum('kc,kjcb->jb', l1a, t2aa)
tmpa += numpy.einsum('KC,jKbC->jb', l1b, t2ab)
tmpa -= einsum('bd,jd->jb', mvv1, t1a)
tmpa -= einsum('lj,lb->jb', moo, t1a)
tmpb = t1b + numpy.einsum('kc,kjcb->jb', l1b, t2bb)
tmpb += numpy.einsum('kc,kJcB->JB', l1a, t2ab)
tmpb -= einsum('bd,jd->jb', mVV1, t1b)
tmpb -= einsum('lj,lb->jb', mOO, t1b)
u1a += numpy.einsum('jbia,jb->ia', ovov, tmpa)
u1a += numpy.einsum('iaJB,JB->ia', ovOV, tmpb)
u1b += numpy.einsum('jbia,jb->ia', OVOV, tmpb)
u1b += numpy.einsum('jbIA,jb->IA', ovOV, tmpa)
u1a -= numpy.einsum('iajk,kj->ia', ovoo, moo1)
u1a -= numpy.einsum('iaJK,KJ->ia', ovOO, mOO1)
u1b -= numpy.einsum('iajk,kj->ia', OVOO, mOO1)
u1b -= numpy.einsum('IAjk,kj->IA', OVoo, moo1)
tmp = fova - numpy.einsum('kbja,jb->ka', ovov, t1a)
tmp += numpy.einsum('kaJB,JB->ka', ovOV, t1b)
u1a -= lib.einsum('ik,ka->ia', moo, tmp)
u1a -= lib.einsum('ca,ic->ia', mvv, tmp)
tmp = fovb - numpy.einsum('kbja,jb->ka', OVOV, t1b)
tmp += numpy.einsum('jbKA,jb->KA', ovOV, t1a)
u1b -= lib.einsum('ik,ka->ia', mOO, tmp)
u1b -= lib.einsum('ca,ic->ia', mVV, tmp)
eia = lib.direct_sum('i-j->ij', mo_ea_o, mo_ea_v)
eIA = lib.direct_sum('i-j->ij', mo_eb_o, mo_eb_v)
u1a /= eia
u1b /= eIA
u2aa /= lib.direct_sum('ia+jb->ijab', eia, eia)
u2ab /= lib.direct_sum('ia+jb->ijab', eia, eIA)
u2bb /= lib.direct_sum('ia+jb->ijab', eIA, eIA)
time0 = log.timer_debug1('update l1 l2', *time0)
return (u1a,u1b), (u2aa,u2ab,u2bb)
if __name__ == '__main__':
from pyscf import gto
from pyscf import scf
from pyscf.cc import gccsd
mol = gto.Mole()
mol.atom = [
[8 , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]]
mol.basis = '631g'
mol.spin = 2
mol.build()
mf = scf.UHF(mol).run()
mycc = gccsd.GCCSD(scf.addons.convert_to_ghf(mf))
eris = mycc.ao2mo()
mycc.kernel()
l1, l2 = mycc.solve_lambda(mycc.t1, mycc.t2, eris=eris)
l1ref = mycc.spin2spatial(l1, mycc.mo_coeff.orbspin)
l2ref = mycc.spin2spatial(l2, mycc.mo_coeff.orbspin)
mycc = uccsd.UCCSD(mf)
eris = mycc.ao2mo()
mycc.kernel()
conv, l1, l2 = kernel(mycc, eris, mycc.t1, mycc.t2, tol=1e-8)
print(abs(l1[0]-l1ref[0]).max())
print(abs(l1[1]-l1ref[1]).max())
print(abs(l2[0]-l2ref[0]).max())
print(abs(l2[1]-l2ref[1]).max())
print(abs(l2[2]-l2ref[2]).max())
|
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for model_search.task_manager."""
import collections
from absl.testing import parameterized
from model_search import hparam as hp
from model_search import loss_fns
from model_search import task_manager
from model_search.architecture import architecture_utils
from model_search.generators import trial_utils
from model_search.proto import phoenix_spec_pb2
import numpy as np
import tensorflow.compat.v2 as tf
from google.protobuf import text_format
def _loss_fn(labels, logits, weights=1.0):
"""Cross entropy loss fn."""
label_ids = tf.squeeze(labels)
if label_ids.dtype == tf.float32:
label_ids = tf.cast(label_ids, 'int32')
one_hot_labels = tf.one_hot(indices=label_ids, depth=logits.shape[-1])
return tf.reduce_mean(
input_tensor=tf.compat.v1.losses.softmax_cross_entropy(
onehot_labels=one_hot_labels, logits=logits, weights=weights))
def _default_predictions_fn(logits,
mode=tf.estimator.ModeKeys.TRAIN,
temperature=1.0):
"""Converts logits to predictions dict. Assumes classification."""
new_logits = logits
if mode == tf.estimator.ModeKeys.PREDICT and temperature != 1.0:
temp_const = tf.constant(1 / temperature, name='softmax_temperature_const')
new_logits = tf.multiply(logits, temp_const, name='softmax_temperature_mul')
predictions = tf.math.argmax(input=new_logits, axis=-1)
probabilities = tf.nn.softmax(new_logits)
log_probabilities = tf.nn.log_softmax(new_logits)
predictions_dict = {
'predictions': predictions,
'probabilities': probabilities,
'log_probabilities': log_probabilities,
}
return predictions_dict
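# Hedged sketch: the temperature scaling applied above flattens the softmax
# distribution at predict time (temperature > 1.0). The helper below is
# illustrative only and is not used by the tests.
def _temperature_scaled_probabilities(logits, temperature=2.0):
  """Returns softmax probabilities of `logits` scaled by 1/temperature."""
  return tf.nn.softmax(logits / temperature)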
class TaskManagerTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.named_parameters(
{
'testcase_name': 'l2_reg',
'learning_rate_spec': {
'learning_rate': 0.001,
'l2_regularization': 0.01
},
'contains_node': 'l2_weight_loss',
'not_containing': ['clip_by_global_norm', 'ExponentialDecay']
}, {
'testcase_name': 'clipping',
'learning_rate_spec': {
'learning_rate': 0.001,
'gradient_max_norm': 3
},
'contains_node': 'clip_by_global_norm',
'not_containing': ['l2_weight_loss', 'ExponentialDecay']
}, {
'testcase_name': 'decay',
'learning_rate_spec': {
'learning_rate': 0.001,
'exponential_decay_steps': 100,
'exponential_decay_rate': 0.7
},
'contains_node': 'ExponentialDecay',
'not_containing': ['l2_weight_loss', 'clip_by_global_norm']
})
def test_learning_spec(self, learning_rate_spec, contains_node,
not_containing):
# Force graph mode
with tf.compat.v1.Graph().as_default():
spec = phoenix_spec_pb2.PhoenixSpec(
problem_type=phoenix_spec_pb2.PhoenixSpec.DNN)
task_manager_instance = task_manager.TaskManager(
spec,
logits_dimension=None,
loss_fn=loss_fns.make_multi_class_loss_fn(),
head=None)
logits = tf.keras.layers.Dense(10)(tf.zeros([20, 10]))
logits_spec = architecture_utils.LogitsSpec(logits=logits)
fake_tower = collections.namedtuple('fake_tower', ['logits_spec'])
towers = {'search_generator': [fake_tower(logits_spec)]}
features = {'x': tf.zeros([10, 10])}
model = task_manager_instance.create_model_spec(
features=features,
params=hp.HParams(optimizer='sgd'),
learning_rate_spec=learning_rate_spec,
towers=towers,
labels=tf.ones([20], dtype=tf.int32),
mode=tf.estimator.ModeKeys.TRAIN,
trial_mode=trial_utils.TrialMode.NO_PRIOR,
my_id=1,
model_directory=self.get_temp_dir(),
use_tpu=False,
predictions_fn=_default_predictions_fn)
self.assertNotEmpty([
node.name
for node in tf.compat.v1.get_default_graph().as_graph_def().node
if contains_node in node.name
])
for phrase in not_containing:
self.assertEmpty([
node.name
for node in tf.compat.v1.get_default_graph().as_graph_def().node
if phrase in node.name
])
self.assertLen(model.predictions, 3)
self.assertIn('probabilities', model.predictions)
self.assertIn('log_probabilities', model.predictions)
self.assertIn('predictions', model.predictions)
@parameterized.named_parameters(
{
'testcase_name':
'l2_reg',
'learning_rate_spec': {
'learning_rate': 0.001,
'l2_regularization': 0.01
},
'not_containing':
['l2_weight_loss', 'clip_by_global_norm', 'ExponentialDecay']
}, {
'testcase_name':
'clipping',
'learning_rate_spec': {
'learning_rate': 0.001,
'gradient_max_norm': 3
},
'not_containing':
['l2_weight_loss', 'clip_by_global_norm', 'ExponentialDecay']
}, {
'testcase_name':
'decay',
'learning_rate_spec': {
'learning_rate': 0.001,
'exponential_decay_steps': 100,
'exponential_decay_rate': 0.7
},
'not_containing':
['l2_weight_loss', 'clip_by_global_norm', 'ExponentialDecay']
})
def test_learning_spec_on_eval(self, learning_rate_spec, not_containing):
spec = phoenix_spec_pb2.PhoenixSpec(
problem_type=phoenix_spec_pb2.PhoenixSpec.DNN)
task_manager_instance = task_manager.TaskManager(
spec,
logits_dimension=None,
loss_fn=loss_fns.make_multi_class_loss_fn(),
head=None)
logits = tf.keras.layers.Dense(10)(tf.zeros([20, 10]))
logits_spec = architecture_utils.LogitsSpec(logits=logits)
fake_tower = collections.namedtuple('fake_tower', ['logits_spec'])
towers = {'search_generator': [fake_tower(logits_spec)]}
features = {'x': tf.zeros([10, 10])}
model = task_manager_instance.create_model_spec(
features=features,
params=hp.HParams(optimizer='sgd'),
learning_rate_spec=learning_rate_spec,
towers=towers,
labels=tf.ones([20], dtype=tf.int32),
model_directory=self.get_temp_dir(),
mode=tf.estimator.ModeKeys.EVAL,
my_id=1,
trial_mode=trial_utils.TrialMode.NO_PRIOR,
use_tpu=False,
predictions_fn=_default_predictions_fn)
for phrase in not_containing:
self.assertEmpty([
node.name
for node in tf.compat.v1.get_default_graph().as_graph_def().node
if phrase in node.name
])
self.assertLen(model.predictions, 3)
self.assertIn('probabilities', model.predictions)
self.assertIn('log_probabilities', model.predictions)
self.assertIn('predictions', model.predictions)
self.assertNotEqual(model.loss, None)
@parameterized.named_parameters(
{
'testcase_name':
'l2_reg',
'learning_rate_spec': {
'learning_rate': 0.001,
'l2_regularization': 0.01
},
'not_containing':
['l2_weight_loss', 'clip_by_global_norm', 'ExponentialDecay']
}, {
'testcase_name':
'clipping',
'learning_rate_spec': {
'learning_rate': 0.001,
'gradient_max_norm': 3
},
'not_containing':
['l2_weight_loss', 'clip_by_global_norm', 'ExponentialDecay']
}, {
'testcase_name':
'decay',
'learning_rate_spec': {
'learning_rate': 0.001,
'exponential_decay_steps': 100,
'exponential_decay_rate': 0.7
},
'not_containing':
['l2_weight_loss', 'clip_by_global_norm', 'ExponentialDecay']
})
def test_learning_spec_on_predict(self, learning_rate_spec, not_containing):
spec = phoenix_spec_pb2.PhoenixSpec(
problem_type=phoenix_spec_pb2.PhoenixSpec.DNN)
task_manager_instance = task_manager.TaskManager(
spec,
logits_dimension=None,
loss_fn=loss_fns.make_multi_class_loss_fn(),
head=None)
logits = tf.keras.layers.Dense(10)(tf.zeros([20, 10]))
logits_spec = architecture_utils.LogitsSpec(logits=logits)
fake_tower = collections.namedtuple('fake_tower', ['logits_spec'])
towers = {'search_generator': [fake_tower(logits_spec)]}
features = {'x': tf.zeros([10, 10])}
model = task_manager_instance.create_model_spec(
features=features,
params=hp.HParams(optimizer='sgd'),
learning_rate_spec=learning_rate_spec,
towers=towers,
labels=tf.ones([20], dtype=tf.int32),
mode=tf.estimator.ModeKeys.PREDICT,
model_directory=self.get_temp_dir(),
use_tpu=False,
my_id=1,
trial_mode=trial_utils.TrialMode.NO_PRIOR,
predictions_fn=_default_predictions_fn)
for phrase in not_containing:
self.assertEmpty([
node.name
for node in tf.compat.v1.get_default_graph().as_graph_def().node
if phrase in node.name
])
self.assertLen(model.predictions, 3)
self.assertIn('probabilities', model.predictions)
self.assertIn('log_probabilities', model.predictions)
self.assertIn('predictions', model.predictions)
self.assertIsNone(model.loss)
def test_tpu(self):
# Force graph mode
with tf.compat.v1.Graph().as_default():
learning_rate_spec = {'learning_rate': 0.001, 'gradient_max_norm': 3}
spec = phoenix_spec_pb2.PhoenixSpec(
problem_type=phoenix_spec_pb2.PhoenixSpec.DNN)
task_manager_instance = task_manager.TaskManager(
spec,
logits_dimension=None,
loss_fn=loss_fns.make_multi_class_loss_fn(),
head=None)
logits = tf.keras.layers.Dense(10)(tf.zeros([20, 10]))
logits_spec = architecture_utils.LogitsSpec(logits=logits)
fake_tower = collections.namedtuple('fake_tower', ['logits_spec'])
towers = {'search_generator': [fake_tower(logits_spec)]}
features = {'x': tf.zeros([10, 10])}
_ = task_manager_instance.create_model_spec(
features=features,
params=hp.HParams(optimizer='sgd'),
learning_rate_spec=learning_rate_spec,
towers=towers,
labels=tf.ones([20], dtype=tf.int32),
model_directory=self.get_temp_dir(),
mode=tf.estimator.ModeKeys.TRAIN,
my_id=1,
trial_mode=trial_utils.TrialMode.NO_PRIOR,
use_tpu=True,
predictions_fn=_default_predictions_fn)
self.assertNotEmpty([
node.name
for node in tf.compat.v1.get_default_graph().as_graph_def().node
if 'CrossReplicaSum' in node.name
])
@parameterized.named_parameters(
{
'testcase_name':
'l2_reg',
'learning_rate_spec': {
'learning_rate': 0.001,
'l2_regularization': 0.01
},
'contains_node':
'l2_weight_loss',
'not_containing': [
'label1/clip_by_global_norm', 'label1/ExponentialDecay',
'label2/clip_by_global_norm', 'label2/ExponentialDecay'
]
}, {
'testcase_name':
'clipping',
'learning_rate_spec': {
'learning_rate': 0.001,
'gradient_max_norm': 3
},
'contains_node':
'clip_by_global_norm',
'not_containing': [
'label1/l2_weight_loss', 'label1/ExponentialDecay',
'label2/l2_weight_loss', 'label2/ExponentialDecay'
]
}, {
'testcase_name':
'decay',
'learning_rate_spec': {
'learning_rate': 0.001,
'exponential_decay_steps': 100,
'exponential_decay_rate': 0.7
},
'contains_node':
'ExponentialDecay',
'not_containing': [
'label1/l2_weight_loss', 'label1/clip_by_global_norm',
'label2/l2_weight_loss', 'label2/clip_by_global_norm'
]
}, {
'testcase_name': 'multi_loss_and_pred',
'learning_rate_spec': {
'learning_rate': 0.001,
'exponential_decay_steps': 100,
'exponential_decay_rate': 0.7
},
'contains_node': 'ExponentialDecay',
'not_containing': [
'label1/l2_weight_loss', 'label1/clip_by_global_norm',
'label2/l2_weight_loss', 'label2/clip_by_global_norm'
],
'multi_loss': True,
'multi_prediction': True
}, {
'testcase_name': 'l2_reg_merged',
'learning_rate_spec': {
'learning_rate': 0.001,
'l2_regularization': 0.01
},
'contains_node': 'gradients/AddN',
'not_containing': [
'label1/clip_by_global_norm', 'label1/ExponentialDecay',
'label2/clip_by_global_norm', 'label2/ExponentialDecay'
],
'merge_losses': True
}, {
'testcase_name': 'clipping_merged',
'learning_rate_spec': {
'learning_rate': 0.001,
'gradient_max_norm': 3
},
'contains_node': 'gradients/AddN',
'not_containing': [
'label1/l2_weight_loss', 'label1/ExponentialDecay',
'label2/l2_weight_loss', 'label2/ExponentialDecay'
],
'merge_losses': True
}, {
'testcase_name': 'decay_merged',
'learning_rate_spec': {
'learning_rate': 0.001,
'exponential_decay_steps': 100,
'exponential_decay_rate': 0.7
},
'contains_node': 'gradients/AddN',
'not_containing': [
'label1/l2_weight_loss', 'label1/clip_by_global_norm',
'label2/l2_weight_loss', 'label2/clip_by_global_norm'
],
'merge_losses': True
}, {
'testcase_name': 'multi_loss_and_pred_merged',
'learning_rate_spec': {
'learning_rate': 0.001,
'exponential_decay_steps': 100,
'exponential_decay_rate': 0.7
},
'contains_node': 'gradients/AddN',
'not_containing': [
'label1/l2_weight_loss', 'label1/clip_by_global_norm',
'label2/l2_weight_loss', 'label2/clip_by_global_norm'
],
'multi_loss': True,
'multi_prediction': True,
'merge_losses': True
})
def test_multitask(self,
learning_rate_spec,
contains_node,
not_containing,
multi_loss=False,
multi_prediction=False,
merge_losses=False):
# Force graph mode
with tf.compat.v1.Graph().as_default():
spec = phoenix_spec_pb2.PhoenixSpec(
problem_type=phoenix_spec_pb2.PhoenixSpec.DNN)
text_format.Merge(
"""
multi_task_spec {
label_name: "label1"
number_of_classes: 10
}
multi_task_spec {
label_name: "label2"
number_of_classes: 10
}
""", spec)
spec.merge_losses_of_multitask = merge_losses
loss_fn = loss_fns.make_multi_class_loss_fn()
if multi_loss:
loss_fn = {
'label1': loss_fns.make_multi_class_loss_fn(),
'label2': loss_fns.make_multi_class_loss_fn()
}
task_manager_instance = task_manager.TaskManager(
spec, logits_dimension=None, loss_fn=loss_fn, head=None)
logits = tf.keras.layers.Dense(10)(tf.zeros([20, 10]))
logits_spec = architecture_utils.LogitsSpec(logits=logits)
fake_tower = collections.namedtuple('fake_tower',
['logits_spec', 'previous_model_dir'])
towers = {'search_generator': [fake_tower(logits_spec, None)]}
features = {'x': tf.zeros([10, 10])}
prediction_fn = _default_predictions_fn
if multi_prediction:
prediction_fn = {
'label1': _default_predictions_fn,
'label2': _default_predictions_fn
}
model = task_manager_instance.create_model_spec(
features=features,
params=hp.HParams(optimizer='sgd'),
learning_rate_spec=learning_rate_spec,
towers=towers,
labels={
'label1': tf.ones([20], dtype=tf.int32),
'label2': tf.ones([20], dtype=tf.int32)
},
my_id=1,
model_directory=self.get_temp_dir(),
mode=tf.estimator.ModeKeys.TRAIN,
trial_mode=trial_utils.TrialMode.NO_PRIOR,
use_tpu=False,
predictions_fn=prediction_fn)
self.assertNotEmpty([
node.name
for node in tf.compat.v1.get_default_graph().as_graph_def().node
if contains_node in node.name
])
for phrase in not_containing:
self.assertEmpty([
node.name
for node in tf.compat.v1.get_default_graph().as_graph_def().node
if phrase in node.name
])
self.assertLen(model.predictions, 3 * (1 + 2))
self.assertContainsSubset([
'probabilities',
'probabilities/label1',
'probabilities/label2',
'log_probabilities',
'log_probabilities/label1',
'log_probabilities/label2',
'predictions',
'predictions/label1',
'predictions/label2',
], model.predictions.keys())
@parameterized.named_parameters(
{
'testcase_name': 'feature_weight_vanilla',
'is_multitask': False,
'weight_is_a_feature': False
}, {
          'testcase_name': 'feature_weight_multitask',
'is_multitask': True,
'weight_is_a_feature': False
}, {
'testcase_name': 'feature_weight_in_labels',
'is_multitask': False,
'weight_is_a_feature': True
}, {
'testcase_name': 'feature_weight_multitask_in_labels',
'is_multitask': True,
'weight_is_a_feature': True
})
def test_weight_feature(self, is_multitask, weight_is_a_feature):
# Force graph mode
with tf.compat.v1.Graph().as_default():
learning_rate_spec = {'learning_rate': 0.001, 'gradient_max_norm': 3}
spec = phoenix_spec_pb2.PhoenixSpec(
problem_type=phoenix_spec_pb2.PhoenixSpec.DNN)
labels = tf.ones([20], dtype=tf.int32)
if is_multitask:
text_format.Merge(
"""
multi_task_spec {
label_name: "label1"
number_of_classes: 10
weight_feature_name: "weight1"
weight_is_a_feature: %s
}
multi_task_spec {
label_name: "label2"
number_of_classes: 10
weight_feature_name: "weight2"
weight_is_a_feature: %s
}
""" % (str(weight_is_a_feature), str(weight_is_a_feature)), spec)
labels = {
'label1': tf.ones([20], dtype=tf.int32),
'label2': tf.ones([20], dtype=tf.int32)
}
weights = {
'weight1': tf.constant([2] * 20),
'weight2': tf.constant([3] * 20)
}
features = {'x': tf.zeros([10, 10])}
if weight_is_a_feature:
features.update(weights)
elif isinstance(labels, dict):
labels.update(weights)
task_manager_instance = task_manager.TaskManager(
spec,
logits_dimension=None,
loss_fn=loss_fns.make_multi_class_loss_fn(),
head=None)
logits = tf.keras.layers.Dense(10)(tf.zeros([20, 10]))
logits_spec = architecture_utils.LogitsSpec(logits=logits)
fake_tower = collections.namedtuple('fake_tower',
['logits_spec', 'previous_model_dir'])
towers = {'search_generator': [fake_tower(logits_spec, None)]}
_ = task_manager_instance.create_model_spec(
features=features,
params=hp.HParams(optimizer='sgd'),
learning_rate_spec=learning_rate_spec,
towers=towers,
labels=labels,
model_directory=self.get_temp_dir(),
mode=tf.estimator.ModeKeys.TRAIN,
my_id=1,
trial_mode=trial_utils.TrialMode.NO_PRIOR,
use_tpu=False,
predictions_fn=_default_predictions_fn)
@parameterized.named_parameters(
{
          'testcase_name': 'feature_weight_multitask',
'weight_is_a_feature': False
}, {
'testcase_name': 'feature_weight_multitask_in_labels',
'weight_is_a_feature': True
})
def test_wrong_dict_weight_feature(self, weight_is_a_feature):
learning_rate_spec = {'learning_rate': 0.001, 'gradient_max_norm': 3}
spec = phoenix_spec_pb2.PhoenixSpec(
problem_type=phoenix_spec_pb2.PhoenixSpec.DNN)
text_format.Merge(
"""
multi_task_spec {
label_name: "label1"
number_of_classes: 10
weight_feature_name: "weight1"
weight_is_a_feature: %s
}
multi_task_spec {
label_name: "label2"
number_of_classes: 10
weight_feature_name: "weight2"
weight_is_a_feature: %s
}
""" % (str(weight_is_a_feature), str(weight_is_a_feature)), spec)
labels = {
'label1': tf.ones([20], dtype=tf.int32),
'label2': tf.ones([20], dtype=tf.int32),
}
# Fix the size of the dict labels to bypass the assertion.
if not weight_is_a_feature:
labels.update({
'not_used': tf.ones([20], dtype=tf.int32),
'not_used2': tf.ones([20], dtype=tf.int32)
})
weights = {
'weight1': tf.constant([2] * 20),
'weight2': tf.constant([3] * 20)
}
features = {'x': tf.zeros([10, 10])}
if not weight_is_a_feature:
features.update(weights)
task_manager_instance = task_manager.TaskManager(
spec,
logits_dimension=None,
loss_fn=loss_fns.make_multi_class_loss_fn(),
head=None)
logits = tf.keras.layers.Dense(10)(tf.zeros([20, 10]))
logits_spec = architecture_utils.LogitsSpec(logits=logits)
fake_tower = collections.namedtuple('fake_tower',
['logits_spec', 'previous_model_dir'])
towers = {'search_generator': [fake_tower(logits_spec, None)]}
with self.assertRaises(KeyError):
_ = task_manager_instance.create_model_spec(
features=features,
params=hp.HParams(optimizer='sgd'),
learning_rate_spec=learning_rate_spec,
towers=towers,
labels=labels,
trial_mode=trial_utils.TrialMode.NO_PRIOR,
model_directory=self.get_temp_dir(),
mode=tf.estimator.ModeKeys.TRAIN,
my_id=1,
use_tpu=False,
predictions_fn=_default_predictions_fn)
def test_architecture(self):
# Force graph mode
with tf.compat.v1.Graph().as_default():
learning_rate_spec = {'learning_rate': 0.001, 'gradient_max_norm': 3}
spec = phoenix_spec_pb2.PhoenixSpec(
problem_type=phoenix_spec_pb2.PhoenixSpec.CNN)
text_format.Merge(
"""
multi_task_spec {
label_name: "label1"
number_of_classes: 10
architecture: "FIXED_OUTPUT_FULLY_CONNECTED_128"
}
multi_task_spec {
label_name: "label2"
number_of_classes: 10
architecture: "FIXED_OUTPUT_FULLY_CONNECTED_256"
architecture: "FIXED_OUTPUT_FULLY_CONNECTED_512"
}
""", spec)
task_manager_instance = task_manager.TaskManager(
spec,
logits_dimension=None,
loss_fn=loss_fns.make_multi_class_loss_fn(),
head=None)
logits = tf.keras.layers.Dense(10)(tf.zeros([20, 10]))
logits_spec = architecture_utils.LogitsSpec(logits=logits)
fake_tower = collections.namedtuple('fake_tower',
['logits_spec', 'previous_model_dir'])
towers = {'search_generator': [fake_tower(logits_spec, None)]}
features = {'x': tf.zeros([10, 10])}
model = task_manager_instance.create_model_spec(
features=features,
params=hp.HParams(optimizer='sgd'),
learning_rate_spec=learning_rate_spec,
towers=towers,
labels={
'label1': tf.ones([20], dtype=tf.int32),
'label2': tf.ones([20], dtype=tf.int32)
},
model_directory=self.get_temp_dir(),
mode=tf.estimator.ModeKeys.TRAIN,
my_id=1,
trial_mode=trial_utils.TrialMode.NO_PRIOR,
use_tpu=False,
predictions_fn=_default_predictions_fn)
# pylint: disable=g-complex-comprehension
self.assertNotEmpty([
node.name
for node in tf.compat.v1.get_default_graph().as_graph_def().node
if 'label1_0_search_generator/1_FIXED_OUTPUT_FULLY_CONNECTED_128' in
node.name
])
self.assertNotEmpty([
node.name
for node in tf.compat.v1.get_default_graph().as_graph_def().node
if 'label2_0_search_generator/1_FIXED_OUTPUT_FULLY_CONNECTED_256' in
node.name
])
self.assertNotEmpty([
node.name
for node in tf.compat.v1.get_default_graph().as_graph_def().node
if 'label2_0_search_generator/2_FIXED_OUTPUT_FULLY_CONNECTED_512' in
node.name
])
# pylint: enable=g-complex-comprehension
self.assertLen(model.predictions, 3 * (1 + 2))
self.assertIn('probabilities', model.predictions)
self.assertIn('log_probabilities', model.predictions)
self.assertIn('predictions', model.predictions)
def test_projection(self):
# Force graph mode
with tf.compat.v1.Graph().as_default():
learning_rate_spec = {'learning_rate': 0.001, 'gradient_max_norm': 3}
spec = phoenix_spec_pb2.PhoenixSpec(
problem_type=phoenix_spec_pb2.PhoenixSpec.DNN)
text_format.Merge(
"""
multi_task_spec {
label_name: "label1"
number_of_classes: 10
}
multi_task_spec {
label_name: "label2"
number_of_classes: 5
}
""", spec)
task_manager_instance = task_manager.TaskManager(
spec,
logits_dimension=None,
loss_fn=loss_fns.make_multi_class_loss_fn(),
head=None)
logits = tf.keras.layers.Dense(10)(tf.zeros([20, 10]))
logits_spec = architecture_utils.LogitsSpec(logits=logits)
fake_tower = collections.namedtuple('fake_tower',
['logits_spec', 'previous_model_dir'])
towers = {'search_generator': [fake_tower(logits_spec, None)]}
features = {'x': tf.zeros([10, 10])}
model = task_manager_instance.create_model_spec(
features=features,
params=hp.HParams(optimizer='sgd'),
learning_rate_spec=learning_rate_spec,
towers=towers,
labels={
'label1': tf.ones([20], dtype=tf.int32),
'label2': tf.ones([20], dtype=tf.int32)
},
my_id=1,
model_directory=self.get_temp_dir(),
mode=tf.estimator.ModeKeys.TRAIN,
trial_mode=trial_utils.TrialMode.NO_PRIOR,
use_tpu=False,
predictions_fn=_default_predictions_fn)
self.assertEmpty([
node.name
for node in tf.compat.v1.get_default_graph().as_graph_def().node
if 'label1_0_search_generator/maybe_proj' in node.name
])
self.assertNotEmpty([
node.name
for node in tf.compat.v1.get_default_graph().as_graph_def().node
if 'label2_0_search_generator/maybe_proj' in node.name
])
self.assertLen(model.predictions, 3 * (1 + 2))
self.assertIn('probabilities', model.predictions)
self.assertIn('log_probabilities', model.predictions)
self.assertIn('predictions', model.predictions)
def test_get_task(self):
spec = phoenix_spec_pb2.PhoenixSpec(
problem_type=phoenix_spec_pb2.PhoenixSpec.DNN)
text_format.Merge(
"""
multi_task_spec {
label_name: "label1"
number_of_classes: 10
}
multi_task_spec {
label_name: "label2"
number_of_classes: 5
}
""", spec)
new_tower = task_manager.Task.get_task(
phoenix_spec=spec,
tower_name='label1_0_search_generator',
architecture=np.array([1]),
is_training=True,
logits_dimesnion=10,
is_frozen=False,
hparams={},
model_directory='/tmp/',
generator_name='search_generator',
previous_tower_name=None,
previous_model_dir=None)
self.assertIsNone(new_tower.previous_model_dir)
imported_tower = task_manager.Task.get_task(
phoenix_spec=spec,
tower_name='label1_0_prior_generator',
architecture=np.array([]),
is_training=True,
logits_dimesnion=10,
is_frozen=False,
hparams={},
model_directory='/tmp/',
generator_name='prior_generator',
previous_tower_name='label1_0_search_generator',
previous_model_dir='/tmp/oldmodel')
self.assertEqual(imported_tower.previous_model_dir, '/tmp/oldmodel')
if __name__ == '__main__':
tf.enable_v2_behavior()
tf.test.main()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import time
from girder.exceptions import GirderException
FilterOperators = {
'eq': 'eq',
None: 'eq',
'=': 'eq',
'ne': 'ne',
'!=': 'ne',
'<>': 'ne',
'gte': 'gte',
'min': 'gte',
'>=': 'gte',
'gt': 'gt',
'>': 'gt',
'lt': 'lt',
'max': 'lt',
'<': 'lt',
'lte': 'lte',
'<=': 'lte',
'in': 'in',
'not_in': 'not_in',
'notin': 'not_in',
'regex': 'regex', # case sensitive regex
'regexp': 'regex',
'~': 'regex',
'not_regex': 'not_regex',
'notregex': 'not_regex',
'not_regexp': 'not_regex',
'notregexp': 'not_regex',
'!~': 'not_regex',
'search': 'search', # case insensitive regex
'~*': 'search',
'not_search': 'not_search',
'notsearch': 'not_search',
'!~*': 'not_search',
'is': 'is',
'not_is': 'not_is',
'notis': 'not_is',
'isnot': 'not_is',
'is_not': 'not_is',
}
DatatypeOperators = {
'array': {'in', 'not_in', 'is', 'not_is'},
'boolean': {'eq', 'ne', 'in', 'not_in', 'is', 'not_is'},
'date': {'eq', 'ne', 'gte', 'gt', 'lt', 'lte', 'in', 'not_in', 'is',
'not_is'},
'duration': {'eq', 'ne', 'gte', 'gt', 'lt', 'lte', 'in', 'not_in', 'is',
'not_is'},
'enum': {'eq', 'ne', 'in', 'not_in', 'is', 'not_is'},
'number': {'eq', 'ne', 'gte', 'gt', 'lt', 'lte', 'in', 'not_in', 'is',
'not_is'},
'string': {'eq', 'ne', 'gte', 'gt', 'lt', 'lte', 'in', 'not_in', 'regex',
'not_regex', 'search', 'not_search', 'is', 'not_is'},
}
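# Illustrative sketch (not part of the original module; the helper name is
# hypothetical): a user-supplied operator alias is normalized through
# FilterOperators, then checked against the operators allowed for the
# field's datatype.
def _exampleOperatorCheck():
    op = FilterOperators.get('>=')            # -> 'gte'
    allowed = op in DatatypeOperators['number']
    return op, allowed                        # -> ('gte', True)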
_connectorClasses = {}
_connectorCache = {}
_connectorCacheMaxSize = 10 # Probably should make this configurable
def getDBConnectorClass(uri):
"""
Get a DB connector class. This checks if such a class exists and either
returns a reference to the class or None.
:param uri: uri or name of the connector class, as registered by
registerConnectorClass.
:return: the connector class or None
"""
dialect, clsname = getDBConnectorClassFromDialect(uri)
if clsname is None:
return None
return _connectorClasses.get(clsname, {}).get('class')
def getDBConnectorClassFromDialect(dialect, name=None):
"""
Get a DB connector class and preferred dialect. This checks if such a
class exists and either returns a class and dialect name or None.
:param dialect: name of a dialect or a uri.
:param name: name of the DB connector. If None, all DB connectors are
checked.
:return: the preferred dialect name or None.
:return: the connector class name or None.
"""
if dialect and '://' in dialect:
dialect = dialect.split('://', 1)[0]
if ':' in dialect and dialect.split(':', 1)[0] in _connectorClasses:
return dialect, dialect.split(':', 1)[0]
# Sort our classes by priority (lower is higher priority) and find the
# first class that has the specified dialect.
classes = [record[-1] for record in sorted([
(_connectorClasses[cname]['dialect'].get('priority', 0), cname)
for cname in _connectorClasses if cname == name or not name])]
for name in classes:
dialects = _connectorClasses[name]['dialect'].get('dialects', {})
if dialect in dialects:
return dialects[dialect], name
if dialect in classes:
return _connectorClasses[dialect]['dialect'].get(
'default_dialect', dialect), dialect
return None, None
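# Illustrative resolution (hypothetical registration): after
#   registerConnectorClass('sqlalchemy', SomeClass,
#                          {'dialects': {'mysql': 'mysql'}})
# a uri resolves to both a dialect and a class name:
#   getDBConnectorClassFromDialect('mysql://host/db')
#   -> ('mysql', 'sqlalchemy')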
def registerConnectorClass(name, cls, dialects):
"""
Register a connector class with a specific name.
:param name: the name to register. This is what an item has to specify in
the 'type' field.
:param cls: a reference to the connector class.
:param dialects: a dictionary of dialect names that this class handles.
The keys are allowed dialect names, and the values are
the dialect names that should be used.
"""
_connectorClasses[name] = {'class': cls, 'dialect': dialects}
def clearDBConnectorCache(id):
"""
If the id has been used in the connector cache, discard the entry.
:param id: key for the connector cache.
"""
id = str(id)
if id in _connectorCache:
_connectorCache.pop(id, None)
return True
return False
def getDBConnector(id, dbinfo):
"""
Get a specific DB connector, caching it if possible.
:param id: key for the connector cache. None to never use the cache.
:param dbinfo: a dictionary of information to pass to the connector
:return: the connector instance, or None if none available.
"""
if id is not None:
id = str(id)
conn = _connectorCache.get(id, None)
if conn is None:
connClass = getDBConnectorClass(dbinfo.get('uri'))
if connClass is None:
return None
conn = connClass(**dbinfo)
if not getattr(conn, 'initialized', None):
return None
if id is not None:
if len(_connectorCache) > _connectorCacheMaxSize:
_connectorCache.clear()
_connectorCache[id] = conn
return conn
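# Illustrative usage (hypothetical id and dbinfo; the exact dbinfo keys
# depend on the registered connector class):
#   conn = getDBConnector('item123', {'uri': 'mysql://host/db'})
# A second call with the same id returns the cached instance.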
class DatabaseConnectorException(GirderException):
pass
class DatabaseConnector(object):
# If a database connector can query available databases, set this to false
databaseNameRequired = True
def __init__(self, *args, **kwargs):
if not self.validate(**kwargs):
raise DatabaseConnectorException(
'Failed to validate database connector.')
self.initialized = False
self.allowFieldFunctions = False
self.allowSortFunctions = False
self.allowFilterFunctions = False
@classmethod
def canonicalDatabaseUri(cls, uri):
"""
Adjust a database uri to a canonical form.
:param uri: the proposed uri.
:returns: the adjusted uri.
"""
return uri
def checkOperatorDatatype(self, field, operator, fieldList=None):
"""
Check if the specified operator is allowed on a specific field,
probably based on the field type.
:param field: the name of the field that will be operated on.
:param operator: the operator to check.
:param fieldList: the known field list, probably as returned by
getFieldInfo.
:returns: True if the operator is allowed.
"""
datatype = None
if fieldList is None:
fieldList = self.getFieldInfo()
for fieldEntry in fieldList:
if field == fieldEntry.get('name'):
datatype = fieldEntry.get('datatype', fieldEntry.get('type'))
break
if datatype in DatatypeOperators:
return operator in DatatypeOperators[datatype]
return True
def getFieldInfo(self):
"""
Return a list of fields that are known and can be queried.
:return: a list of known fields. Each entry is a dictionary with name,
datatype, and optionally a description.
"""
return []
@staticmethod
def getTableList(uri, internalTables=False, **kwargs):
"""
Get a list of known databases, each of which has a list of known tables
from the database. This is of the form [{'database': (database 1),
'tables': [...]}, {'database': (database 2), 'tables': [...]}, ...].
Each table entry is of the form {'table': (table 1), 'name': (name 1)}
and may contain additional connection information, such as schema.
:param uri: uri to connect to the database.
:param internalTables: True to return tables about the database itself.
:returns: a list of databases, each with its list of known tables.
"""
return []
def isField(self, name, fields=None, allowFunc=False):
"""
Check if a specified name is a valid field. If so, return the
canonical field name or True if a function.
:param name: the name to check. This can also be a dictionary with
'field' or ('func' and optionally 'param').
:param fields: the results from getFieldInfo. If None, this calls
getFieldInfo.
:param allowFunc: if True, also allow left functions.
:return: False if this is not a known field, True if it is a function,
otherwise the canonical field name.
"""
if fields is None:
fields = self.getFieldInfo()
if isinstance(name, dict):
if 'field' in name:
name = name['field']
elif 'func' in name and allowFunc:
return self.isFunction(name, fields) is not False
else:
return False
for field in fields:
if name == field.get('name'):
return name
return False
def isFunction(self, func, fields=None):
"""
Check if the specified object is a well-formed function reference. If
it is, return a canonical form. Functions are dictionaries with at
least a 'func' or 'lfunc' in the dictionary for left values and 'rfunc'
for right values. Functions optionally have a corresponding
'(l|r|)param' key which contains either a single value or a list. Each
entry in the list contains a value or a dictionary, where the
dictionary contains either another left function, a 'field' parameter,
or a 'value' parameter. In the canonical form, this is always a
dictionary with 'func' and 'param', param is always a list, and the
list always contains dictionaries.
:param func: a dictionary containing the function specification.
:param fields: the results from getFieldInfo. If None, this calls
getFieldInfo.
:returns: False if func is not a function specification, otherwise the
canonical function dictionary.
"""
if 'func' not in func:
return False
result = {
'func': func['func'],
'param': []
}
param = func.get('param', func.get('params', []))
if not isinstance(param, (list, tuple)):
param = [param]
for entry in param:
if not isinstance(entry, dict):
entry = {'value': entry}
else:
if 'value' in entry:
entry = {'value': entry['value']}
elif 'field' in entry:
if not self.isField(entry.get('field')):
return False
entry = {'field': self.isField(entry.get('field'))}
else:
entry = self.isFunction(entry)
if entry is False:
return False
result['param'].append(entry)
return result
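# Example of canonicalization (illustrative): the loose form
#   {'func': 'lower', 'params': 'name'}
# is returned as
#   {'func': 'lower', 'param': [{'value': 'name'}]}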
def performSelect(self, fields=None, queryProps=None, filters=None, client=None):
"""
Perform a select query. The results are passed back as a dictionary
with the following values:
limit: the limit used in the query. Negative or None for all.
offset: the offset used in the query
sort: the list of sort parameters used in the query.
fields: a list of the fields that are being returned in the order
that they are returned.
data: a list with one entry per row of results. Each entry is a list
with one entry per column.
:param fields: the results from getFieldInfo.
:param queryProps: general query properties, including limit, offset,
sort, fields, group, wait, poll, and initwait.
:param filters: a list of filters to apply.
:param client: if a client is specified, a previous query made by this
client can be cancelled.
:return: the results of the query. See above.
"""
if queryProps is None:
queryProps = {}
if queryProps.get('fields') is None:
queryProps['fields'] = [field['name'] for field in (fields or [])]
return {
'limit': queryProps.get('limit'),
'offset': queryProps.get('offset'),
'sort': queryProps.get('sort'),
'fields': queryProps.get('fields'),
'data': []
}
def performSelectWithPolling(self, fields=None, queryProps=None, *args, **kwargs):
"""
Perform a select query. If polling is enabled, wait the initial wait
before making the query, then perform the query at the polling interval
until either at least one data item has been returned or the wait time
has elapsed. See performSelect for more information.
:param fields: the results from getFieldInfo. If None, this may call
getFieldInfo.
:param queryProps: general query properties, including limit, offset,
sort, fields, group, wait, poll, and initwait.
:param filters: a list of filters to apply.
:param client: if a client is specified, a previous query made by this
client can be cancelled.
:return: the results of the query. See performSelect.
"""
queryProps = queryProps or {}
wait = queryProps.get('wait')
if not wait:
return self.performSelect(fields, queryProps, *args, **kwargs)
if queryProps.get('initwait'):
time.sleep(queryProps['initwait'])
poll = queryProps.get('poll', 10)
starttime = time.time()
result = self.performSelect(fields, queryProps, *args, **kwargs)
while result is not None and not len(result['data']):
curtime = time.time()
if curtime >= starttime + wait:
break
# We wait the poll interval unless less than that amount of time is
# left in our wait cycle. If that is the case, we wait the
# greater of the remaining time and half the poll interval. This
# means that the total wait time can be up to half the poll
# interval plus the query time longer than that specified.
time.sleep(max(min(poll, starttime + wait - curtime), poll * 0.5))
result = self.performSelect(fields, queryProps, *args, **kwargs)
return result
@staticmethod
def validate(*args, **kwargs):
"""
Validate that the passed arguments are sufficient for connecting to the
database.
:returns: True if the arguments should allow connecting to the db.
"""
return False
# Enable and override to customize how data gets dumped to json
# @staticmethod
# def jsonDumps(*args, **kwargs):
# return json.dumps(*args, **kwargs)
def databaseFromUri(uri):
"""
Extract the name of the database from the database connection uri. If
there is no database, return None. The uri is of the form
(dialect)://[(user name)[:(password)]@](server)[:(port)]
[/[(database)[/]]][?(options)] or (dialect):///(path)
:param uri: the database connection uri.
:returns: the name of the database or None.
"""
if ':///' in uri:
parts = uri.split(':///', 1)[-1].split('/')
if not parts[-1]:
return None
return parts[-1]
parts = uri.split('://', 1)[-1].split('/')
if len(parts) < 2 or not parts[1]:
return None
return parts[1]
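# Illustrative results (hypothetical URIs):
#   databaseFromUri('mysql://user:pw@host:3306/mydb')  -> 'mydb'
#   databaseFromUri('mysql://user:pw@host:3306')       -> None
#   databaseFromUri('sqlite:///path/to/file.db')       -> 'file.db'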
|
|
#!/usr/bin/env python
"""
A sample implementation of Ukkonen's suffix trie.
"""
__all__ = ['MaximalRepeat', 'Node', 'STrie']
class MaximalRepeat(object):
"""
A struct-like object for maximal repeat metadata.
"""
__slots__ = ('strings', 'length', 'indices', 'contains')
def __init__(self, strings, length, indices, contains=None):
"""
`strings` is an array of strings where the maximal repeat is found.
`length` is the length of this repeat.
`indices` are the offsets in `strings` to the *last* element of the repeat.
`contains` is a reference to a contained smaller but more frequent repeat.
"""
self.strings, self.length, self.indices, self.contains =\
strings, length, indices, contains
def __repr__(self):
index = next(iter(self.indices))
string = self.strings[index[0]][index[1] - self.length + 1:index[1] + 1]
return '<' + str(list(self.indices)) + ':' + string + '>'
class Node(object):
"""
A suffix trie node.
"""
__slots__ = ('_indices', '_children', 'suffix_link')
def __init__(self, index, suffix_link=None):
"""
`index` is the 2-tuple position of this node in the suffix trie.
"""
# not using set because set takes up entirely too much memory
self._indices = index
self._children = None
self.suffix_link = suffix_link
def _update_index(self, index):
if not isinstance(self._indices, list) and self._indices != index:
self._indices = [self._indices, index]
elif index not in self._indices:
self._indices.append(index)
@property
def index_len(self):
if isinstance(self._indices, list):
return len(self._indices)
if self._indices is not None:
return 1
return 0
@property
def first_index(self):
if isinstance(self._indices, list):
return self._indices[0]
if self._indices is not None:
return self._indices
raise TypeError()
@property
def indices(self):
if isinstance(self._indices, list):
return tuple(sorted(self._indices))
return tuple([self._indices])
def __getitem__(self, index):
if self._children is not None:
if isinstance(self._children, dict):
return self._children[index]
if self._children[0] == index:
return self._children[1]
raise KeyError(index)
def __setitem__(self, index, val):
if self._children is not None:
if isinstance(self._children, dict):
self._children[index] = val
return
if self._children[0] == index:
self._children = (index, val)
return
self._children = { self._children[0]: self._children[1], index: val }
else:
self._children = (index, val)
def keys(self):
if self._children is not None:
if isinstance(self._children, dict):
return self._children.keys()
return (self._children[0],)
return ()
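# Memory note: a Node's children start as None, become a single
# (key, child) tuple on the first insertion, and are promoted to a dict
# only once a second distinct key is added.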
class STrie(object):
"""
A suffix trie.
"""
__slots__ = ('root', 'strings', 'nodes_processed', 'current', '_root_keys')
def __init__(self):
self.root = Node(None)
self.strings = []
self.nodes_processed = 0
self.current = None
self._root_keys = []
def add(self, string):
self.nodes_processed = 0
self.strings.append(string)
string_index = len(self.strings) - 1
self.current = self.root
for i in range(len(string)):
if string[i] not in self.root.keys():
self._root_keys.append(string[i])
for count in self._insert((string_index, i)):
yield count
for count in self._insert((string_index, len(string))):
yield count
yield self.nodes_processed
def _insert(self, index):
try:
key = self.strings[index[0]][index[1]]
except IndexError:
key = index
current, last_inserted = self.current, None
while current is not None:
child = None
if key in current.keys():
n = current[key]
while n.suffix_link is not None:
n._update_index(index)
n = n.suffix_link
child = current[key]
elif current.suffix_link is None:
child = Node(index, current)
else:
child = Node(index)
if last_inserted is not None:
last_inserted.suffix_link = child
current[key] = child
current, last_inserted = current.suffix_link, child
self.nodes_processed += 1
if self.nodes_processed % 1000 == 0:
yield self.nodes_processed
self.current = self.current[key]
def maximal_repeats(self,
cutoff_metric = lambda count, length: int(count >= 3 and length >= 3)):
"""
Returns maximal repeats where the count and length of the repeat are
greater than the provided cutoff metric as determined by the provided
`cutoff_metric` function taking count and length as arguments, respectively.
"""
ret = []
seen = {}
for key in self._root_keys:
result = []
stack = [(self.root[key], 1, None)]
while len(stack) != 0:
node, length, contains = stack.pop()
len_keys = len(node.keys())
if len_keys == 0 and cutoff_metric(node.index_len, length) > 0\
and (node.first_index not in seen.keys() or\
node.index_len > seen[node.first_index]):
result.append(MaximalRepeat\
(self.strings, length, node.indices, contains))
elif len_keys == 1:
stack.append( (node[node.keys()[0]], length + 1, contains) )
else:
if (node.first_index not in seen.keys() or\
node.index_len > seen[node.first_index]) and\
cutoff_metric(node.index_len, length) > 0:
contains = MaximalRepeat(self.strings, length, node.indices, contains)
result.append(contains)
for key in node.keys():
stack.append( (node[key], length + 1, contains) )
seen.update([(min(r.indices), len(r.indices)) for r in result])
ret += result
return ret
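# Illustrative usage (a standalone sketch; the helper name is hypothetical).
# add() is a generator that yields progress counts while building the trie,
# so it must be exhausted before querying for repeats.
def _example_strie_usage():
    trie = STrie()
    for _ in trie.add('mississippi'):
        pass
    # Repeats occurring at least twice with length >= 2 (e.g. 'issi').
    return trie.maximal_repeats(
        lambda count, length: int(count >= 2 and length >= 2))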
if __name__ == '__main__':
import argparse
from sys import stdout
import pygraphviz as pgv
def render_node_r(node, strings, graph):
try:
index = node.first_index
label = strings[index[0]][index[1]]
except IndexError:
label = '<eof>'
except TypeError:
label = '<root>'
suffix_link = node.suffix_link
node_id = id(node)
graph.add_node(node_id, label=label)
keys = node.keys()
for key in keys:
render_node_r(node[key], strings, graph)
graph.add_edge(node_id, id(node[key]))
if suffix_link is not None:
try:
index = suffix_link.first_index
suffix_label = strings[index[0]][index[1]]
except IndexError:
suffix_label = '<eof>'
except TypeError:
suffix_label = '<root>'
graph.add_node(id(suffix_link), label=suffix_label)
graph.add_edge(node_id, id(suffix_link), color='gray',
constraint=False)
def render_trie(trie):
graph = pgv.AGraph(directed=True)
graph.graph_attr['label'] = "Suffix Trie for '%s'" % trie.strings
graph.node_attr['shape'] = 'circle'
render_node_r(trie.root, trie.strings, graph)
return graph
def main():
parser = argparse.ArgumentParser(description = __doc__)
parser.add_argument("string", nargs='+',
help="String to render the suffix trie of")
args = parser.parse_args()
trie = STrie()
for string in args.string:
print(string)
for count in trie.add(string):
stdout.write('\r\t%d nodes processed' % count)
stdout.flush()
stdout.write('\n')
result = render_trie(trie)
result.layout('dot')
result.draw(''.join(args.string) + '-strie.png')
repeats = sorted(trie.maximal_repeats(
lambda repeats, length: repeats >= 3 and length >= 3),
key=lambda x: x.length, reverse=True)
for r in repeats:
print(r)
main()
|
|
import unittest
from picogeojson import (Point, LineString, Polygon,
MultiPoint, MultiLineString, MultiPolygon,
GeometryCollection, Feature, FeatureCollection,
DEFAULTCRS)
from picogeojson.map import Map
class MapTests(unittest.TestCase):
def setUp(self):
self.geometrycollection = \
GeometryCollection(
[Point((1, 2)),
Polygon([[(10, 10), (10, 11), (9, 11), (9, 10), (10, 10)]]),
LineString([(1, 1), (2, 2), (3, 3)]),
GeometryCollection(
[Point((3, 4)),
MultiPolygon([[[(0, 0), (3, 0), (3, 3), (0, 3), (0, 0)],
[(1, 1), (1, 2), (2, 2), (2, 1), (1, 1)]],
[[(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)]],
[[(0, 0), (3, 0), (3, 3), (0, 3), (0, 0)],
[(1, 1), (1, 2), (2, 2), (2, 1), (1, 1)]],
[[(0, 0), (100, 0), (100, 100), (0, 100), (0, 0)],
[(10, 10), (10, 20), (20, 20), (20, 10), (10, 10)],
[(50, 50), (50, 55), (55, 60), (60, 50), (50, 50)]]]),
Point((5, 6)),
LineString([(1, 1), (2, 2), (3, 3)]),
Polygon([[(1, 1), (0, 2), (-1, 1), (1, 0), (1, 1)]])],
DEFAULTCRS),
MultiPoint([(7, 8), (9, 10)]),
LineString([(1, 1), (2, 2), (3, 3)]),
Point((11, 12)),
LineString([(1, 1), (2, 2), (3, 3)]),
MultiLineString([[(1, 1), (2, 2), (3, 3)],
[(4, 4), (5, 5), (6, 6)]]),
],
DEFAULTCRS)
self.featurecollection = FeatureCollection([
Feature(Point((-1, -2)), {"style": "stout"}),
Feature(MultiPolygon([[[(0, 0), (3, 0), (3, 3), (0, 3), (0, 0)],
[(1, 1), (1, 2), (2, 2), (2, 1), (1, 1)]],
[[(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)]],
[[(0, 0), (3, 0), (3, 3), (0, 3), (0, 0)],
[(1, 1), (1, 2), (2, 2), (2, 1), (1, 1)]],
[[(0, 0), (100, 0), (100, 100), (0, 100), (0, 0)],
[(10, 10), (10, 20), (20, 20), (20, 10), (10, 10)],
[(50, 50), (50, 55), (55, 60), (60, 50), (50, 50)]]]),
{"style": "stout"}),
Feature(LineString([(-1, -2), (-3, -4), (-5, -3)]),
{"style": "lager"}),
Feature(Polygon([[(-1, -2), (-3, -4), (-5, -3), (-1, -2)]]),
{"style": "saison"}),
Feature(MultiPoint([(-1, -2), (-3, -4), (-5, -3)]),
{"style": "kolsch"}),
Feature(MultiLineString([[(0, 0), (1, 1), (2, 3)],
[(5, 6), (1, 3), (4, 7)]]),
{"style": "pilsner"}),
], DEFAULTCRS)
def test_get_points(self):
result = Map(self.geometrycollection)
count = 0
for pt in result.points:
self.assertTrue(isinstance(pt, Point))
count += 1
self.assertEqual(count, 4)
def test_get_linestrings(self):
result = Map(self.geometrycollection)
count = 0
for ls in result.linestrings:
self.assertTrue(isinstance(ls, LineString))
count += 1
self.assertEqual(count, 4)
def test_get_polygons(self):
result = Map(self.geometrycollection)
count = 0
for pg in result.polygons:
self.assertTrue(isinstance(pg, Polygon))
count += 1
self.assertEqual(count, 2)
def test_get_multipoints(self):
result = Map(self.geometrycollection)
count = 0
for mpt in result.multipoints:
self.assertTrue(isinstance(mpt, MultiPoint))
count += 1
self.assertEqual(count, 1)
def test_get_multilinestrings(self):
result = Map(self.geometrycollection)
count = 0
for mls in result.multilinestrings:
self.assertTrue(isinstance(mls, MultiLineString))
count += 1
self.assertEqual(count, 1)
def test_get_multipolygons(self):
result = Map(self.geometrycollection)
count = 0
for mpg in result.multipolygons:
self.assertTrue(isinstance(mpg, MultiPolygon))
count += 1
self.assertEqual(count, 1)
def test_get_point_features(self):
result = Map(self.featurecollection)
count = 0
for f in result.extract_features(Point):
self.assertTrue(isinstance(f, Feature))
self.assertTrue(isinstance(f.geometry, Point))
count += 1
self.assertEqual(count, 1)
def test_get_linestring_features(self):
result = Map(self.featurecollection)
count = 0
for f in result.extract_features(LineString):
self.assertTrue(isinstance(f, Feature))
self.assertTrue(isinstance(f.geometry, LineString))
count += 1
self.assertEqual(count, 1)
def test_get_polygon_features(self):
result = Map(self.featurecollection)
count = 0
for f in result.extract_features(Polygon):
self.assertTrue(isinstance(f, Feature))
self.assertTrue(isinstance(f.geometry, Polygon))
count += 1
self.assertEqual(count, 1)
def test_get_multipoint_features(self):
result = Map(self.featurecollection)
count = 0
for f in result.extract_features(MultiPoint):
self.assertTrue(isinstance(f, Feature))
self.assertTrue(isinstance(f.geometry, MultiPoint))
count += 1
self.assertEqual(count, 1)
def test_get_multilinestring_features(self):
result = Map(self.featurecollection)
count = 0
for f in result.extract_features(MultiLineString):
self.assertTrue(isinstance(f, Feature))
self.assertTrue(isinstance(f.geometry, MultiLineString))
count += 1
self.assertEqual(count, 1)
def test_get_multipolygon_features(self):
result = Map(self.featurecollection)
count = 0
for f in result.extract_features(MultiPolygon):
self.assertTrue(isinstance(f, Feature))
self.assertTrue(isinstance(f.geometry, MultiPolygon))
count += 1
self.assertEqual(count, 1)
def test_features_argument_error(self):
result = Map(self.featurecollection)
with self.assertRaises(TypeError):
[a for a in result.extract_features({"style": "stout"})]
def test_get_by_attributes(self):
result = Map(self.featurecollection)
count = 0
for f in result.extract_features(properties={"style": "stout"}):
count += 1
self.assertEqual(count, 2)
for f in result.extract_features(properties={"style": "kolsch"}):
self.assertTrue(isinstance(f.geometry, MultiPoint))
def test_map_geometries(self):
m = Map(
GeometryCollection([
Point((1, 2)),
LineString([(5, 7), (2, 4), (5, 9)]),
MultiPoint([(0, 3), (12, 5), (6, 2), (8, 9)]),
])
)
expected_original = GeometryCollection([
Point((1, 2)),
LineString([(5, 7), (2, 4), (5, 9)]),
MultiPoint([(0, 3), (12, 5), (6, 2), (8, 9)]),
])
expected_new = GeometryCollection([
Point((-1, -2)),
LineString([(5, 7), (2, 4), (5, 9)]),
MultiPoint([(0, 3), (12, 5), (6, 2), (8, 9)]),
])
m2 = m.map(lambda pt: Point((-pt.coordinates[0], -pt.coordinates[1])),
Point)
self.assertEqual(m.raw, expected_original)
self.assertEqual(m2.raw, expected_new)
def test_map_geometries_to_feature_fails(self):
m = Map(
GeometryCollection([
Point((1, 2)),
LineString([(5, 7), (2, 4), (5, 9)]),
MultiPoint([(0, 3), (12, 5), (6, 2), (8, 9)]),
])
)
with self.assertRaises(TypeError):
m2 = m.map(lambda pt: Feature(
Point((-pt.coordinates[0], -pt.coordinates[1])),
{}
), Point)
def test_map_features(self):
m = Map(
FeatureCollection([
Feature(Point((1, 0)), {"color": "red"}),
Feature(Point((3, 2)), {"color": "blue"}),
])
)
expected = FeatureCollection([
Feature(Point((-1, 0)), {"color": "red"}),
Feature(Point((-3, 2)), {"color": "blue"}),
])
new = m.map_features(
lambda f: Feature(Point((-f.geometry.coordinates[0],
f.geometry.coordinates[1])),
f.properties)
)
self.assertEqual(new.raw, expected)
def test_map_features_by_properties(self):
m = Map(
FeatureCollection([
Feature(Point((1, 0)), {"color": "red"}),
Feature(Point((3, 2)), {"color": "blue"}),
])
)
expected = FeatureCollection([
Feature(Point((1, 0)), {"color": "red"}),
Feature(Point((-3, 2)), {"color": "blue"}),
])
new = m.map_features(
lambda f: Feature(Point((-f.geometry.coordinates[0],
f.geometry.coordinates[1])),
f.properties),
properties={"color": "blue"}
)
self.assertEqual(new.raw, expected)
if __name__ == "__main__":
unittest.main()
|
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A wrapper around ssh for common operations on a CrOS-based device"""
import logging
import os
import re
import subprocess
import sys
import tempfile
# TODO(nduca): This whole file is built up around making individual ssh calls
# for each operation. It really could get away with a single ssh session built
# around pexpect, I suspect, if we wanted it to be faster. But, this was
# convenient.
def IsRunningOnCrosDevice():
"""Returns True if we're on a ChromeOS device."""
lsb_release = '/etc/lsb-release'
if sys.platform.startswith('linux') and os.path.exists(lsb_release):
with open(lsb_release, 'r') as f:
res = f.read()
if res.count('CHROMEOS_RELEASE_NAME'):
return True
return False
def RunCmd(args, cwd=None, quiet=False):
"""Opens a subprocess to execute a program and returns its return value.
Args:
args: A string or a sequence of program arguments. The program to execute is
the string or the first item in the args sequence.
cwd: If not None, the subprocess's current directory will be changed to
|cwd| before it's executed.
Returns:
Return code from the command execution.
"""
if not quiet:
logging.debug(' '.join(args) + ' ' + (cwd or ''))
with open(os.devnull, 'w') as devnull:
p = subprocess.Popen(args=args, cwd=cwd, stdout=devnull,
stderr=devnull, stdin=devnull, shell=False)
return p.wait()
def GetAllCmdOutput(args, cwd=None, quiet=False):
"""Open a subprocess to execute a program and returns its output.
Args:
args: A string or a sequence of program arguments. The program to execute is
the string or the first item in the args sequence.
cwd: If not None, the subprocess's current directory will be changed to
|cwd| before it's executed.
Returns:
A (stdout, stderr) tuple captured from the command. Unless |quiet| is
set, both are also logged at debug level.
"""
if not quiet:
logging.debug(' '.join(args) + ' ' + (cwd or ''))
with open(os.devnull, 'w') as devnull:
p = subprocess.Popen(args=args, cwd=cwd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, stdin=devnull)
stdout, stderr = p.communicate()
if not quiet:
logging.debug(' > stdout=[%s], stderr=[%s]', stdout, stderr)
return stdout, stderr
def HasSSH():
try:
RunCmd(['ssh'], quiet=True)
RunCmd(['scp'], quiet=True)
logging.debug("HasSSH()->True")
return True
except OSError:
logging.debug("HasSSH()->False")
return False
class LoginException(Exception):
pass
class KeylessLoginRequiredException(LoginException):
pass
class CrOSInterface(object):
# pylint: disable=R0923
def __init__(self, hostname = None, ssh_identity = None):
self._hostname = hostname
# List of ports generated from GetRemotePort() that may not be in use yet.
self._reserved_ports = []
if self.local:
return
self._ssh_identity = None
self._ssh_args = ['-o ConnectTimeout=5',
'-o StrictHostKeyChecking=no',
'-o KbdInteractiveAuthentication=no',
'-o PreferredAuthentications=publickey',
'-o UserKnownHostsFile=/dev/null']
if ssh_identity:
self._ssh_identity = os.path.abspath(os.path.expanduser(ssh_identity))
@property
def local(self):
return not self._hostname
@property
def hostname(self):
return self._hostname
def FormSSHCommandLine(self, args, extra_ssh_args=None):
if self.local:
# We run the command through the shell locally for consistency with
# how commands are run through SSH (crbug.com/239161). This work
# around will be unnecessary once we implement a persistent SSH
# connection to run remote commands (crbug.com/239607).
return ['sh', '-c', " ".join(args)]
full_args = ['ssh',
'-o ForwardX11=no',
'-o ForwardX11Trusted=no',
'-n'] + self._ssh_args
if self._ssh_identity is not None:
full_args.extend(['-i', self._ssh_identity])
if extra_ssh_args:
full_args.extend(extra_ssh_args)
full_args.append('root@%s' % self._hostname)
full_args.extend(args)
return full_args
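# For example (illustrative), with hostname 'remotebox' and no identity
# file, FormSSHCommandLine(['ls', '/tmp']) yields a list of the form
# ['ssh', '-o ForwardX11=no', '-o ForwardX11Trusted=no', '-n',
#  <self._ssh_args...>, 'root@remotebox', 'ls', '/tmp'].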
def _RemoveSSHWarnings(self, toClean):
"""Removes specific ssh warning lines from a string.
Args:
toClean: A string that may contain multiple lines.
Returns:
A copy of toClean with all the Warning lines removed.
"""
# Remove the Warning about connecting to a new host for the first time.
return re.sub('Warning: Permanently added [^\n]* to the list of known '
'hosts.\s\n', '', toClean)
def RunCmdOnDevice(self, args, cwd=None, quiet=False):
stdout, stderr = GetAllCmdOutput(
self.FormSSHCommandLine(args), cwd, quiet=quiet)
# The initial login will add the host to the hosts file but will also print
# a warning to stderr that we need to remove.
stderr = self._RemoveSSHWarnings(stderr)
return stdout, stderr
def TryLogin(self):
logging.debug('TryLogin()')
assert not self.local
stdout, stderr = self.RunCmdOnDevice(['echo', '$USER'], quiet=True)
if stderr != '':
if 'Host key verification failed' in stderr:
raise LoginException(('%s host key verification failed. ' +
'SSH to it manually to fix connectivity.') %
self._hostname)
if 'Operation timed out' in stderr:
raise LoginException('Timed out while logging into %s' % self._hostname)
if 'UNPROTECTED PRIVATE KEY FILE!' in stderr:
raise LoginException('Permissions for %s are too open. To fix this,\n'
'chmod 600 %s' % (self._ssh_identity,
self._ssh_identity))
if 'Permission denied (publickey,keyboard-interactive)' in stderr:
raise KeylessLoginRequiredException(
'Need to set up ssh auth for %s' % self._hostname)
raise LoginException('While logging into %s, got %s' % (
self._hostname, stderr))
if stdout != 'root\n':
raise LoginException(
'Logged into %s, expected $USER=root, but got %s.' % (
self._hostname, stdout))
def FileExistsOnDevice(self, file_name):
if self.local:
return os.path.exists(file_name)
stdout, stderr = self.RunCmdOnDevice([
'if', 'test', '-e', file_name, ';',
'then', 'echo', '1', ';',
'fi'
], quiet=True)
if stderr != '':
if "Connection timed out" in stderr:
raise OSError('Machine wasn\'t responding to ssh: %s' %
stderr)
raise OSError('Unexpected error: %s' % stderr)
exists = stdout == '1\n'
logging.debug("FileExistsOnDevice(<text>, %s)->%s" % (file_name, exists))
return exists
def PushFile(self, filename, remote_filename):
if self.local:
args = ['cp', '-r', filename, remote_filename]
stdout, stderr = GetAllCmdOutput(args, quiet=True)
if stderr != '':
raise OSError('No such file or directory %s' % stderr)
return
args = ['scp', '-r' ] + self._ssh_args
if self._ssh_identity:
args.extend(['-i', self._ssh_identity])
args.extend([os.path.abspath(filename),
'root@%s:%s' % (self._hostname, remote_filename)])
stdout, stderr = GetAllCmdOutput(args, quiet=True)
stderr = self._RemoveSSHWarnings(stderr)
if stderr != '':
raise OSError('No such file or directory %s' % stderr)
def PushContents(self, text, remote_filename):
logging.debug("PushContents(<text>, %s)" % remote_filename)
with tempfile.NamedTemporaryFile() as f:
f.write(text)
f.flush()
self.PushFile(f.name, remote_filename)
def GetFileContents(self, filename):
assert not self.local
with tempfile.NamedTemporaryFile() as f:
args = ['scp'] + self._ssh_args
if self._ssh_identity:
args.extend(['-i', self._ssh_identity])
args.extend(['root@%s:%s' % (self._hostname, filename),
os.path.abspath(f.name)])
stdout, stderr = GetAllCmdOutput(args, quiet=True)
stderr = self._RemoveSSHWarnings(stderr)
if stderr != '':
raise OSError('No such file or directory %s' % stderr)
with open(f.name, 'r') as f2:
res = f2.read()
logging.debug("GetFileContents(%s)->%s" % (filename, res))
return res
def ListProcesses(self):
"""Returns (pid, cmd, ppid, state) of all processes on the device."""
stdout, stderr = self.RunCmdOnDevice([
'/bin/ps', '--no-headers',
'-A',
'-o', 'pid,ppid,args:4096,state'], quiet=True)
assert stderr == '', stderr
procs = []
for l in stdout.split('\n'): # pylint: disable=E1103
if l == '':
continue
m = re.match('^\s*(\d+)\s+(\d+)\s+(.+)\s+(.+)', l, re.DOTALL)
assert m
procs.append((int(m.group(1)), m.group(3).rstrip(),
int(m.group(2)), m.group(4)))
logging.debug("ListProcesses(<predicate>)->[%i processes]" % len(procs))
return procs
def RmRF(self, filename):
logging.debug("rm -rf %s" % filename)
self.RunCmdOnDevice(['rm', '-rf', filename], quiet=True)
def Chown(self, filename):
self.RunCmdOnDevice(['chown', '-R', 'chronos:chronos', filename])
def KillAllMatching(self, predicate):
kills = ['kill', '-KILL']
for pid, cmd, _, _ in self.ListProcesses():
if predicate(cmd):
logging.info('Killing %s, pid %d' % (cmd, pid))
kills.append(pid)
logging.debug("KillAllMatching(<predicate>)->%i" % (len(kills) - 2))
if len(kills) > 2:
self.RunCmdOnDevice(kills, quiet=True)
return len(kills) - 2
def IsServiceRunning(self, service_name):
stdout, stderr = self.RunCmdOnDevice([
'status', service_name], quiet=True)
assert stderr == '', stderr
running = 'running, process' in stdout
logging.debug("IsServiceRunning(%s)->%s" % (service_name, running))
return running
def GetRemotePort(self):
netstat = self.RunCmdOnDevice(['netstat', '-ant'])
netstat = netstat[0].split('\n')
ports_in_use = []
for line in netstat[2:]:
if not line:
continue
address_in_use = line.split()[3]
port_in_use = address_in_use.split(':')[-1]
ports_in_use.append(int(port_in_use))
ports_in_use.extend(self._reserved_ports)
new_port = sorted(ports_in_use)[-1] + 1
self._reserved_ports.append(new_port)
return new_port
def IsHTTPServerRunningOnPort(self, port):
wget_output = self.RunCmdOnDevice(
['wget', 'localhost:%i' % (port), '-T1', '-t1'])
if 'Connection refused' in wget_output[1]:
return False
return True
def FilesystemMountedAt(self, path):
"""Returns the filesystem mounted at |path|"""
df_out, _ = self.RunCmdOnDevice(['/bin/df', path])
df_ary = df_out.split('\n')
# 3 lines for title, mount info, and empty line.
if len(df_ary) == 3:
line_ary = df_ary[1].split()
if line_ary:
return line_ary[0]
return None
def TakeScreenShot(self, screenshot_prefix):
"""Takes a screenshot, useful for debugging failures."""
# TODO(achuith): Find a better location for screenshots. Cros autotests
# upload everything in /var/log so use /var/log/screenshots for now.
SCREENSHOT_DIR = '/var/log/screenshots/'
SCREENSHOT_EXT = '.png'
self.RunCmdOnDevice(['mkdir', '-p', SCREENSHOT_DIR])
for i in xrange(25):
screenshot_file = ('%s%s-%d%s' %
(SCREENSHOT_DIR, screenshot_prefix, i, SCREENSHOT_EXT))
if not self.FileExistsOnDevice(screenshot_file):
self.RunCmdOnDevice([
'DISPLAY=:0.0 XAUTHORITY=/home/chronos/.Xauthority '
'/usr/local/bin/import',
'-window root',
'-depth 8',
screenshot_file])
return
logging.warning('screenshot directory full.')
|
|
# coding=utf-8
# 2-dimensional ising model with visualization
# Written by Kyrre Ness Sjoebaek
from __future__ import division
import matplotlib.pyplot as plt
from numba import jit
import numpy
import numpy as np
import sys
import math
import pygame
from timeit import default_timer as timer
from scipy import integrate
from scipy import special
from collections import Counter
# Needed for visualize when using SDL
SCREEN = None
FONT = None
BLOCKSIZE = 10
T_CRITICAL = 2./ np.log(1+np.sqrt(2))
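# T_CRITICAL is Onsager's exact result, 2/ln(1 + sqrt(2)) ~= 2.269,
# in units of kb*K/J.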
class ECounter(object):
def __init__(self, size, temperature=1):
self.counter = Counter()
self.size = size
self.temperature = temperature
def __call__(self, E):
self.counter[E] += 1
def to_pdf(self):
k0 = list(self.counter)
E = np.array(k0, dtype=float)  # total energies seen during sampling
counts = np.array(self.counter.values(), dtype=float)
pdf = counts / sum(counts)
ix = np.argsort(E)
return pdf[ix], E[ix]
def to_pdf_per_cell(self):
pdf, E = self.to_pdf()
return pdf, E/ float(self.size)**2
def mean_and_variance_per_cell(self):
N = float(self.size)**2
pdf, E = self.to_pdf()
mean = sum(pdf*E)
sigma2 = (sum(E**2*pdf) - mean**2)/ float(self.temperature)**2
return mean / N, sigma2 / N
@jit(nopython=True)
def periodic(i, limit, add):
"""
Choose correct matrix index with periodic
boundary conditions
Input:
- i: Base index
- limit: Highest \"legal\" index
- add: Number to add or subtract from i
"""
return (i + limit + add) % limit
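# For example, periodic(0, 10, -1) -> 9 and periodic(9, 10, 1) -> 0, so
# neighbour lookups wrap around the lattice edges.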
def dump_to_terminal(spin_matrix, temp, E, M):
# Simple terminal dump
print "temp:", temp, "E:", E, "M:", M
print spin_matrix
def pretty_print_to_terminal(spin_matrix, temp, E, M):
# Pretty-print to terminal
out = ""
size = len(spin_matrix)
for y in xrange(size):
for x in xrange(size):
if spin_matrix.item(x, y) == 1:
out += "X"
else:
out += " "
out += "\n"
print "temp:", temp, "E:", E, "M:", M
print out + "\n"
def display_single_pixel(spin_matrix, temp, E, M):
# SDL single-pixel (useful for large arrays)
size = len(spin_matrix)
SCREEN.lock()
for y in xrange(size):
for x in xrange(size):
if spin_matrix.item(x, y) == 1:
SCREEN.set_at((x, y), (255, 255, 255))
else:
SCREEN.set_at((x, y), (0, 0, 0))
SCREEN.unlock()
pygame.display.flip()
def display_block(spin_matrix, temp, E, M):
# SDL block (useful for smaller arrays)
size = len(spin_matrix)
SCREEN.lock()
for y in xrange(size):
for x in xrange(size):
if spin_matrix.item(x, y) == 1:
rect = pygame.Rect(x * BLOCKSIZE, y * BLOCKSIZE, BLOCKSIZE, BLOCKSIZE)
pygame.draw.rect(SCREEN, (255, 255, 255), rect)
else:
rect = pygame.Rect(x * BLOCKSIZE, y * BLOCKSIZE, BLOCKSIZE, BLOCKSIZE)
pygame.draw.rect(SCREEN, (0, 0, 0), rect)
SCREEN.unlock()
pygame.display.flip()
def display_block_with_data(spin_matrix, temp, E, M):
# SDL block w/ data-display
size = len(spin_matrix)
SCREEN.lock()
for y in xrange(size):
for x in xrange(size):
if spin_matrix.item(x, y) == 1:
rect = pygame.Rect(x * BLOCKSIZE, y * BLOCKSIZE, BLOCKSIZE, BLOCKSIZE)
pygame.draw.rect(SCREEN, (255, 255, 255), rect)
else:
rect = pygame.Rect(x * BLOCKSIZE, y * BLOCKSIZE, BLOCKSIZE, BLOCKSIZE)
pygame.draw.rect(SCREEN, (0, 0, 0), rect)
s = FONT.render("<E> = %5.3E; <M> = %5.3E" % (E, M), False, (255, 0, 0))
SCREEN.blit(s, (0, 0))
SCREEN.unlock()
pygame.display.flip()
def get_visualize_function(method):
vis_methods = {0: dump_to_terminal,
1: pretty_print_to_terminal,
2: display_single_pixel,  # (useful for large arrays)
3: display_block,  # (useful for smaller arrays)
4: display_block_with_data}
def plot_nothing(spin_matrix, temp, E, M):
pass
return vis_methods.get(method, plot_nothing)
def visualize(spin_matrix, temp, E, M, method):
"""
Visualize the spin matrix
Methods:
method = -1: No visualization (testing)
method = 0: Just print it to the terminal
method = 1: Pretty-print to terminal
method = 2: SDL/pygame single-pixel
method = 3: SDL/pygame rectangle
method = 4: SDL/pygame rectangle with E/M data overlay
"""
get_visualize_function(method)(spin_matrix, temp, E, M)
@jit(nopython=True)
def metropolis(E, M, w, size, spin_matrix):
# Metropolis
# Loop over all spins, pick a random spin each time
number_of_accepted_configurations = 0
for s in xrange(size**2):
x = int(numpy.random.random() * size)
y = int(numpy.random.random() * size)
deltaE = 2 * spin_matrix[x, y] * (spin_matrix[periodic(x, size, -1), y]
+ spin_matrix[periodic(x, size, 1), y]
+ spin_matrix[x, periodic(y, size, -1)]
+ spin_matrix[x, periodic(y, size, 1)])
accept = numpy.random.random() <= w[deltaE + 8]
if accept:
spin_matrix[x, y] *= -1
M += 2 * spin_matrix[x, y]
E += deltaE
number_of_accepted_configurations += 1
return E, M, number_of_accepted_configurations
@jit(nopython=True)
def _compute_initial_energy(spin_matrix, size):
# Calculate initial energy
E = 0
for j in xrange(size):
for i in xrange(size):
E -= spin_matrix[i, j] * (spin_matrix[periodic(i, size, -1), j]
+ spin_matrix[i, periodic(j, size, 1)])
return E
def monteCarlo(temp, size, trials, visualizer=None, spin_matrix='ordered', burn_in=0, ecounter=None):
"""
Calculate the energy and magnetization
(\"straight\" and squared) for a given temperature
Input:
- temp: Temperature to calculate for, in units of kb*K/J
- size: dimension of square matrix
- trials: Monte-carlo trials (how many times do we
flip the matrix?)
- visualizer: function used to visualize the spin matrix (or None)
Output:
- E_av: Energy of matrix averaged over trials, normalized to spins**2
- E_variance: Variance of energy, same normalization * temp**2
- M_av: Magnetic field of matrix, averaged over trials, normalized to spins**2
- M_variance: Variance of magnetic field, same normalization * temp
- Mabs: Absolute value of magnetic field, averaged over trials
- Mabs_variance
- num_accepted_configs
"""
if ecounter is None:
def ecounter(E):
pass
if visualizer is None:
visualizer = get_visualize_function(method=None) # No visualization
# Setup spin matrix, initialize to ground state
if spin_matrix == 'ordered':
spin_matrix = numpy.zeros((size, size), numpy.int8) + 1
elif spin_matrix == 'random':
spin_matrix = np.array(numpy.random.random(size=(size, size))>0.5, dtype=numpy.int8)
else:
raise NotImplementedError('method')
# Setup array for possible energy changes
w = numpy.zeros(17, dtype=float)
for de in xrange(-8, 9, 4): # include +8
w[de + 8] = math.exp(-de / temp)
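# A single spin flip changes E by one of {-8, -4, 0, 4, 8} (units of J),
# so only those five slots of the 17-element w array hold meaningful
# Boltzmann factors; indexing by deltaE + 8 avoids negative indices.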
# Calculate initial magnetization:
M = spin_matrix.sum()
E = _compute_initial_energy(spin_matrix, size)
for i in xrange(burn_in):
E, M, num_accepted_configs = metropolis(E, M, w, size, spin_matrix)
# Create and initialize variables
E_av = E2_av = M_av = M2_av = Mabs_av = 0.0
total_accepted_configs = 0
# Start metropolis MonteCarlo computation
for i in xrange(trials):
E, M, num_accepted_configs = metropolis(E, M, w, size, spin_matrix)
# Update expectation values
total_accepted_configs += num_accepted_configs
E_av += E
E2_av += E**2
M_av += M
M2_av += M**2
Mabs_av += int(math.fabs(M))
ecounter(E)
visualizer(spin_matrix, temp, E / float(size**2), M / float(size**2))
# Normalize average values
E_av /= float(trials)
E2_av /= float(trials)
M_av /= float(trials)
M2_av /= float(trials)
Mabs_av /= float(trials)
# Calculate variance and normalize to per-point and temp
E_variance = (E2_av - E_av * E_av) / float(size * size * temp * temp)
M_variance = (M2_av - M_av * M_av) / float(size * size * temp)
Mabs_variance = (M2_av - Mabs_av * Mabs_av) / float(size * size * temp)
# Normalize returned averages to per-point
E_av /= float(size * size)
M_av /= float(size * size)
Mabs_av /= float(size * size)
return E_av, E_variance, M_av, M_variance, Mabs_av, Mabs_variance, total_accepted_configs
def initialize_pygame(size, method):
global SCREEN, FONT
# Initialize pygame
if method == 2 or method == 3 or method == 4:
pygame.init()
if method == 2:
SCREEN = pygame.display.set_mode((size, size))
elif method == 3:
SCREEN = pygame.display.set_mode((size * 10, size * 10))
elif method == 4:
SCREEN = pygame.display.set_mode((size * 10, size * 10))
FONT = pygame.font.Font(None, 12)
def partition2(T):
'''
Return the partition2 function for 2x2 lattice
Parameters:
-----------
T: arraylike
normalized temperature in units of kb*K/J, kb = boltzmann's constant, K = Kelvin,
J is the coupling constant expressing the strength of the interaction between neighbouring spins
'''
z = 12+4*np.cosh(8.0/T)
return z
def partition(T, size=2):
'''
Return the partition function for size x size lattice
Parameters:
-----------
T: arraylike
normalized temperature in units of kb*K/J, kb = boltzmann's constant, K = Kelvin,
J is the coupling constant expressing the strength of the interaction between neighbouring spins
'''
kappa = 2*np.sinh(2./T)/np.cosh(2./T)**2
N = size**2
k1 = special.ellipk(kappa**2)
def energy_mean_asymptotic(T):
'''
Return mean energy for size x size lattice normalized by spins**2 * size **2
Parameters:
-----------
T: arraylike
normalized temperature in units of kb*K/J, kb = boltzmann's constant, K = Kelvin,
J is the coupling constant expressing the strength of the interaction between neighbouring spins
Output:
- E_av: mean of energy, normalized to spins**2
page 428 in lecture notes
'''
denominator = np.tanh(2.0/T)
kappa = 2*denominator/np.cosh(2./T)
k1 = special.ellipk(kappa**2)
# k = 1./np.sinh(2.0/T)**2
# def integrand(theta):
# return 1./np.sqrt(1-4.*k*(np.sin(theta)/(1+k))**2)
# k11, abserr = integrate.quad(integrand, 0, np.pi/2, epsabs=1e-3, epsrel=1e-3)
return -(1+ 2/np.pi *(2*denominator**2-1)*k1)/denominator
def energy_mean2(T):
'''
Return mean energy for 2 x 2 lattice normalized by spins**2 * 4
Parameters:
-----------
T: arraylike
normalized temperature in units of kb*K/J, kb = boltzmann's constant, K = Kelvin,
J is the coupling constant expressing the strength of the interaction between neighbouring spins
Output:
- E_av: mean of energy, normalized to spins**2
'''
size = 2
return -8*np.sinh(8.0/T)/(np.cosh(8.0/T)+3)/size**2
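# Worked example: at T = 1, this gives -8*sinh(8)/(cosh(8) + 3)/4 ~= -1.996,
# close to the ground-state energy of -2 per spin for the 2x2 lattice.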
def energy_variance2(T):
'''
Return variance of energy for 2 x 2 lattice normalized by spins**2 * 4 * T**2
Output:
-E_variance: Variance of energy, same normalization * temp**2 per cell
'''
size = 2
return 64.0*(1.+3.*np.cosh(8.0/T))/(np.cosh(8.0/T)+3)**2 / size**2 / T**2
def energy_variance(T):
'''
Return variance of energy for size x size lattice normalized by spins**2 * size**2 * T**2
Output:
-E_variance: Variance of energy, same normalization * temp**2 per cell
'''
tanh2 = np.tanh(2./T)**2
kappa = 2*np.sinh(2./T)/np.cosh(2./T)**2
# N = size**2
k1 = special.ellipk(kappa**2)
k2 = special.ellipe(kappa**2)
return 4/np.pi * (k1-k2 -(1-tanh2)*(np.pi/2+(2*tanh2-1)*k1))/tanh2/T**2
def energy_mean_and_variance2(temperature):
return energy_mean2(temperature), energy_variance2(temperature)
def specific_heat2(T):
return energy_variance2(T)
def magnetization_spontaneous_asymptotic(T):
""" Return spontaneous magnetization for size x size lattice normalized by spins**2 * size**2
for T < Tc= 2.269
"""
tanh2 = np.tanh(1./T)**2
return (1 - (1-tanh2)**4/(16*tanh2**2))**(1./8) # pp 429
# return (1 - 1./np.sinh(2./T)**4)**(1./8)
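# Both forms agree: sinh(2/T) = 2*tanh(1/T)/(1 - tanh(1/T)**2), so
# 1/sinh(2/T)**4 = (1 - tanh2)**4/(16*tanh2**2) with tanh2 = tanh(1/T)**2.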
def magnetization_mean2(T):
'''
Output:
- M_av: Magnetic field of matrix, averaged over trials, normalized to spins**2 per cell
'''
return np.where(T>T_CRITICAL, 0, magnetization_spontaneous_asymptotic(T))
def magnetization_variance2(T):
"""Return variance of magnetization for 2 x 2 lattice normalized by spins**2 * 4 * T"""
size = 2 * 2
denominator = np.cosh(8./T) + 3
mean = magnetization_mean2(T) * size
sigma = 8.0 * (np.exp(8./T) + 1) / denominator - mean**2
return sigma / size / T
def magnetization_mean_and_variance2(T):
"""Return normalized mean and variance for the moments of a 2 X 2 ising model."""
return magnetization_mean2(T), magnetization_variance2(T)
def susceptibility2(T):
'''
Output:
- M_variance: Variance of magnetic field, same normalization * temp
'''
return magnetization_variance2(T)
def magnetization_abs_mean2(T):
'''
Lattice 2x2
Output:
- Mabs: Absolute value of magnetic field, averaged over trials per cell
'''
size = 2
return (2*np.exp(8.0/T)+4)/(np.cosh(8.0/T)+3)/size**2
def magnetization_abs_mean_and_variance2(temperature):
"""Return normalized mean and variance for the moments of a 2 X 2 ising model."""
beta = 1. / temperature
size = 2
denominator = (np.cosh(8 * beta) + 3)
mean = 2 * (np.exp(8 * beta) + 2) / denominator
sigma = (8 * (np.exp(8 * beta) + 1) / denominator - mean**2) / temperature
return mean / size**2, sigma / size**2
def read_input():
"""
method = -1: No visualization (testing)
method = 0: Just print it to the terminal
method = 1: Pretty-print to terminal
method = 2: SDL/pygame single-pixel
method = 3: SDL/pygame rectangle
method = 4: SDL/pygame rectangle with E/M data overlay
"""
if len(sys.argv) == 5:
size = int(sys.argv[1])
trials = int(sys.argv[2])
temperature = float(sys.argv[3])
method = int(sys.argv[4])
else:
print "Usage: python", sys.argv[0],\
"lattice_size trials temp method"
sys.exit(0)
if method > 4:
print "method must be <= 4!"
sys.exit(0)
return size, trials, temperature, method
def plot_abs_error_size2(trial_sizes, data, names, truths, temperature):
for i, truth in enumerate(truths):
name = names[i]
print i
plt.loglog(trial_sizes, np.abs(data[:,i] - truth), label=name)
plt.title('T={} $[k_b K / J]$'.format(temperature))
plt.ylabel('Absolute error')
plt.xlabel('Number of trials')
plt.legend(framealpha=0.2)
def plot_rel_error_size2(trial_sizes, data, names, truths, temperature):
for i, truth in enumerate(truths):
name = names[i]
print i
if truth == 0:
scale = 1e-3
else:
scale = np.abs(truth) + 1e-16
plt.loglog(trial_sizes, np.abs(data[:,i] - truth)/scale, label=name)
plt.title('T={} $[k_b K / J]$'.format(temperature))
plt.ylabel('Relative error')
plt.xlabel('Number of trials')
plt.legend(framealpha=0.2)
def compute_monte_carlo(temperature, size, trial_sizes, spin_matrix='ordered'):
data = []
for trials in trial_sizes:
print trials
t0 = timer()
data.append(monteCarlo(temperature, size, trials, spin_matrix=spin_matrix))
print 'elapsed time: {} seconds'.format(timer() - t0)
data = np.array(data)
names = ['Average Energy per spin $E/N$',
'Specific Heat per spin $C_V/N$',
'Average Magnetization per spin $M/N$',
'Susceptibility per spin $var(M)/N$',
'Average |Magnetization| per spin $|M|/N$',
'Variance of |Magnetization| per spin $var(|M|)/N$',
'Number of accepted configurations']
return data, names
def plot_mean_energy_and_magnetization(trial_sizes, data, names, temperature, spin_matrix='ordered'):
ids = [0, 4]
for i in ids:
name = names[i]
print i
plt.semilogx(trial_sizes, data[:,i], label=name)
plt.title('T={} $[k_b K / J]$, {} spin matrix'.format(temperature, spin_matrix))
plt.ylabel('E or |M|')
plt.xlabel('Number of trials')
plt.legend()
def plot_total_number_of_accepted_configs(trial_sizes, data, names, temperature, spin_matrix='ordered'):
i = 6
# for i in ids:
name = names[i]
print i
plt.loglog(trial_sizes, data[:,i], label=name)
x = np.log(trial_sizes)
y = np.log(data[:, i])
mask = np.isfinite(y)
p = np.polyfit(x[mask], y[mask], deg=1)
sgn = '+' if p[1] > 0 else '-'
label_p = 'exp({:2.1f} {} {:2.1f} ln(x))'.format(p[0], sgn, abs(p[1]))
plt.loglog(trial_sizes, np.exp(np.polyval(p, x)), label=label_p)
plt.title('T={} $[k_b K / J]$, {} spin matrix'.format(temperature, spin_matrix))
plt.ylabel('')
plt.xlabel('Number of trials')
plt.legend()
def main(size, trials, temperature, method):
initialize_pygame(size, method)
visualizer = get_visualize_function(method)
(E_av, E_variance, M_av, M_variance, Mabs_av) = monteCarlo(temperature, size, trials, visualizer)[:5]
print "T=%15.8E E[E]=%15.8E Var[E]=%15.8E E[M]=%15.8E Var[M]=%15.8E E[|M|]= %15.8E\n" % (temperature, E_av, E_variance, M_av, M_variance, Mabs_av)
pygame.quit()
def task_b(temperatures=(1, 2.4)):
size = 2
trial_sizes = 10**np.arange(1, 6)
for temperature in temperatures:
data, names = compute_monte_carlo(temperature, size, trial_sizes)
truths = (energy_mean_and_variance2(temperature)
+ magnetization_mean_and_variance2(temperature)
+ magnetization_abs_mean_and_variance2(temperature))
plt.figure()
plot_abs_error_size2(trial_sizes, data, names, truths, temperature)
plt.savefig('task_b_abserr_T{}_size{}.png'.format(temperature, 2))
plt.figure()
plot_rel_error_size2(trial_sizes, data, names, truths, temperature)
plt.savefig('task_b_relerr_T{}_size{}.png'.format(temperature, 2))
# plt.show('hold')
def task_c(temperatures=(1,)):
trial_sizes = [10, 30, 100, 300, 1000, 3000, 10000, 30000, 100000] # 10**np.arange(1, 5)
for temperature in temperatures:
print temperature
size = 20
for spin_matrix in ['ordered', 'random']:
data, names = compute_monte_carlo(temperature, size, trial_sizes, spin_matrix)
plt.figure()
plot_mean_energy_and_magnetization(trial_sizes, data, names, temperature, spin_matrix)
plt.savefig('task_c_T{}_size{}_{}.png'.format(temperature, 20, spin_matrix))
plt.figure()
plot_total_number_of_accepted_configs(trial_sizes, data, names, temperature, spin_matrix)
plt.savefig('task_c_accepted_T{}_size{}_{}.png'.format(temperature, 20, spin_matrix))
# plt.show('hold')
def task_d(temperatures=(1,)):
trials = int(1e5)
burn_in = int(1e5)
size = 20
for temperature in temperatures:
print temperature
for spin_matrix in ['ordered', 'random']:
ecounter = ECounter(size, temperature)
print spin_matrix
t0 = timer()
E_mean, E_variance = monteCarlo(temperature, size, trials, spin_matrix=spin_matrix,
burn_in=burn_in,
ecounter=ecounter)[:2]
print 'elapsed time: {} seconds'.format(timer() - t0)
# ecounter = ecounter.counter
pdf, E = ecounter.to_pdf_per_cell()
mean, s2 = ecounter.mean_and_variance_per_cell()
if np.abs(E_variance-s2)> 1e-3: # Check that it is equal
print 'Error', E_mean-mean, E_variance-s2
plt.figure()
plt.plot(E, pdf, '.', label='$\sigma^2_{E/N}$ =' + (' {:2.3f}'.format(E_variance * temperature**2)))
plt.title('T={} $[k_b K / J]$, {} spin matrix'.format(temperature, spin_matrix))
plt.ylabel('Probability $P(E/N)$')
plt.xlabel('Energy per cell $E/N$')
plt.legend()
plt.savefig('task_d_probability_T{}_size{}_{}.png'.format(temperature, size, spin_matrix))
#plt.show('hold')
if __name__ == '__main__':
# T=2.4
#
# print energy_mean2(T), energy_mean_asymptotic(T), magnetization_spontaneous_asymptotic(T), magnetization_mean2(T)
# print energy_variance2(T)/energy_variance(T)
# task_b(temperatures=(1,2.4))
# task_c(temperatures=(1, 2.0, 2.4, 5, 10))
# task_c(temperatures=(1, 2.4))
task_d(temperatures=(1, 2.4))
plt.show('hold')
# Main program
# # Get input
# # size, trials, temperature, method = read_input()
# size = 2
# trials = 1000
# temperature = 4
# method = -1
#
# main(size, trials, temperature, method)
# print energy_mean_and_variance2(temperature)
# print magnetization_mean_and_variance2(temperature)
# print magnetization_abs_mean_and_variance2(temperature)
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Comment splicer for lib2to3 trees.
The lib2to3 syntax tree produced by the parser holds comments and whitespace in
prefix attributes of nodes, rather than nodes themselves. This module provides
functionality to splice comments out of prefixes and into nodes of their own,
making them easier to process.
SpliceComments(): the main function exported by this module.
"""
from lib2to3 import pygram
from lib2to3 import pytree
from lib2to3.pgen2 import token
from yapf.yapflib import pytree_utils
def SpliceComments(tree):
"""Given a pytree, splice comments into nodes of their own right.
Extract comments from the prefixes where they are housed after parsing.
The prefixes that previously housed the comments become empty.
Args:
tree: a pytree.Node - the tree to work on. The tree is modified by this
function.
"""
# The previous leaf node encountered in the traversal.
# This is a list because Python 2.x doesn't have 'nonlocal' :)
prev_leaf = [None]
_AnnotateIndents(tree)
def _VisitNodeRec(node):
# This loop may insert into node.children, so we'll iterate over a copy.
for child in node.children[:]:
if isinstance(child, pytree.Node):
# Nodes don't have prefixes.
_VisitNodeRec(child)
else:
if child.prefix.lstrip().startswith('#'):
# We have a comment prefix in this child, so splicing is needed.
comment_prefix = child.prefix
comment_lineno = child.lineno - comment_prefix.count('\n')
# Remember the leading indentation of this prefix and clear it.
# Mopping up the prefix is important because we may go over this same
# child in the next iteration...
child_prefix = child.prefix.lstrip('\n')
prefix_indent = child_prefix[:child_prefix.find('#')]
child.prefix = ''
if child.type == token.NEWLINE:
# If the prefix was on a NEWLINE leaf, it's part of the line so it
# will be inserted after the previously encountered leaf.
# We can't just insert it before the NEWLINE node, because as a
# result of the way pytrees are organized, this node can be under
# an inappropriate parent.
assert prev_leaf[0] is not None
pytree_utils.InsertNodesAfter(
_CreateCommentsFromPrefix(comment_prefix, comment_lineno,
standalone=False), prev_leaf[0])
elif child.type == token.DEDENT:
# Comment prefixes on DEDENT nodes also deserve special treatment,
# because their final placement depends on their prefix.
# We'll look for an ancestor of this child with a matching
# indentation, and insert the comment after it.
ancestor_at_indent = _FindAncestorAtIndent(child, prefix_indent)
if ancestor_at_indent.type == token.DEDENT:
# Special case where the comment is inserted in the same
# indentation level as the DEDENT it was originally attached to.
pytree_utils.InsertNodesBefore(_CreateCommentsFromPrefix(
comment_prefix, comment_lineno,
standalone=True), ancestor_at_indent)
else:
pytree_utils.InsertNodesAfter(_CreateCommentsFromPrefix(
comment_prefix, comment_lineno,
standalone=True), ancestor_at_indent)
else:
# Otherwise there are two cases.
#
# 1. The comment is on its own line
# 2. The comment is part of an expression.
#
# Unfortunately, it's fairly difficult to distinguish between the
# two in lib2to3 trees. The algorithm here is to determine whether
# child is the first leaf in the statement it belongs to. If it is,
# then the comment (which is a prefix) belongs on a separate line.
# If it is not, it means the comment is buried deep in the statement
# and is part of some expression.
stmt_parent = _FindStmtParent(child)
for leaf_in_parent in stmt_parent.leaves():
if leaf_in_parent.type == token.NEWLINE:
continue
elif id(leaf_in_parent) == id(child):
# This comment stands on its own line, and it has to be inserted
# into the appropriate parent. We'll have to find a suitable
# parent to insert into. See comments above
# _STANDALONE_LINE_NODES for more details.
node_with_line_parent = _FindNodeWithStandaloneLineParent(child)
pytree_utils.InsertNodesBefore(_CreateCommentsFromPrefix(
comment_prefix, comment_lineno,
standalone=True), node_with_line_parent)
break
else:
if comment_lineno == prev_leaf[0].lineno:
comment_lines = comment_prefix.splitlines()
value = comment_lines[0].lstrip()
comment_leaf = pytree.Leaf(type=token.COMMENT,
value=value.rstrip('\n'),
context=('', (comment_lineno, 0)))
pytree_utils.InsertNodesAfter([comment_leaf], prev_leaf[0])
comment_prefix = '\n'.join(comment_lines[1:])
comment_lineno += 1
comments = _CreateCommentsFromPrefix(comment_prefix,
comment_lineno,
standalone=False)
pytree_utils.InsertNodesBefore(comments, child)
break
prev_leaf[0] = child
_VisitNodeRec(tree)
def _CreateCommentsFromPrefix(comment_prefix, comment_lineno, standalone=False):
"""Create pytree nodes to represent the given comment prefix.
Args:
comment_prefix: (unicode) the text of the comment from the node's prefix.
comment_lineno: (int) the line number for the start of the comment.
standalone: (bool) determines if the comment is standalone or not.
Returns:
The simple_stmt nodes if this is a standalone comment, otherwise a list of
    new COMMENT leaves. The prefix may consist of multiple comment blocks,
separated by blank lines. Each block gets its own leaf.
"""
# The comment is stored in the prefix attribute, with no lineno of its
# own. So we only know at which line it ends. To find out at which line it
# starts, look at how many newlines the comment itself contains.
comments = []
lines = comment_prefix.split('\n')
index = 0
while index < len(lines):
comment_block = []
while index < len(lines) and lines[index].lstrip().startswith('#'):
comment_block.append(lines[index])
index += 1
if comment_block:
new_column = len(comment_block[0]) - len(comment_block[0].lstrip())
new_lineno = comment_lineno + index - 1
comment_block[0] = comment_block[0].lstrip()
comment_block[-1] = comment_block[-1].rstrip('\n')
comment_leaf = pytree.Leaf(type=token.COMMENT,
value='\n'.join(comment_block),
context=('', (new_lineno, new_column)))
comment_node = comment_leaf if not standalone else pytree.Node(
pygram.python_symbols.simple_stmt, [comment_leaf])
comments.append(comment_node)
while index < len(lines) and not lines[index].lstrip():
index += 1
return comments
# "Standalone line nodes" are tree nodes that have to start a new line in Python
# code (and cannot follow a ';' or ':'). Other nodes, like 'expr_stmt', serve as
# parents of other nodes but can come later in a line. This is a list of
# standalone line nodes in the grammar. It is meant to be exhaustive
# *eventually*, and we'll modify it with time as we discover more corner cases
# in the parse tree.
#
# When splicing a standalone comment (i.e. a comment that appears on its own
# line, not on the same line with other code), it's important to insert it into
# an appropriate parent of the node it's attached to. An appropriate parent
# is the first "standalone line node" in the parent chain of a node.
_STANDALONE_LINE_NODES = frozenset(
['suite', 'if_stmt', 'while_stmt', 'for_stmt', 'try_stmt', 'with_stmt',
'funcdef', 'classdef', 'decorated', 'file_input'])
def _FindNodeWithStandaloneLineParent(node):
"""Find a node whose parent is a 'standalone line' node.
See the comment above _STANDALONE_LINE_NODES for more details.
Arguments:
node: node to start from
Returns:
Suitable node that's either the node itself or one of its ancestors.
"""
if pytree_utils.NodeName(node.parent) in _STANDALONE_LINE_NODES:
return node
else:
# This is guaranteed to terminate because 'file_input' is the root node of
# any pytree.
return _FindNodeWithStandaloneLineParent(node.parent)
# "Statement nodes" are standalone statements. The don't have to start a new
# line.
_STATEMENT_NODES = frozenset(['simple_stmt']) | _STANDALONE_LINE_NODES
def _FindStmtParent(node):
"""Find the nearest parent of node that is a statement node.
Arguments:
node: node to start from
Returns:
Nearest parent (or node itself, if suitable).
"""
if pytree_utils.NodeName(node) in _STATEMENT_NODES:
return node
else:
return _FindStmtParent(node.parent)
def _FindAncestorAtIndent(node, indent):
"""Find an ancestor of node with the given indentation.
Arguments:
node: node to start from. This must not be the tree root.
indent: indentation string for the ancestor we're looking for.
See _AnnotateIndents for more details.
Returns:
An ancestor node with suitable indentation. If no suitable ancestor is
found, the closest ancestor to the tree root is returned.
"""
if node.parent.parent is None:
# Our parent is the tree root, so there's nowhere else to go.
return node
else:
# If the parent has an indent annotation, and it's shorter than node's
# indent, this is a suitable ancestor.
# The reason for "shorter" rather than "equal" is that comments may be
# improperly indented (i.e. by three spaces, where surrounding statements
# have either zero or two or four), and we don't want to propagate them all
# the way to the root.
parent_indent = pytree_utils.GetNodeAnnotation(
node.parent, pytree_utils.Annotation.CHILD_INDENT)
if parent_indent is not None and indent.startswith(parent_indent):
return node
else:
# Keep looking up the tree.
return _FindAncestorAtIndent(node.parent, indent)
def _AnnotateIndents(tree):
"""Annotate the tree with child_indent annotations.
A child_indent annotation on a node specifies the indentation (as a string,
like " ") of its children. It is inferred from the INDENT child of a node.
Arguments:
tree: root of a pytree. The pytree is modified to add annotations to nodes.
Raises:
RuntimeError: if the tree is malformed.
"""
# Annotate the root of the tree with zero indent.
if tree.parent is None:
pytree_utils.SetNodeAnnotation(tree, pytree_utils.Annotation.CHILD_INDENT,
'')
for child in tree.children:
if child.type == token.INDENT:
child_indent = pytree_utils.GetNodeAnnotation(
tree, pytree_utils.Annotation.CHILD_INDENT)
if child_indent is not None and child_indent != child.value:
raise RuntimeError('inconsistent indentation for child', (tree, child))
pytree_utils.SetNodeAnnotation(tree, pytree_utils.Annotation.CHILD_INDENT,
child.value)
_AnnotateIndents(child)
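

# --- Usage sketch (added for illustration; not part of the original
# module). ParseCodeToTree is assumed to be the yapf pytree_utils helper
# that builds a lib2to3 tree from source text.
if __name__ == '__main__':
  _SAMPLE = 'x = 1  # trailing comment\n# standalone comment\ny = 2\n'
  _tree = pytree_utils.ParseCodeToTree(_SAMPLE)
  SpliceComments(_tree)
  # After splicing, comments exist as COMMENT leaves in the tree itself.
  for _leaf in _tree.leaves():
    if _leaf.type == token.COMMENT:
      print(_leaf.value)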
|
|
import re
from collections import defaultdict
from enum import Enum
import pytest
from selenium.webdriver.common.by import By
class TriggerTypes(Enum):
KEY = "KEY"
STRING = "STRING"
class AssertionObject:
    def __init__(self, expected_trigger_types=None, expected_hotkey_ids=None):
        # Avoid mutable default arguments; fall back to fresh lists.
        self._trigger_types = expected_trigger_types or []
        self._hotkey_ids = expected_hotkey_ids or []
def check(self, browser, parser, subtests):
page = browser.page_source
if self._trigger_types:
checked_selectors = browser.find_elements(
By.CSS_SELECTOR, "input[type='radio']:checked"
)
values = [selector.get_attribute("value") for selector in checked_selectors]
with subtests.test(
expected_trigger_types=self._trigger_types, actual_trigger_types=values
):
assert values == [trigger_type.value for trigger_type in self._trigger_types]
if self._hotkey_ids:
parsed = parser(page)
row_id_inputs = parsed.find_all("input", {"class": "js-index"})
values = [id_input["value"] for id_input in row_id_inputs]
with subtests.test(expected_hotkey_ids=self._hotkey_ids, hotkey_ids=values):
assert values == self._hotkey_ids
def _get_elements_through_browser(
path_type, path, filter, filter_attr, browser,
):
elements = browser.find_elements(path_type, path)
desired_elements = [i for i in elements if filter(i.get_attribute(filter_attr))]
return desired_elements
def _get_elements_and_desired_value_through_browser(
path_type, path, filter, filter_attr, desired_attr, browser, sort_attribute="name"
):
desired_elements = _get_elements_through_browser(path_type, path, filter, filter_attr, browser)
result = {}
for element in desired_elements:
sort_key = element.get_attribute(sort_attribute)
if sort_key in result:
if not isinstance(result[sort_key], list):
result[sort_key] = [result[sort_key]]
result[sort_key].append(element.get_attribute(desired_attr))
else:
result[sort_key] = element.get_attribute(desired_attr)
return result
def __sanitize_html_inputs(function_signature):
r"""
>>> __sanitize_html_inputs('ActivateOrOpen( "<input type="text" name="Window0" id="window0" placeholder="Window" class="keyWidth" oninput="markDirty()" required="">", <span class="w3-hide-large"><br></span> "<input id="program0" type="text" name="Program0" placeholder="Program" class="keyWidth" oninput="markDirty()" required="">") <input type="hidden" value="ActivateOrOpen" name="option0" id="option0">')
'ActivateOrOpen("\\{Window0\\}", "\\{Program0\\}")'
>>> __sanitize_html_inputs('Send( "<input name="input0" id="input0" type="text" placeholder="input" oninput="markDirty()" required="">") <input type="hidden" value="Send" name="option0" id="option0">')
'Send("\\{input0\\}")'
>>> __sanitize_html_inputs('Replace( "<input type="text" name="input0" id="input0" placeholder="input" oninput="markDirty()" required="">") <input type="hidden" value="Replace" name="option0" id="option0">')
'Replace("\\{input0\\}")'
>>> __sanitize_html_inputs('SendUnicodeChar(<input name="input0" id="input0" type="text" placeholder="0x000" class="keyWidth" oninput="markDirty()" required="">)')
'SendUnicodeChar(\\{input0\\})'
>>> __sanitize_html_inputs('Custom: <textarea name=\"Code0\" id=\"code0\" placeholder=\"code\" class=\"codeArea\" oninput=\"markDirty()\" required=\"\"></textarea>)')
'Custom: \\{Code0\\})'
>>> __sanitize_html_inputs('<span title="Removes what was just typed (for hotstring, but treated as Send for hotkey) and sends the valued\ni.e. Replace("by the way") can be used with a hotstring of btw to cause it to be expanded when typed">Replace(\n "\\{text0\\}"\n)</span>')
'Replace("\\{text0\\}")'
>>> __sanitize_html_inputs('ActivateOrOpenChrome(<span class="w3-hide-large w3-hide-medium"><br/></span>"<input type="text" name="Window0" id="window0" placeholder="tab name" class="keyWidth" oninput="markDirty()" required/>", <span class="w3-hide-large"><br/></span>"<input id="program0" type="text" name="Program0" placeholder="URL" class="keyWidth" oninput="markDirty()" required/>")')
'ActivateOrOpenChrome("\\{Window0\\}", "\\{Program0\\}")'
>>> __sanitize_html_inputs('<span title="A sandbox for creating your own usage of the hotkey/hotstring">Custom:<span class="may-break-space"> </span><span class="w3-hide-large "><br></span><textarea name="Code0" id="code0" placeholder="Code" class="keyWidth" oninput="markDirty()" title="" required=""></textarea></span>')
'Custom: \\{Code0\\}'
"""
_arg_regex = r"(\"?)\<(input|textarea) .*?name=\"(.+?)\".+?\>(?:\<\/\2\>)?\1"
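    # Added note: group 1 captures an optional surrounding quote, group 2 the
    # tag name (input/textarea, reused via backreference to match a closing
    # tag), and group 3 the element's name attribute, which replaces the
    # whole widget in the sanitized signature below.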
# remove hidden option input
function_signature = re.sub(r"\<input type=\"hidden\".+?\/?\>", "", function_signature).strip()
# remove title text span
function_signature = re.sub(
r"\<span title=\"[\d\D]+?\"[\d\D]*?\>([\d\D]+)\<\/span\>", r"\1", function_signature,
) # TODO: after integration, add title to testing
    # replace arg inputs with names
function_signature = re.sub(_arg_regex, r"\1\{\3\}\1", function_signature).replace("\t", "")
# clean up newlines
function_signature = re.sub(r"(?:\\n|\n)", r"", function_signature)
# remove white space before quote marks, except after commas
function_signature = re.sub(r"([^,])[\s \n]+\"", r'\1"', function_signature)
# remove page break insertions
function_signature = re.sub(r"\<span .+?\<br\/?\>\<\/span\>", "", function_signature)
# add spaces after commas (like in-between parameters)
function_signature = re.sub(r",([\"\'])", r", \1", function_signature)
# add space between : and arg
function_signature = re.sub(r"\:\\\{", r": \\{", function_signature)
return function_signature
def _get_input(selector, matcher, desired_value, id_filter, browser, parser, dest_name, data_store):
trigger_type_inputs = _get_elements_and_desired_value_through_browser(
By.CSS_SELECTOR, selector, matcher, "name", desired_value, browser,
)
for name, trigger_type in trigger_type_inputs.items():
id_value = id_filter(name)
data_store[id_value][dest_name] = trigger_type
def loaded_data(browser, parser):
data = defaultdict(dict)
page = browser.page_source
parsed = parser(page)
uses_ids = True
row_id_inputs = parsed.find_all("input", {"class": "js-index"})
hotkey_ids = [id_input["value"] for id_input in row_id_inputs]
if not hotkey_ids:
uses_ids = False
# else:
# data["hotkey_ids"] = hotkey_ids
_get_input(
selector="input[type='radio']:checked",
matcher=lambda v: v.startswith("func") and v[-1].isnumeric(),
desired_value="value",
id_filter=lambda name: name[len("func") :],
dest_name="trigger_type",
browser=browser,
parser=parser,
data_store=data,
)
_get_input(
selector="input[type='text']",
matcher=lambda v: v.startswith("comment"),
desired_value="value",
id_filter=lambda name: name[len("comment") :],
dest_name="comment",
browser=browser,
parser=parser,
data_store=data,
)
_get_input(
selector="input[type='text']",
matcher=lambda v: v.startswith("skeyValue"),
desired_value="value",
id_filter=lambda name: name[len("skeyValue") :],
dest_name="trigger_keys",
browser=browser,
parser=parser,
data_store=data,
)
_get_input(
selector="input[type='checkbox']:checked",
matcher=lambda name: name.startswith("skey") and name.endswith("[]"),
desired_value="value",
id_filter=lambda name: name[len("skey") : -2],
dest_name="modifier_keys",
browser=browser,
parser=parser,
data_store=data,
)
selected_functions = _get_elements_through_browser(
By.CSS_SELECTOR,
path="span",
filter=lambda id: id.startswith("function"),
filter_attr="id",
browser=browser,
)
for function in selected_functions:
html_id = function.get_attribute("id")
id_value = html_id[len("function") :]
function_signature = function.get_attribute("innerHTML")
function_signature = __sanitize_html_inputs(function_signature)
args = _get_elements_and_desired_value_through_browser(
By.CSS_SELECTOR,
r'input[type="text"], textarea',
filter=lambda _: True,
filter_attr="name",
desired_attr="value",
browser=function,
)
data[id_value]["action"] = {"function": function_signature, "args": args}
return dict(data)
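

# Added sketch: the sanitizer above is documented via doctests, so it can be
# exercised directly. This guard is an addition, not part of the original
# test helpers.
if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=False)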
|
|
"""Database models used by django-reversion."""
from __future__ import unicode_literals
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.conf import settings
from django.core import serializers
from django.core.exceptions import ObjectDoesNotExist
from django.db import models, IntegrityError
from django.dispatch.dispatcher import Signal
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import force_text, python_2_unicode_compatible
def safe_revert(versions):
"""
    Attempts to revert the models contained in the given versions.
This method will attempt to resolve dependencies between the versions to revert
them in the correct order to avoid database integrity errors.
"""
unreverted_versions = []
for version in versions:
try:
version.revert()
except (IntegrityError, ObjectDoesNotExist):
unreverted_versions.append(version)
if len(unreverted_versions) == len(versions):
raise RevertError("Could not revert revision, due to database integrity errors.")
if unreverted_versions:
safe_revert(unreverted_versions)
class RevertError(Exception):
"""Exception thrown when something goes wrong with reverting a model."""
UserModel = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
@python_2_unicode_compatible
class Revision(models.Model):
"""A group of related object versions."""
manager_slug = models.CharField(
max_length = 200,
db_index = True,
default = "default",
)
date_created = models.DateTimeField(auto_now_add=True,
db_index=True,
verbose_name=_("date created"),
help_text="The date and time this revision was created.")
user = models.ForeignKey(UserModel,
blank=True,
null=True,
on_delete=models.SET_NULL,
verbose_name=_("user"),
help_text="The user who created this revision.")
comment = models.TextField(blank=True,
verbose_name=_("comment"),
help_text="A text comment on this revision.")
def revert(self, delete=False):
"""Reverts all objects in this revision."""
version_set = self.version_set.all()
# Optionally delete objects no longer in the current revision.
if delete:
# Get a dict of all objects in this revision.
old_revision = {}
for version in version_set:
try:
obj = version.object
except ContentType.objects.get_for_id(version.content_type_id).model_class().DoesNotExist:
pass
else:
old_revision[obj] = version
# Calculate the set of all objects that are in the revision now.
from reversion.revisions import RevisionManager
current_revision = RevisionManager.get_manager(self.manager_slug)._follow_relationships(obj for obj in old_revision.keys() if obj is not None)
# Delete objects that are no longer in the current revision.
for item in current_revision:
if item not in old_revision:
item.delete()
# Attempt to revert all revisions.
safe_revert(version_set)
def __str__(self):
"""Returns a unicode representation."""
return ", ".join(force_text(version) for version in self.version_set.all())
#Meta
class Meta:
app_label = 'reversion'
def has_int_pk(model):
"""Tests whether the given model has an integer primary key."""
pk = model._meta.pk
return (
(
isinstance(pk, (models.IntegerField, models.AutoField)) and
not isinstance(pk, models.BigIntegerField)
) or (
isinstance(pk, models.ForeignKey) and has_int_pk(pk.rel.to)
)
)
@python_2_unicode_compatible
class Version(models.Model):
"""A saved version of a database model."""
revision = models.ForeignKey(Revision,
help_text="The revision that contains this version.")
object_id = models.TextField(help_text="Primary key of the model under version control.")
object_id_int = models.IntegerField(
blank = True,
null = True,
db_index = True,
help_text = "An indexed, integer version of the stored model's primary key, used for faster lookups.",
)
content_type = models.ForeignKey(ContentType,
help_text="Content type of the model under version control.")
# A link to the current instance, not the version stored in this Version!
object = generic.GenericForeignKey()
format = models.CharField(max_length=255,
help_text="The serialization format used by this model.")
serialized_data = models.TextField(help_text="The serialized form of this version of the model.")
object_repr = models.TextField(help_text="A string representation of the object.")
@property
def object_version(self):
"""The stored version of the model."""
data = self.serialized_data
data = force_text(data.encode("utf8"))
return list(serializers.deserialize(self.format, data, ignorenonexistent=True))[0]
@property
def field_dict(self):
"""
A dictionary mapping field names to field values in this version
of the model.
This method will follow parent links, if present.
"""
if not hasattr(self, "_field_dict_cache"):
object_version = self.object_version
obj = object_version.object
result = {}
for field in obj._meta.fields:
result[field.name] = field.value_from_object(obj)
result.update(object_version.m2m_data)
# Add parent data.
for parent_class, field in obj._meta.concrete_model._meta.parents.items():
if obj._meta.proxy and parent_class == obj._meta.concrete_model:
continue
content_type = ContentType.objects.get_for_model(parent_class)
if field:
parent_id = force_text(getattr(obj, field.attname))
else:
parent_id = obj.pk
try:
parent_version = Version.objects.get(revision__id=self.revision_id,
content_type=content_type,
object_id=parent_id)
except Version.DoesNotExist:
pass
else:
result.update(parent_version.field_dict)
setattr(self, "_field_dict_cache", result)
return getattr(self, "_field_dict_cache")
def revert(self):
"""Recovers the model in this version."""
self.object_version.save()
def __str__(self):
"""Returns a unicode representation."""
return self.object_repr
#Meta
class Meta:
app_label = 'reversion'
# Version management signals.
pre_revision_commit = Signal(providing_args=["instances", "revision", "versions"])
post_revision_commit = Signal(providing_args=["instances", "revision", "versions"])
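

# Added illustration (not part of django-reversion): the retry strategy used
# by safe_revert above, exercised with stand-in version objects. Each fake
# "version" fails with IntegrityError until its dependency has been reverted,
# so the first pass reverts 'parent' and the recursive retry reverts 'child'.
def _demo_safe_revert_retry():
    reverted = set()
    class _FakeVersion(object):
        def __init__(self, name, deps):
            self.name, self.deps = name, deps
        def revert(self):
            if not self.deps <= reverted:
                raise IntegrityError(self.name)
            reverted.add(self.name)
    safe_revert([_FakeVersion("child", {"parent"}), _FakeVersion("parent", set())])
    assert reverted == {"child", "parent"}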
|
|
# -*- coding: utf-8 -*-
"""Python has a very powerful mapping type at its core: the :class:`dict`
type. While versatile and featureful, the :class:`dict` prioritizes
simplicity and performance. As a result, it does not retain the order
of item insertion [1]_, nor does it store multiple values per key. It
is a fast, unordered 1:1 mapping.
The :class:`OrderedMultiDict` contrasts to the built-in :class:`dict`,
as a relatively maximalist, ordered 1:n subtype of
:class:`dict`. Virtually every feature of :class:`dict` has been
retooled to be intuitive in the face of this added
complexity. Additional methods have been added, such as
:class:`collections.Counter`-like functionality.
A prime advantage of the :class:`OrderedMultiDict` (OMD) is its
non-destructive nature. Data can be added to an :class:`OMD` without being
rearranged or overwritten. This property allows the developer to
work more freely with the data, as well as make more assumptions about
where input data will end up in the output, all without any extra
work.
One great example of this is the :meth:`OMD.inverted()` method, which
returns a new OMD with the values as keys and the keys as values. All
the data and the respective order is still represented in the inverted
form, all from an operation which would be outright wrong and reckless
with a built-in :class:`dict` or :class:`collections.OrderedDict`.
The OMD has been performance tuned to be suitable for a wide range of
usages, including as a basic unordered MultiDict. Special
thanks to `Mark Williams`_ for all his help.
.. [1] As of 2015, `basic dicts on PyPy are ordered
<http://morepypy.blogspot.com/2015/01/faster-more-memory-efficient-and-more.html>`_.
.. _Mark Williams: https://github.com/markrwilliams
"""
from collections import KeysView, ValuesView, ItemsView
try:
from itertools import izip
except ImportError:
izip = zip # Python 3
try:
from compat import make_sentinel
_MISSING = make_sentinel(var_name='_MISSING')
except ImportError:
_MISSING = object()
PREV, NEXT, KEY, VALUE, SPREV, SNEXT = range(6)
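# Added note: each node of the internal circular doubly linked list is a
# plain Python list (a "cell"); the constants above index its slots: previous
# cell, next cell, key, value, and the skip-list prev/next links used by
# FastIterOrderedMultiDict further below.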
__all__ = ['MultiDict', 'OMD', 'OrderedMultiDict']
try:
profile
except NameError:
profile = lambda x: x
class OrderedMultiDict(dict):
"""A MultiDict is a dictionary that can have multiple values per key
and the OrderedMultiDict (OMD) is a MultiDict that retains
original insertion order. Common use cases include:
* handling query strings parsed from URLs
* inverting a dictionary to create a reverse index (values to keys)
* stacking data from multiple dictionaries in a non-destructive way
The OrderedMultiDict constructor is identical to the built-in
    :class:`dict`, and overall the API constitutes an intuitive
superset of the built-in type:
>>> omd = OrderedMultiDict()
>>> omd['a'] = 1
>>> omd['b'] = 2
>>> omd.add('a', 3)
>>> omd.get('a')
3
>>> omd.getlist('a')
[1, 3]
Some non-:class:`dict`-like behaviors also make an appearance,
such as support for :func:`reversed`:
>>> list(reversed(omd))
['b', 'a']
Note that unlike some other MultiDicts, this OMD gives precedence
to the most recent value added. ``omd['a']`` refers to ``3``, not
``1``.
>>> omd
OrderedMultiDict([('a', 1), ('b', 2), ('a', 3)])
>>> omd.poplast('a')
3
>>> omd
OrderedMultiDict([('a', 1), ('b', 2)])
>>> omd.pop('a')
1
>>> omd
OrderedMultiDict([('b', 2)])
Note that calling :func:`dict` on an OMD results in a dict of keys
to *lists* of values:
>>> from pprint import pprint as pp # ensuring proper key ordering
>>> omd = OrderedMultiDict([('a', 1), ('b', 2), ('a', 3)])
>>> pp(dict(omd))
{'a': [1, 3], 'b': [2]}
Note that modifying those lists will modify the OMD. If you want a
safe-to-modify or flat dictionary, use :meth:`OrderedMultiDict.todict()`.
>>> pp(omd.todict())
{'a': 3, 'b': 2}
>>> pp(omd.todict(multi=True))
{'a': [1, 3], 'b': [2]}
    With ``multi=False``, items appear with the keys in their original
insertion order, alongside the most-recently inserted value for
that key.
>>> OrderedMultiDict([('a', 1), ('b', 2), ('a', 3)]).items(multi=False)
[('a', 3), ('b', 2)]
"""
def __init__(self, *args, **kwargs):
if len(args) > 1:
raise TypeError('%s expected at most 1 argument, got %s'
% (self.__class__.__name__, len(args)))
super(OrderedMultiDict, self).__init__()
self._clear_ll()
if args:
self.update_extend(args[0])
if kwargs:
self.update(kwargs)
def _clear_ll(self):
try:
_map = self._map
except AttributeError:
_map = self._map = {}
self.root = []
_map.clear()
self.root[:] = [self.root, self.root, None]
def _insert(self, k, v):
root = self.root
cells = self._map.setdefault(k, [])
last = root[PREV]
cell = [last, root, k, v]
last[NEXT] = root[PREV] = cell
cells.append(cell)
def add(self, k, v):
"""Add a single value *v* under a key *k*. Existing values under *k*
are preserved.
"""
self_insert = self._insert
values = super(OrderedMultiDict, self).setdefault(k, [])
self_insert(k, v)
values.append(v)
def addlist(self, k, v):
"""Add an iterable of values underneath a specific key, preserving
any values already under that key.
Called ``addlist`` for consistency with :meth:`getlist`, but
tuples and other sequences and iterables work.
"""
self_insert = self._insert
values = super(OrderedMultiDict, self).setdefault(k, [])
for subv in v:
self_insert(k, subv)
values.extend(v)
def get(self, k, default=None):
"""Return the value for key *k* if present in the dictionary, else
*default*. If *default* is not given, ``None`` is returned.
This method never raises a :exc:`KeyError`.
To get all values under a key, use :meth:`OrderedMultiDict.getlist`.
"""
return super(OrderedMultiDict, self).get(k, [default])[-1]
def getlist(self, k, default=_MISSING):
"""Get all values for key *k* as a list, if *k* is in the
dictionary, else *default*. The list returned is a copy and
can be safely mutated. If *default* is not given, an empty
:class:`list` is returned.
"""
try:
return super(OrderedMultiDict, self).__getitem__(k)[:]
except KeyError:
if default is _MISSING:
return []
return default
def clear(self):
"Empty the dictionary."
super(OrderedMultiDict, self).clear()
self._clear_ll()
def setdefault(self, k, default=_MISSING):
"""If key *k* is in the dictionary, return its value. If not, insert
*k* with a value of *default* and return *default*. *default*
defaults to ``None``. See :meth:`dict.setdefault` for more
information.
"""
        if not super(OrderedMultiDict, self).__contains__(k):
            self[k] = None if default is _MISSING else default
        return self[k]
def copy(self):
"Return a shallow copy of the dictionary."
return self.__class__(self.iteritems(multi=True))
@classmethod
def fromkeys(cls, keys, default=None):
"""Create a dictionary from a list of keys, with all the values
set to *default*, or ``None`` if *default* is not set.
"""
return cls([(k, default) for k in keys])
def update(self, E, **F):
"""Add items from a dictionary or iterable (and/or keyword arguments),
overwriting values under an existing key. See
:meth:`dict.update` for more details.
"""
# E and F are throwback names to the dict() __doc__
if E is self:
return
self_add = self.add
if isinstance(E, OrderedMultiDict):
for k in E:
if k in self:
del self[k]
for k, v in E.iteritems(multi=True):
self_add(k, v)
elif hasattr(E, 'keys'):
for k in E.keys():
self[k] = E[k]
else:
seen = set()
seen_add = seen.add
for k, v in E:
if k not in seen and k in self:
del self[k]
seen_add(k)
self_add(k, v)
for k in F:
self[k] = F[k]
return
def update_extend(self, E, **F):
"""Add items from a dictionary, iterable, and/or keyword
arguments without overwriting existing items present in the
dictionary. Like :meth:`update`, but adds to existing keys
instead of overwriting them.
"""
if E is self:
iterator = iter(E.items())
elif isinstance(E, OrderedMultiDict):
iterator = E.iteritems(multi=True)
elif hasattr(E, 'keys'):
iterator = ((k, E[k]) for k in E.keys())
else:
iterator = E
self_add = self.add
for k, v in iterator:
self_add(k, v)
def __setitem__(self, k, v):
if super(OrderedMultiDict, self).__contains__(k):
self._remove_all(k)
self._insert(k, v)
super(OrderedMultiDict, self).__setitem__(k, [v])
def __getitem__(self, k):
return super(OrderedMultiDict, self).__getitem__(k)[-1]
def __delitem__(self, k):
super(OrderedMultiDict, self).__delitem__(k)
self._remove_all(k)
def __eq__(self, other):
if self is other:
return True
elif len(other) != len(self):
return False
elif isinstance(other, OrderedMultiDict):
selfi = self.iteritems(multi=True)
otheri = other.iteritems(multi=True)
for (selfk, selfv), (otherk, otherv) in izip(selfi, otheri):
if selfk != otherk or selfv != otherv:
return False
if not(next(selfi, _MISSING) is _MISSING
and next(otheri, _MISSING) is _MISSING):
# leftovers (TODO: watch for StopIteration?)
return False
return True
elif hasattr(other, 'keys'):
for selfk in self:
                try:
                    if other[selfk] != self[selfk]:
                        return False
                except KeyError:
                    return False
return True
return False
def __ne__(self, other):
return not (self == other)
def pop(self, k, default=_MISSING):
"""Remove all values under key *k*, returning the most-recently
inserted value. Raises :exc:`KeyError` if the key is not
present and no *default* is provided.
"""
        try:
            return self.popall(k)[-1]
        except KeyError:
            if default is _MISSING:
                raise
            return default
def popall(self, k, default=_MISSING):
"""Remove all values under key *k*, returning them in the form of
a list. Raises :exc:`KeyError` if the key is not present and no
*default* is provided.
"""
if super(OrderedMultiDict, self).__contains__(k):
self._remove_all(k)
if default is _MISSING:
return super(OrderedMultiDict, self).pop(k)
return super(OrderedMultiDict, self).pop(k, default)
def poplast(self, k=_MISSING, default=_MISSING):
"""Remove and return the most-recently inserted value under the key
*k*, or the most-recently inserted key if *k* is not
provided. If no values remain under *k*, it will be removed
from the OMD. Raises :exc:`KeyError` if *k* is not present in
the dictionary, or the dictionary is empty.
"""
if k is _MISSING:
if self:
k = self.root[PREV][KEY]
else:
raise KeyError('empty %r' % type(self))
try:
self._remove(k)
except KeyError:
if default is _MISSING:
raise KeyError(k)
return default
values = super(OrderedMultiDict, self).__getitem__(k)
v = values.pop()
if not values:
super(OrderedMultiDict, self).__delitem__(k)
return v
def _remove(self, k):
values = self._map[k]
cell = values.pop()
cell[PREV][NEXT], cell[NEXT][PREV] = cell[NEXT], cell[PREV]
if not values:
del self._map[k]
def _remove_all(self, k):
values = self._map[k]
while values:
cell = values.pop()
cell[PREV][NEXT], cell[NEXT][PREV] = cell[NEXT], cell[PREV]
del self._map[k]
def iteritems(self, multi=False):
"""Iterate over the OMD's items in insertion order. By default,
yields only the most-recently inserted value for each key. Set
*multi* to ``True`` to get all inserted items.
"""
root = self.root
curr = root[NEXT]
if multi:
while curr is not root:
yield curr[KEY], curr[VALUE]
curr = curr[NEXT]
else:
for key in self.iterkeys():
yield key, self[key]
def iterkeys(self, multi=False):
"""Iterate over the OMD's keys in insertion order. By default, yields
each key once, according to the most recent insertion. Set
*multi* to ``True`` to get all keys, including duplicates, in
insertion order.
"""
root = self.root
curr = root[NEXT]
if multi:
while curr is not root:
yield curr[KEY]
curr = curr[NEXT]
else:
yielded = set()
yielded_add = yielded.add
while curr is not root:
k = curr[KEY]
if k not in yielded:
yielded_add(k)
yield k
curr = curr[NEXT]
def itervalues(self, multi=False):
"""Iterate over the OMD's values in insertion order. By default,
yields the most-recently inserted value per unique key. Set
*multi* to ``True`` to get all values according to insertion
order.
"""
for k, v in self.iteritems(multi=multi):
yield v
def todict(self, multi=False):
"""Gets a basic :class:`dict` of the items in this dictionary. Keys
are the same as the OMD, values are the most recently inserted
values for each key.
        Setting the *multi* arg to ``True`` yields the same
result as calling :class:`dict` on the OMD, except that all the
value lists are copies that can be safely mutated.
"""
if multi:
return dict([(k, self.getlist(k)) for k in self])
return dict([(k, self[k]) for k in self])
def sorted(self, key=None, reverse=False):
"""Similar to the built-in :func:`sorted`, except this method returns
a new :class:`OrderedMultiDict` sorted by the provided key
function, optionally reversed.
Args:
            key (callable): A callable to determine the sort key of
each element. The callable should expect an **item**
(key-value pair tuple).
reverse (bool): Set to ``True`` to reverse the ordering.
>>> omd = OrderedMultiDict(zip(range(3), range(3)))
>>> omd.sorted(reverse=True)
OrderedMultiDict([(2, 2), (1, 1), (0, 0)])
Note that the key function receives an **item** (key-value
tuple), so the recommended signature looks like:
>>> omd = OrderedMultiDict(zip('hello', 'world'))
>>> omd.sorted(key=lambda i: i[1]) # i[0] is the key, i[1] is the val
OrderedMultiDict([('o', 'd'), ('l', 'l'), ('e', 'o'), ('h', 'w')])
"""
cls = self.__class__
return cls(sorted(self.iteritems(), key=key, reverse=reverse))
def inverted(self):
"""Returns a new :class:`OrderedMultiDict` with values and keys
        swapped, like creating a dictionary transposition or a reverse
index. Insertion order is retained and all keys and values
are represented in the output.
>>> omd = OMD([(0, 2), (1, 2)])
>>> omd.inverted().getlist(2)
[0, 1]
Inverting twice yields a copy of the original:
>>> omd.inverted().inverted()
OrderedMultiDict([(0, 2), (1, 2)])
"""
return self.__class__((v, k) for k, v in self.iteritems(multi=True))
def counts(self):
"""Returns a mapping from key to number of values inserted under that
key. Like :py:class:`collections.Counter`, but returns a new
:class:`OrderedMultiDict`.
"""
# Returns an OMD because Counter/OrderedDict may not be
# available, and neither Counter nor dict maintain order.
super_getitem = super(OrderedMultiDict, self).__getitem__
return self.__class__((k, len(super_getitem(k))) for k in self)
def keys(self, multi=False):
"""Returns a list containing the output of :meth:`iterkeys`. See
that method's docs for more details.
"""
return list(self.iterkeys(multi=multi))
def values(self, multi=False):
"""Returns a list containing the output of :meth:`itervalues`. See
that method's docs for more details.
"""
return list(self.itervalues(multi=multi))
def items(self, multi=False):
"""Returns a list containing the output of :meth:`iteritems`. See
that method's docs for more details.
"""
return list(self.iteritems(multi=multi))
def __iter__(self):
return self.iterkeys()
def __reversed__(self):
root = self.root
curr = root[PREV]
lengths = {}
lengths_sd = lengths.setdefault
get_values = super(OrderedMultiDict, self).__getitem__
while curr is not root:
k = curr[KEY]
vals = get_values(k)
if lengths_sd(k, 1) == len(vals):
yield k
lengths[k] += 1
curr = curr[PREV]
def __repr__(self):
cn = self.__class__.__name__
kvs = ', '.join([repr((k, v)) for k, v in self.iteritems(multi=True)])
return '%s([%s])' % (cn, kvs)
def viewkeys(self):
"OMD.viewkeys() -> a set-like object providing a view on OMD's keys"
return KeysView(self)
def viewvalues(self):
"OMD.viewvalues() -> an object providing a view on OMD's values"
return ValuesView(self)
def viewitems(self):
"OMD.viewitems() -> a set-like object providing a view on OMD's items"
return ItemsView(self)
# A couple of convenient aliases
OMD = OrderedMultiDict
MultiDict = OrderedMultiDict
class FastIterOrderedMultiDict(OrderedMultiDict):
"""An OrderedMultiDict backed by a skip list. Iteration over keys
is faster and uses constant memory but adding duplicate key-value
pairs is slower. Brainchild of Mark Williams.
"""
def _clear_ll(self):
# TODO: always reset objects? (i.e., no else block below)
try:
_map = self._map
except AttributeError:
_map = self._map = {}
self.root = []
_map.clear()
self.root[:] = [self.root, self.root,
None, None,
self.root, self.root]
def _insert(self, k, v):
root = self.root
empty = []
cells = self._map.setdefault(k, empty)
last = root[PREV]
if cells is empty:
cell = [last, root,
k, v,
last, root]
# was the last one skipped?
if last[SPREV][SNEXT] is root:
last[SPREV][SNEXT] = cell
last[NEXT] = last[SNEXT] = root[PREV] = root[SPREV] = cell
cells.append(cell)
else:
# if the previous was skipped, go back to the cell that
# skipped it
sprev = last[SPREV] if not (last[SPREV][SNEXT] is last) else last
cell = [last, root,
k, v,
sprev, root]
# skip me
last[SNEXT] = root
last[NEXT] = root[PREV] = root[SPREV] = cell
cells.append(cell)
def _remove(self, k):
cells = self._map[k]
cell = cells.pop()
if not cells:
del self._map[k]
cell[PREV][SNEXT] = cell[SNEXT]
if cell[PREV][SPREV][SNEXT] is cell:
cell[PREV][SPREV][SNEXT] = cell[NEXT]
elif cell[SNEXT] is cell[NEXT]:
cell[SPREV][SNEXT], cell[SNEXT][SPREV] = cell[SNEXT], cell[SPREV]
cell[PREV][NEXT], cell[NEXT][PREV] = cell[NEXT], cell[PREV]
def _remove_all(self, k):
cells = self._map.pop(k)
while cells:
cell = cells.pop()
if cell[PREV][SPREV][SNEXT] is cell:
cell[PREV][SPREV][SNEXT] = cell[NEXT]
elif cell[SNEXT] is cell[NEXT]:
cell[SPREV][SNEXT], cell[SNEXT][SPREV] = cell[SNEXT], cell[SPREV]
cell[PREV][NEXT], cell[NEXT][PREV] = cell[NEXT], cell[PREV]
cell[PREV][SNEXT] = cell[SNEXT]
def iteritems(self, multi=False):
next_link = NEXT if multi else SNEXT
root = self.root
curr = root[next_link]
while curr is not root:
yield curr[KEY], curr[VALUE]
curr = curr[next_link]
def iterkeys(self, multi=False):
next_link = NEXT if multi else SNEXT
root = self.root
curr = root[next_link]
while curr is not root:
yield curr[KEY]
curr = curr[next_link]
def __reversed__(self):
root = self.root
curr = root[PREV]
while curr is not root:
if curr[SPREV][SNEXT] is not curr:
curr = curr[SPREV]
if curr is root:
break
yield curr[KEY]
curr = curr[PREV]
# Tests follow
if __name__ == '__main__':
_ITEMSETS = [[],
[('a', 1), ('b', 2), ('c', 3)],
[('A', 'One'), ('A', 'One'), ('A', 'One')],
[('Z', -1), ('Y', -2), ('Y', -2)],
[('a', 1), ('b', 2), ('a', 3), ('c', 4)]]
def test_dict_init():
d = dict(_ITEMSETS[1])
omd = OMD(d)
assert omd['a'] == 1
assert omd['b'] == 2
assert omd['c'] == 3
assert len(omd) == 3
assert omd.getlist('a') == [1]
assert omd == d
def test_todict():
omd = OMD(_ITEMSETS[2])
assert len(omd) == 1
assert omd['A'] == 'One'
d = dict(omd)
assert len(d) == 1
assert d['A'] == ['One', 'One', 'One']
flat = omd.todict()
assert flat['A'] == 'One'
for itemset in _ITEMSETS:
omd = OMD(itemset)
d = dict(itemset)
flat = omd.todict()
assert flat == d
def test_eq():
omd = OMD(_ITEMSETS[3])
assert omd == omd
assert not (omd != omd)
omd2 = OMD(_ITEMSETS[3])
assert omd == omd2
assert omd2 == omd
assert not (omd != omd2)
d = dict(_ITEMSETS[3])
assert d == omd
omd3 = OMD(d)
assert omd != omd3
def test_copy():
for itemset in _ITEMSETS:
omd = OMD(itemset)
omd_c = omd.copy()
assert omd == omd_c
if omd_c:
omd_c.pop(itemset[0][0])
assert omd != omd_c
return
def test_clear():
for itemset in _ITEMSETS:
omd = OMD(itemset)
omd.clear()
assert len(omd) == 0
assert not omd
omd.clear()
assert not omd
omd['a'] = 22
assert omd
omd.clear()
assert not omd
def test_types():
import collections
omd = OMD()
assert isinstance(omd, dict)
assert isinstance(omd, collections.MutableMapping)
def test_multi_correctness():
size = 100
redun = 5
_rng = range(size)
_rng_redun = range(size/redun) * redun
_pairs = zip(_rng_redun, _rng)
omd = OMD(_pairs)
for multi in (True, False):
vals = [x[1] for x in omd.iteritems(multi=multi)]
strictly_ascending = all([x < y for x, y in zip(vals, vals[1:])])
assert strictly_ascending
return
def test_kv_consistency():
for itemset in _ITEMSETS:
omd = OMD(itemset)
for multi in (True, False):
items = omd.items(multi=multi)
keys = omd.keys(multi=multi)
values = omd.values(multi=multi)
assert keys == [x[0] for x in items]
assert values == [x[1] for x in items]
return
def test_update_basic():
omd = OMD(_ITEMSETS[1])
omd2 = OMD({'a': 10})
omd.update(omd2)
assert omd['a'] == 10
assert omd.getlist('a') == [10]
omd2_c = omd2.copy()
omd2_c.pop('a')
assert omd2 != omd2_c
def test_update():
for first, second in zip(_ITEMSETS, _ITEMSETS[1:]):
omd1 = OMD(first)
omd2 = OMD(second)
ref1 = dict(first)
ref2 = dict(second)
omd1.update(omd2)
ref1.update(ref2)
assert omd1.todict() == ref1
omd1_repr = repr(omd1)
omd1.update(omd1)
assert omd1_repr == repr(omd1)
def test_update_extend():
for first, second in zip(_ITEMSETS, _ITEMSETS[1:] + [[]]):
omd1 = OMD(first)
omd2 = OMD(second)
ref = dict(first)
orig_keys = set(omd1)
ref.update(second)
omd1.update_extend(omd2)
for k in omd2:
assert len(omd1.getlist(k)) >= len(omd2.getlist(k))
assert omd1.todict() == ref
assert orig_keys <= set(omd1)
def test_invert():
for items in _ITEMSETS:
omd = OMD(items)
iomd = omd.inverted()
assert len(omd) == len(iomd)
assert len(omd.items()) == len(iomd.items())
for val in omd.values():
assert val in iomd
def test_poplast():
for items in _ITEMSETS[1:]:
omd = OMD(items)
assert omd.poplast() == items[-1][-1]
def test_reversed():
from collections import OrderedDict
for items in _ITEMSETS:
omd = OMD(items)
od = OrderedDict(items)
for ik, ok in zip(reversed(od), reversed(omd)):
assert ik == ok
r100 = range(100)
omd = OMD(zip(r100, r100))
for i in r100:
omd.add(i, i)
r100.reverse()
assert list(reversed(omd)) == r100
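
    # Added runner (sketch): the ad-hoc tests above were only defined, never
    # invoked; call them when this module is executed directly.
    for _name in sorted(globals().keys()):
        if _name.startswith('test_'):
            globals()[_name]()
            print('%s passed' % _name)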
|
|
import os
import pytest
import glob
from tests.lib.path import Path
from tests.lib import TestFailure
def test_install_from_future_wheel_version(script, data):
"""
Test installing a future wheel
"""
package = data.packages.join("futurewheel-3.0-py2.py3-none-any.whl")
result = script.pip('install', package, '--no-index', expect_error=True)
with pytest.raises(TestFailure):
result.assert_installed('futurewheel', without_egg_link=True,
editable=False)
package = data.packages.join("futurewheel-1.9-py2.py3-none-any.whl")
result = script.pip('install', package, '--no-index', expect_error=False)
result.assert_installed('futurewheel', without_egg_link=True,
editable=False)
def test_install_from_broken_wheel(script, data):
"""
Test that installing a broken wheel fails properly
"""
package = data.packages.join("brokenwheel-1.0-py2.py3-none-any.whl")
result = script.pip('install', package, '--no-index', expect_error=True)
with pytest.raises(TestFailure):
result.assert_installed('futurewheel', without_egg_link=True,
editable=False)
def test_install_from_wheel(script, data):
"""
Test installing from a wheel (that has a script)
"""
result = script.pip(
'install', 'has.script==1.0', '--use-wheel', '--no-index',
'--find-links=' + data.find_links,
expect_error=False,
)
dist_info_folder = script.site_packages / 'has.script-1.0.dist-info'
assert dist_info_folder in result.files_created, (dist_info_folder,
result.files_created,
result.stdout)
script_file = script.bin / 'script.py'
assert script_file in result.files_created
def test_install_from_wheel_with_extras(script, data):
"""
Test installing from a wheel with extras.
"""
result = script.pip(
'install', 'complex-dist[simple]', '--use-wheel', '--no-index',
'--find-links=' + data.find_links,
expect_error=False,
)
dist_info_folder = script.site_packages / 'complex_dist-0.1.dist-info'
assert dist_info_folder in result.files_created, (dist_info_folder,
result.files_created,
result.stdout)
dist_info_folder = script.site_packages / 'simple.dist-0.1.dist-info'
assert dist_info_folder in result.files_created, (dist_info_folder,
result.files_created,
result.stdout)
def test_install_from_wheel_file(script, data):
"""
Test installing directly from a wheel file.
"""
package = data.packages.join("simple.dist-0.1-py2.py3-none-any.whl")
result = script.pip('install', package, '--no-index', expect_error=False)
dist_info_folder = script.site_packages / 'simple.dist-0.1.dist-info'
assert dist_info_folder in result.files_created, (dist_info_folder,
result.files_created,
result.stdout)
# header installs are broken in pypy virtualenvs
# https://github.com/pypa/virtualenv/issues/510
@pytest.mark.skipif("hasattr(sys, 'pypy_version_info')")
def test_install_from_wheel_with_headers(script, data):
"""
Test installing from a wheel file with headers
"""
package = data.packages.join("headers.dist-0.1-py2.py3-none-any.whl")
result = script.pip('install', package, '--no-index', expect_error=False)
dist_info_folder = script.site_packages / 'headers.dist-0.1.dist-info'
assert dist_info_folder in result.files_created, (dist_info_folder,
result.files_created,
result.stdout)
def test_install_wheel_with_target(script, data):
"""
Test installing a wheel using pip install --target
"""
script.pip('install', 'wheel')
target_dir = script.scratch_path / 'target'
result = script.pip(
'install', 'simple.dist==0.1', '-t', target_dir, '--use-wheel',
'--no-index', '--find-links=' + data.find_links,
)
assert Path('scratch') / 'target' / 'simpledist' in result.files_created, (
str(result)
)
def test_install_wheel_with_root(script, data):
"""
Test installing a wheel using pip install --root
"""
root_dir = script.scratch_path / 'root'
result = script.pip(
'install', 'simple.dist==0.1', '--root', root_dir, '--use-wheel',
'--no-index', '--find-links=' + data.find_links,
)
assert Path('scratch') / 'root' in result.files_created
def test_install_from_wheel_installs_deps(script, data):
"""
Test can install dependencies of wheels
"""
# 'requires_source' depends on the 'source' project
package = data.packages.join("requires_source-1.0-py2.py3-none-any.whl")
result = script.pip(
'install', '--no-index', '--find-links', data.find_links, package,
)
result.assert_installed('source', editable=False)
def test_install_from_wheel_no_deps(script, data):
"""
Test --no-deps works with wheel installs
"""
# 'requires_source' depends on the 'source' project
package = data.packages.join("requires_source-1.0-py2.py3-none-any.whl")
result = script.pip(
'install', '--no-index', '--find-links', data.find_links, '--no-deps',
package,
)
pkg_folder = script.site_packages / 'source'
assert pkg_folder not in result.files_created
def test_install_user_wheel(script, virtualenv, data):
"""
Test user install from wheel (that has a script)
"""
virtualenv.system_site_packages = True
script.pip('install', 'wheel')
result = script.pip(
'install', 'has.script==1.0', '--user', '--use-wheel', '--no-index',
'--find-links=' + data.find_links,
)
egg_info_folder = script.user_site / 'has.script-1.0.dist-info'
assert egg_info_folder in result.files_created, str(result)
script_file = script.user_bin / 'script.py'
assert script_file in result.files_created
def test_install_from_wheel_gen_entrypoint(script, data):
"""
Test installing scripts (entry points are generated)
"""
result = script.pip(
'install', 'script.wheel1a==0.1', '--use-wheel', '--no-index',
'--find-links=' + data.find_links,
expect_error=False,
)
if os.name == 'nt':
wrapper_file = script.bin / 't1.exe'
else:
wrapper_file = script.bin / 't1'
assert wrapper_file in result.files_created
if os.name != "nt":
assert bool(os.access(script.base_path / wrapper_file, os.X_OK))
def test_install_from_wheel_with_legacy(script, data):
"""
Test installing scripts (legacy scripts are preserved)
"""
result = script.pip(
'install', 'script.wheel2a==0.1', '--use-wheel', '--no-index',
'--find-links=' + data.find_links,
expect_error=False,
)
legacy_file1 = script.bin / 'testscript1.bat'
legacy_file2 = script.bin / 'testscript2'
assert legacy_file1 in result.files_created
assert legacy_file2 in result.files_created
def test_install_from_wheel_no_setuptools_entrypoint(script, data):
"""
Test that when we generate scripts, any existing setuptools wrappers in
the wheel are skipped.
"""
result = script.pip(
'install', 'script.wheel1==0.1', '--use-wheel', '--no-index',
'--find-links=' + data.find_links,
expect_error=False,
)
if os.name == 'nt':
wrapper_file = script.bin / 't1.exe'
else:
wrapper_file = script.bin / 't1'
wrapper_helper = script.bin / 't1-script.py'
# The wheel has t1.exe and t1-script.py. We will be generating t1 or
# t1.exe depending on the platform. So we check that the correct wrapper
# is present and that the -script.py helper has been skipped. We can't
# easily test that the wrapper from the wheel has been skipped /
# overwritten without getting very platform-dependent, so omit that.
assert wrapper_file in result.files_created
assert wrapper_helper not in result.files_created
def test_skipping_setuptools_doesnt_skip_legacy(script, data):
"""
Test installing scripts (legacy scripts are preserved even when we skip
setuptools wrappers)
"""
result = script.pip(
'install', 'script.wheel2==0.1', '--use-wheel', '--no-index',
'--find-links=' + data.find_links,
expect_error=False,
)
legacy_file1 = script.bin / 'testscript1.bat'
legacy_file2 = script.bin / 'testscript2'
wrapper_helper = script.bin / 't1-script.py'
assert legacy_file1 in result.files_created
assert legacy_file2 in result.files_created
assert wrapper_helper not in result.files_created
def test_install_from_wheel_gui_entrypoint(script, data):
"""
Test installing scripts (gui entry points are generated)
"""
result = script.pip(
'install', 'script.wheel3==0.1', '--use-wheel', '--no-index',
'--find-links=' + data.find_links,
expect_error=False,
)
if os.name == 'nt':
wrapper_file = script.bin / 't1.exe'
else:
wrapper_file = script.bin / 't1'
assert wrapper_file in result.files_created
def test_wheel_compiles_pyc(script, data):
"""
Test installing from wheel with --compile on
"""
script.pip(
"install", "--compile", "simple.dist==0.1", "--no-index",
"--find-links=" + data.find_links
)
# There are many locations for the __init__.pyc file so attempt to find
# any of them
exists = [
os.path.exists(script.site_packages_path / "simpledist/__init__.pyc"),
]
exists += glob.glob(
script.site_packages_path / "simpledist/__pycache__/__init__*.pyc"
)
assert any(exists)
def test_wheel_no_compiles_pyc(script, data):
"""
    Test installing from wheel with --no-compile on
"""
script.pip(
"install", "--no-compile", "simple.dist==0.1", "--no-index",
"--find-links=" + data.find_links
)
# There are many locations for the __init__.pyc file so attempt to find
# any of them
exists = [
os.path.exists(script.site_packages_path / "simpledist/__init__.pyc"),
]
exists += glob.glob(
script.site_packages_path / "simpledist/__pycache__/__init__*.pyc"
)
assert not any(exists)
def test_install_from_wheel_uninstalls_old_version(script, data):
# regression test for https://github.com/pypa/pip/issues/1825
package = data.packages.join("simplewheel-1.0-py2.py3-none-any.whl")
result = script.pip('install', package, '--no-index', expect_error=True)
package = data.packages.join("simplewheel-2.0-py2.py3-none-any.whl")
result = script.pip('install', package, '--no-index', expect_error=False)
dist_info_folder = script.site_packages / 'simplewheel-2.0.dist-info'
assert dist_info_folder in result.files_created
dist_info_folder = script.site_packages / 'simplewheel-1.0.dist-info'
assert dist_info_folder not in result.files_created
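

# Added note: these functional tests rely on the `script` and `data` fixtures
# from pip's tests.lib conftest and are meant to run inside a pip source
# checkout, e.g. (path assumed):
#
#   pytest tests/functional/test_install_wheel.py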
|
|
from math import *
import numarray as N
def fit(x, y, sig=None):
    #Given a set of data points x, y,
    #with individual standard deviations sig,
    #fit them to a straight line y = a + bx by minimizing chi-sq.
    #Returned are a, b, their respective probable uncertainties siga and sigb,
    #and the chi-square chi2.
    #If sig is None (or all ones), the standard deviations are assumed to be
    #unavailable: the normalization of chi2 is to unit standard deviation on
    #all points, and siga/sigb are rescaled by the scatter sigdat.
if sig is None or N.all(sig == 1.0):
mwt=False
else:
mwt=True
sx=0.0
sy=0.0
st2=0.0
b=0.0
ndata = len(x)
if mwt:
#Accumulate sums ...
ss=0.0
for i in range(ndata):
#...with weights
wt=1.0/sig[i]**2
ss += wt
sx += x[i]*wt
sy += y[i]*wt
else:
for i in range(ndata):
#...or without weights.
sx += x[i]
sy += y[i]
ss=ndata
sxoss=sx/ss
if mwt:
for i in range(ndata):
t=(x[i]-sxoss)/sig[i]
st2 += t*t
b += t*y[i]/sig[i]
else:
for i in range(ndata):
t=x[i]-sxoss
st2 += t*t
b += t*y[i]
b /= st2
#Solve for a, b, siga, and sigb.
a=(sy-sx*b)/ss
siga=sqrt((1.0+sx*sx/(ss*st2))/ss)
sigb=sqrt(1.0/st2)
#Calculate chi2.
chi2=0.0
if mwt:
for i in range(ndata):
chi2 += ((y[i] - a - b*x[i]) / sig[i])**2
else:
#For unweighted data evaluate typical sig using chi2,
#and adjust the standard deviations.
for i in range(ndata):
chi2 += (y[i] - a - b*x[i])**2
sigdat=sqrt(chi2/(ndata-2))
siga *= sigdat
sigb *= sigdat
return a, b, siga, sigb, chi2
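
# Added usage sketch: recover a = 2, b = 3 from noiseless data. With sig
# omitted the fit is unweighted and chi2 should come out ~0.
def _demo_fit():
    x = [0.0, 1.0, 2.0, 3.0]
    y = [2.0 + 3.0*xi for xi in x]
    a, b, siga, sigb, chi2 = fit(x, y)
    print "a=%g b=%g chi2=%g" % (a, b, chi2)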
def fit_slope(x, y, sig, mwt, a=0.0, siga=0.0):
#Given a set of data points x, y,
#with individual standard deviations sig,
#fit them to a straight line y = a + bx by minimizing chi-sq,
#where the intercept of the line is fixed.
    #Returned are b, its probable uncertainty sigb,
    #and the chi-square chi2.
#If mwt=0 on input, then the standard deviations are assumed to be unavailable:
#the normalization of chi2 is to unit standard deviation on all points.
sx=0.0
sy=0.0
sxx=0.0
sxy=0.0
b=0.0
ndata = len(x)
if mwt:
#Accumulate sums ...
ss = 0.0
for i in range(ndata):
#...with weights
wt=1.0/sig[i]**2
ss += wt
sx += x[i]*wt
sy += y[i]*wt
sxx += x[i]*x[i]*wt
sxy += x[i]*y[i]*wt
else:
for i in range(ndata):
#...or without weights.
sx += x[i]
sy += y[i]
sxx += x[i]*x[i]
sxy += x[i]*y[i]
ss=ndata
#Solve for b and sigb.
b = (sxy - a*sx) / sxx
sigb = sqrt(1.0 / sxx + (siga * sx / sxx)**2)
chi2=0.0
#Calculate chi2.
if mwt:
for i in range(ndata):
chi2 += ((y[i] - a - b*x[i]) / sig[i])**2
else:
#For unweighted data evaluate typical sig using chi2,
#and adjust the standard deviations.
for i in range(ndata):
chi2 += (y[i] - a - b*x[i])**2
sigdat=sqrt(chi2/(ndata-1))
sigb *= sigdat
return b, sigb, chi2
def fit_intercept(x, y, sig, mwt, b=0.0, sigb=0.0):
#Given a set of data points x, y,
#with individual standard deviations sig,
#fit them to a straight line y = a + bx by minimizing chi-sq,
    #where the slope of the line is fixed.
    #Returned are a, its probable uncertainty siga,
    #and the chi-square chi2.
#If mwt=0 on input, then the standard deviations are assumed to be unavailable:
#the normalization of chi2 is to unit standard deviation on all points.
sx=0.0
sy=0.0
a=0.0
ndata = len(x)
if mwt:
#Accumulate sums ...
ss = 0.0
for i in range(ndata):
#...with weights
wt=1.0/sig[i]**2
ss += wt
sx += x[i]*wt
sy += y[i]*wt
else:
for i in range(ndata):
#...or without weights.
sx += x[i]
sy += y[i]
ss=ndata
#Solve for a and siga.
a = (sy - b*sx) / ss
siga = sqrt(1.0 / ss + (sigb * sx / ss)**2)
chi2=0.0
#Calculate chi2.
if mwt:
for i in range(ndata):
chi2 += ((y[i] - a - b*x[i]) / sig[i])**2
else:
#For unweighted data evaluate typical sig using chi2,
#and adjust the standard deviations.
for i in range(ndata):
chi2 += (y[i] - a - b*x[i])**2
sigdat=sqrt(chi2/(ndata-1))
siga *= sigdat
return a, siga, chi2
def fit_i(x, y, sig, int_scat=0.0):
#Given a set of data points x, y,
#with individual measurement error standard deviations sig,
#fit them to a straight line relation y = a + bx
#with intrinsic scatter int_scat by minimizing chi-sq.
    #Returned are a, b, their respective probable uncertainties siga and sigb,
    #and the chi-square chi2.
    #The individual errors are always used here: the intrinsic scatter is
    #added in quadrature, sigtot**2 = sig**2 + int_scat**2.
sigtot = N.sqrt(sig**2 + int_scat**2)
return fit(x, y, sigtot)
def fit_slope_i(x, y, sig, int_scat=0.0, a=0.0, siga=0.0):
#Given a set of data points x, y,
#with individual standard deviations sig,
#fit them to a straight line y = a + bx
    #with intrinsic scatter int_scat by minimizing chi-sq,
    #where the intercept of the line is fixed.
    #Returned are b, its probable uncertainty sigb, and the chi-square chi2.
    #The individual errors are always used (mwt=1); the intrinsic scatter is
    #added in quadrature, sigtot**2 = sig**2 + int_scat**2.
sigtot = N.sqrt(sig**2 + int_scat**2)
return fit_slope(x, y, sigtot, 1, a, siga)
def fit_intercept_i(x, y, sig, int_scat, b=0.0, sigb=0.0):
#Given a set of data points x, y,
#with individual standard deviations sig,
    #fit them to a straight line y = a + bx
    #with intrinsic scatter int_scat by minimizing chi-sq,
    #where the slope of the line is fixed.
    #Returned are a, its probable uncertainty siga, and the chi-square chi2.
    #The individual errors are always used (mwt=1); the intrinsic scatter is
    #added in quadrature, sigtot**2 = sig**2 + int_scat**2.
sigtot = N.sqrt(sig**2 + int_scat**2)
return fit_intercept(x, y, sigtot, 1, b, sigb)
def ttest(n1, ave1, var1, n2, ave2, var2):
df = n1+n2-2
svar = ((n1-1)*var1+(n2-1)*var2)/df
t = (ave1-ave2)/sqrt(svar*(1.0/n1+1.0/n2))
prob = betai(0.5*df,0.5,df/(df+t**2))
return t, prob
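#Sanity check (illustrative): identical sample means give t = 0, so
#prob = betai(0.5*df, 0.5, 1.0) = 1.0, i.e. no evidence the means differ.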
def tutest(n1, ave1, var1, n2, ave2, var2):
t = (ave1-ave2)/sqrt(var1/n1+var2/n2)
df = (var1/n1+var2/n2)**2/((var1/n1)**2/(n1-1)+(var2/n2)**2/(n2-1))
prob = betai(0.5*df,0.5,df/(df+t**2))
return t, prob
def ftest_samples(d1, d2):
m1 = mean(d1)
v1 = variance(d1, m=m1)
m2 = mean(d2)
v2 = variance(d2, m=m2)
if v1 > v2:
vr = v1/v2
df1 = len(d1)-1
df2 = len(d2)-1
else:
vr = v2/v1
df1 = len(d2)-1
df2 = len(d1)-1
prob = 2.0*betai(0.5*df2, 0.5*df1, df2/(df2+df1*vr))
if prob > 1.0: prob = 2.0 - prob
return prob
def ftest(n1, v1, n2, v2):
if v1 > v2:
vr = v1/v2
df1 = n1-1
df2 = n2-1
else:
vr = v2/v1
df1 = n2-1
df2 = n1-1
prob = 2.0*betai(0.5*df2, 0.5*df1, df2/(df2+df1*vr))
if prob > 1.0: prob = 2.0 - prob
return prob
def betai(a, b, x):
if (x < 0.0 or x > 1.0):
print "Bad x in routine betai"
if (x == 0.0 or x == 1.0):
bt=0.0
else:
bt = exp(gammln(a+b) - gammln(a) - gammln(b) + a*log(x) + b*log(1.0-x))
if (x < (a+1.0)/(a+b+2.0)):
return bt * betacf(a,b,x)/a
else:
return 1.0 - bt * betacf(b,a,1.0-x)/b
def betacf(a, b, x):
qab=a+b
qap=a+1.0
qam=a-1.0
c=1.0
d=1.0-qab*x/qap
FPMIN = 1.0e-30
EPS = 3.0e-7
if (abs(d) < FPMIN): d=FPMIN
d=1.0/d
h=d
for m in range(1, 101):
m2=2*m
aa=m*(b-m)*x/((qam+m2)*(a+m2))
d=1.0+aa*d
if (abs(d) < FPMIN): d=FPMIN
c=1.0+aa/c
if (abs(c) < FPMIN): c=FPMIN
d=1.0/d
h *= d*c
aa = -(a+m)*(qab+m)*x/((a+m2)*(qap+m2))
d=1.0+aa*d
if (abs(d) < FPMIN): d=FPMIN
c=1.0+aa/c
if (abs(c) < FPMIN): c=FPMIN
d=1.0/d
delta=d*c
h *= delta
if (abs(delta-1.0) < EPS): break
    else:
        #for/else clause: runs only when the loop never met the
        #convergence test within MAXIT iterations.
        print("a or b too big, or MAXIT too small in betacf")
return h
def gammln(xx):
cof = [76.18009172947146,-86.50532032941677,
24.01409824083091,-1.231739572450155,
0.1208650973866179e-2,-0.5395239384953e-5]
y=x=xx
tmp=x+5.5
tmp -= (x+0.5)*log(tmp)
ser=1.000000000190015
for j in range(6):
y = y+1
ser += cof[j]/y
return -tmp+log(2.5066282746310005*ser/x)
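#Quick checks (illustrative): gammln(1.0) ~ 0.0 since Gamma(1) = 1, and
#gammln(0.5) ~ 0.5724, i.e. log(sqrt(pi)).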
def gammp(a, x):
if (x < 0.0 or a <= 0.0):
print "Invalid arguments in routine gammp"
if (x < (a+1.0)):
gamser, gln = gser(a,x)
return gamser
else:
gammcf, gln = gcf(a,x)
return 1.0-gammcf
def gser(a, x):
    gln=gammln(a)
    if (x <= 0.0):
        if (x < 0.0):
            print("x less than 0 in routine gser")
        gamser=0.0
        return gamser, gln
else:
ap=a
delta=sum=1.0/a
for n in range(1, 101):
ap +=1
delta *= x/ap
sum += delta
if (abs(delta) < abs(sum)*3.0e-7):
gamser=sum*exp(-x+a*log(x)-(gln))
break
        else:
            #for/else clause: runs only when the series failed to converge
            #within ITMAX iterations.
            print("a too large, ITMAX too small in routine gser")
return gamser, gln
def gcf(a, x):
gln=gammln(a)
b=x+1.0-a
FPMIN = 1.0e-30
c=1.0/FPMIN
d=1.0/b
h=d
for i in range(1, 101):
an = -i*(i-a)
b += 2.0
d=an*d+b
if (abs(d) < FPMIN): d=FPMIN
c=b+an/c
if (abs(c) < FPMIN): c=FPMIN
d=1.0/d
delta=d*c
h *= delta
if (abs(delta-1.0) < 3.0e-7): break
    else:
        #for/else clause: runs only when the continued fraction failed to
        #converge within ITMAX iterations.
        print("a too large, ITMAX too small in gcf")
    gammcf=exp(-x+a*log(x)-(gln))*h
return gammcf, gln
def erff(x):
e = gammp(0.5,x*x)
if x < 0.0: e = -e
return e
def erffc(x):
return 1.0 - erff(x)
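#Illustrative values: erff(1.0) ~ 0.8427 and erffc(1.0) ~ 0.1573, matching
#the standard error function, since gammp(0.5, x*x) = erf(|x|).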
def mean(x, s=None):
# calculates the weighted mean of x with errors s
if s is None:
return N.sum(x) / len(x)
else:
w = 1.0/s**2
sumw = N.sum(w)
sumx = N.sum(w*x)
return sumx/sumw
def variance(x, s=None, m=None):
# calculates the weighted variance of x with errors s and mean m
if m is None: m = mean(x, s)
if s is None:
sumdx2 = N.sum((x - m)**2)
return sumdx2/len(x)
else:
w = 1.0/s**2
sumw = N.sum(w)
sumdx2 = N.sum(w * (x - m)**2)
return sumdx2/sumw
def stderr(x, s, m):
# calculates the standard error of x with errors s about mean m
w = 1.0/s**2
sumw = N.sum(w)
sumw2 = N.sum(w*w)
sumdx2 = N.sum(w * (x - m)**2)
return sqrt(sumdx2/sumw2)
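if __name__ == '__main__':
    #Smoke-test sketch (not part of the original routines). It assumes the
    #module-level imports used above (numpy as N, plus sqrt/exp/log) exist.
    import numpy as _np
    xs = _np.arange(5.0)
    ys = 1.5 + 0.5*xs
    errs = _np.full(5, 0.1)
    a, b, siga, sigb, chi2 = fit(xs, ys, errs)
    #A perfect line should recover a ~ 1.5, b ~ 0.5 with chi2 ~ 0.
    print("a=%.3f b=%.3f chi2=%.3g" % (a, b, chi2))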
|
|
"""Support for AirVisual air quality sensors."""
from logging import getLogger
from homeassistant.const import (
ATTR_LATITUDE,
ATTR_LONGITUDE,
ATTR_STATE,
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
CONCENTRATION_PARTS_PER_BILLION,
CONCENTRATION_PARTS_PER_MILLION,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_SHOW_ON_MAP,
CONF_STATE,
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_TEMPERATURE,
PERCENTAGE,
TEMP_CELSIUS,
)
from homeassistant.core import callback
from . import AirVisualEntity
from .const import (
CONF_CITY,
CONF_COUNTRY,
CONF_INTEGRATION_TYPE,
DATA_COORDINATOR,
DOMAIN,
INTEGRATION_TYPE_GEOGRAPHY,
)
_LOGGER = getLogger(__name__)
ATTR_CITY = "city"
ATTR_COUNTRY = "country"
ATTR_POLLUTANT_SYMBOL = "pollutant_symbol"
ATTR_POLLUTANT_UNIT = "pollutant_unit"
ATTR_REGION = "region"
SENSOR_KIND_LEVEL = "air_pollution_level"
SENSOR_KIND_AQI = "air_quality_index"
SENSOR_KIND_POLLUTANT = "main_pollutant"
SENSOR_KIND_BATTERY_LEVEL = "battery_level"
SENSOR_KIND_HUMIDITY = "humidity"
SENSOR_KIND_TEMPERATURE = "temperature"
GEOGRAPHY_SENSORS = [
(SENSOR_KIND_LEVEL, "Air Pollution Level", "mdi:gauge", None),
(SENSOR_KIND_AQI, "Air Quality Index", "mdi:chart-line", "AQI"),
(SENSOR_KIND_POLLUTANT, "Main Pollutant", "mdi:chemical-weapon", None),
]
GEOGRAPHY_SENSOR_LOCALES = {"cn": "Chinese", "us": "U.S."}
NODE_PRO_SENSORS = [
(SENSOR_KIND_BATTERY_LEVEL, "Battery", DEVICE_CLASS_BATTERY, PERCENTAGE),
(SENSOR_KIND_HUMIDITY, "Humidity", DEVICE_CLASS_HUMIDITY, PERCENTAGE),
(SENSOR_KIND_TEMPERATURE, "Temperature", DEVICE_CLASS_TEMPERATURE, TEMP_CELSIUS),
]
POLLUTANT_LEVEL_MAPPING = [
{"label": "Good", "icon": "mdi:emoticon-excited", "minimum": 0, "maximum": 50},
{"label": "Moderate", "icon": "mdi:emoticon-happy", "minimum": 51, "maximum": 100},
{
"label": "Unhealthy for sensitive groups",
"icon": "mdi:emoticon-neutral",
"minimum": 101,
"maximum": 150,
},
{"label": "Unhealthy", "icon": "mdi:emoticon-sad", "minimum": 151, "maximum": 200},
{
"label": "Very Unhealthy",
"icon": "mdi:emoticon-dead",
"minimum": 201,
"maximum": 300,
},
{"label": "Hazardous", "icon": "mdi:biohazard", "minimum": 301, "maximum": 10000},
]
POLLUTANT_MAPPING = {
"co": {"label": "Carbon Monoxide", "unit": CONCENTRATION_PARTS_PER_MILLION},
"n2": {"label": "Nitrogen Dioxide", "unit": CONCENTRATION_PARTS_PER_BILLION},
"o3": {"label": "Ozone", "unit": CONCENTRATION_PARTS_PER_BILLION},
"p1": {"label": "PM10", "unit": CONCENTRATION_MICROGRAMS_PER_CUBIC_METER},
"p2": {"label": "PM2.5", "unit": CONCENTRATION_MICROGRAMS_PER_CUBIC_METER},
"s2": {"label": "Sulfur Dioxide", "unit": CONCENTRATION_PARTS_PER_BILLION},
}
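# Illustrative example: a "us"-locale payload such as
# {"aqius": 42, "mainus": "p2"} resolves to the 0-50 "Good" level above and
# to "PM2.5" (reported in µg/m³) as the main pollutant.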
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up AirVisual sensors based on a config entry."""
coordinator = hass.data[DOMAIN][DATA_COORDINATOR][config_entry.entry_id]
if config_entry.data[CONF_INTEGRATION_TYPE] == INTEGRATION_TYPE_GEOGRAPHY:
sensors = [
AirVisualGeographySensor(
coordinator,
config_entry,
kind,
name,
icon,
unit,
locale,
)
for locale in GEOGRAPHY_SENSOR_LOCALES
for kind, name, icon, unit in GEOGRAPHY_SENSORS
]
else:
sensors = [
AirVisualNodeProSensor(coordinator, kind, name, device_class, unit)
for kind, name, device_class, unit in NODE_PRO_SENSORS
]
async_add_entities(sensors, True)
class AirVisualGeographySensor(AirVisualEntity):
"""Define an AirVisual sensor related to geography data via the Cloud API."""
def __init__(self, coordinator, config_entry, kind, name, icon, unit, locale):
"""Initialize."""
super().__init__(coordinator)
self._attrs.update(
{
ATTR_CITY: config_entry.data.get(CONF_CITY),
ATTR_STATE: config_entry.data.get(CONF_STATE),
ATTR_COUNTRY: config_entry.data.get(CONF_COUNTRY),
}
)
self._config_entry = config_entry
self._icon = icon
self._kind = kind
self._locale = locale
self._name = name
self._state = None
self._unit = unit
@property
def available(self):
"""Return True if entity is available."""
try:
return self.coordinator.last_update_success and bool(
self.coordinator.data["current"]["pollution"]
)
except KeyError:
return False
@property
def name(self):
"""Return the name."""
return f"{GEOGRAPHY_SENSOR_LOCALES[self._locale]} {self._name}"
@property
def state(self):
"""Return the state."""
return self._state
@property
def unique_id(self):
"""Return a unique, Home Assistant friendly identifier for this entity."""
return f"{self._config_entry.unique_id}_{self._locale}_{self._kind}"
@callback
def update_from_latest_data(self):
"""Update the entity from the latest data."""
try:
data = self.coordinator.data["current"]["pollution"]
except KeyError:
return
if self._kind == SENSOR_KIND_LEVEL:
aqi = data[f"aqi{self._locale}"]
[level] = [
i
for i in POLLUTANT_LEVEL_MAPPING
if i["minimum"] <= aqi <= i["maximum"]
]
self._state = level["label"]
self._icon = level["icon"]
elif self._kind == SENSOR_KIND_AQI:
self._state = data[f"aqi{self._locale}"]
elif self._kind == SENSOR_KIND_POLLUTANT:
symbol = data[f"main{self._locale}"]
self._state = POLLUTANT_MAPPING[symbol]["label"]
self._attrs.update(
{
ATTR_POLLUTANT_SYMBOL: symbol,
ATTR_POLLUTANT_UNIT: POLLUTANT_MAPPING[symbol]["unit"],
}
)
if CONF_LATITUDE in self._config_entry.data:
if self._config_entry.options[CONF_SHOW_ON_MAP]:
self._attrs[ATTR_LATITUDE] = self._config_entry.data[CONF_LATITUDE]
self._attrs[ATTR_LONGITUDE] = self._config_entry.data[CONF_LONGITUDE]
self._attrs.pop("lati", None)
self._attrs.pop("long", None)
else:
self._attrs["lati"] = self._config_entry.data[CONF_LATITUDE]
self._attrs["long"] = self._config_entry.data[CONF_LONGITUDE]
self._attrs.pop(ATTR_LATITUDE, None)
self._attrs.pop(ATTR_LONGITUDE, None)
class AirVisualNodeProSensor(AirVisualEntity):
"""Define an AirVisual sensor related to a Node/Pro unit."""
def __init__(self, coordinator, kind, name, device_class, unit):
"""Initialize."""
super().__init__(coordinator)
self._device_class = device_class
self._kind = kind
self._name = name
self._state = None
self._unit = unit
@property
def device_class(self):
"""Return the device class."""
return self._device_class
@property
def device_info(self):
"""Return device registry information for this entity."""
return {
"identifiers": {(DOMAIN, self.coordinator.data["serial_number"])},
"name": self.coordinator.data["settings"]["node_name"],
"manufacturer": "AirVisual",
"model": f'{self.coordinator.data["status"]["model"]}',
"sw_version": (
f'Version {self.coordinator.data["status"]["system_version"]}'
f'{self.coordinator.data["status"]["app_version"]}'
),
}
@property
def name(self):
"""Return the name."""
node_name = self.coordinator.data["settings"]["node_name"]
return f"{node_name} Node/Pro: {self._name}"
@property
def state(self):
"""Return the state."""
return self._state
@property
def unique_id(self):
"""Return a unique, Home Assistant friendly identifier for this entity."""
return f"{self.coordinator.data['serial_number']}_{self._kind}"
@callback
def update_from_latest_data(self):
"""Update the entity from the latest data."""
if self._kind == SENSOR_KIND_BATTERY_LEVEL:
self._state = self.coordinator.data["status"]["battery"]
elif self._kind == SENSOR_KIND_HUMIDITY:
self._state = self.coordinator.data["measurements"].get("humidity")
elif self._kind == SENSOR_KIND_TEMPERATURE:
self._state = self.coordinator.data["measurements"].get("temperature_C")
|
|
#!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manage user accounts on a Google Compute Engine instances."""
import datetime
import json
import logging.handlers
import optparse
import random
from google_compute_engine import config_manager
from google_compute_engine import file_utils
from google_compute_engine import logger
from google_compute_engine import metadata_watcher
from google_compute_engine.accounts import accounts_utils
LOCKFILE = '/var/lock/google_accounts.lock'
class AccountsDaemon(object):
"""Manage user accounts based on changes to metadata."""
invalid_users = set()
user_ssh_keys = {}
def __init__(self, groups=None, remove=False, debug=False):
"""Constructor.
Args:
groups: string, a comma separated list of groups.
remove: bool, True if deprovisioning a user should be destructive.
debug: bool, True if debug output should write to the console.
"""
facility = logging.handlers.SysLogHandler.LOG_DAEMON
self.logger = logger.Logger(
name='google-accounts', debug=debug, facility=facility)
self.watcher = metadata_watcher.MetadataWatcher(logger=self.logger)
self.utils = accounts_utils.AccountsUtils(
logger=self.logger, groups=groups, remove=remove)
try:
with file_utils.LockFile(LOCKFILE):
self.logger.info('Starting Google Accounts daemon.')
timeout = 60 + random.randint(0, 30)
self.watcher.WatchMetadata(
self.HandleAccounts, recursive=True, timeout=timeout)
except (IOError, OSError) as e:
self.logger.warning(str(e))
def _HasExpired(self, key):
"""Check whether an SSH key has expired.
Uses Google-specific semantics of the OpenSSH public key format's comment
field to determine if an SSH key is past its expiration timestamp, and
therefore no longer to be trusted. This format is still subject to change.
Reliance on it in any way is at your own risk.
Args:
key: string, a single public key entry in OpenSSH public key file format.
This will be checked for Google-specific comment semantics, and if
present, those will be analysed.
Returns:
bool, True if the key has Google-specific comment semantics and has an
expiration timestamp in the past, or False otherwise.
"""
self.logger.debug('Processing key: %s.', key)
try:
schema, json_str = key.split(None, 3)[2:]
except (ValueError, AttributeError):
self.logger.debug('No schema identifier. Not expiring key.')
return False
if schema != 'google-ssh':
self.logger.debug('Invalid schema %s. Not expiring key.', schema)
return False
try:
json_obj = json.loads(json_str)
except ValueError:
self.logger.debug('Invalid JSON %s. Not expiring key.', json_str)
return False
if 'expireOn' not in json_obj:
self.logger.debug('No expiration timestamp. Not expiring key.')
return False
expire_str = json_obj['expireOn']
format_str = '%Y-%m-%dT%H:%M:%S+0000'
try:
expire_time = datetime.datetime.strptime(expire_str, format_str)
except ValueError:
self.logger.warning(
'Expiration timestamp "%s" not in format %s. Not expiring key.',
expire_str, format_str)
return False
# Expire the key if and only if we have exceeded the expiration timestamp.
return datetime.datetime.utcnow() > expire_time
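  # Illustrative key entry with Google-specific comment semantics (all
  # values made up):
  #   'ssh-rsa AAAA... google-ssh {"userName":"alice","expireOn":"2016-01-01T00:00:00+0000"}'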
def _ParseAccountsData(self, account_data):
"""Parse the SSH key data into a user map.
Args:
account_data: string, the metadata server SSH key attributes data.
Returns:
dict, a mapping of the form: {'username': ['sshkey1, 'sshkey2', ...]}.
"""
if not account_data:
return {}
lines = [line for line in account_data.splitlines() if line]
user_map = {}
for line in lines:
if not all(ord(c) < 128 for c in line):
self.logger.info('SSH key contains non-ascii character: %s.', line)
continue
split_line = line.split(':', 1)
if len(split_line) != 2:
self.logger.info('SSH key is not a complete entry: %s.', split_line)
continue
user, key = split_line
if self._HasExpired(key):
self.logger.debug('Expired SSH key for user %s: %s.', user, key)
continue
if user not in user_map:
user_map[user] = []
user_map[user].append(key)
    self.logger.debug('User accounts: %s.', user_map)
return user_map
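  # Illustrative input/output for _ParseAccountsData (names and keys are
  # made up):
  #   'alice:ssh-rsa AAAA... alice\nbob:ssh-rsa BBBB... bob'
  # parses to {'alice': ['ssh-rsa AAAA... alice'],
  #            'bob': ['ssh-rsa BBBB... bob']}.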
def _GetAccountsData(self, metadata_dict):
"""Get the user accounts specified in metadata server contents.
Args:
metadata_dict: json, the deserialized contents of the metadata server.
Returns:
dict, a mapping of the form: {'username': ['sshkey1, 'sshkey2', ...]}.
"""
metadata_dict = metadata_dict or {}
try:
instance_data = metadata_dict['instance']['attributes']
except KeyError:
instance_data = {}
self.logger.warning('Instance attributes were not found.')
try:
project_data = metadata_dict['project']['attributes']
except KeyError:
project_data = {}
self.logger.warning('Project attributes were not found.')
valid_keys = [instance_data.get('sshKeys'), instance_data.get('ssh-keys')]
block_project = instance_data.get('block-project-ssh-keys', '').lower()
if block_project != 'true' and not instance_data.get('sshKeys'):
valid_keys.append(project_data.get('ssh-keys'))
valid_keys.append(project_data.get('sshKeys'))
accounts_data = '\n'.join([key for key in valid_keys if key])
return self._ParseAccountsData(accounts_data)
def _UpdateUsers(self, update_users):
"""Provision and update Linux user accounts based on account metadata.
Args:
update_users: dict, authorized users mapped to their public SSH keys.
"""
for user, ssh_keys in update_users.items():
if not user or user in self.invalid_users:
continue
configured_keys = self.user_ssh_keys.get(user, [])
if set(ssh_keys) != set(configured_keys):
if not self.utils.UpdateUser(user, ssh_keys):
self.invalid_users.add(user)
else:
self.user_ssh_keys[user] = ssh_keys[:]
def _RemoveUsers(self, remove_users):
"""Deprovision Linux user accounts that do not appear in account metadata.
Args:
remove_users: list, the username strings of the Linux accounts to remove.
"""
for username in remove_users:
self.utils.RemoveUser(username)
self.invalid_users -= set(remove_users)
def HandleAccounts(self, result):
"""Called when there are changes to the contents of the metadata server.
Args:
result: json, the deserialized contents of the metadata server.
"""
self.logger.debug('Checking for changes to user accounts.')
configured_users = self.utils.GetConfiguredUsers()
desired_users = self._GetAccountsData(result)
remove_users = sorted(set(configured_users) - set(desired_users.keys()))
self._UpdateUsers(desired_users)
self._RemoveUsers(remove_users)
self.utils.SetConfiguredUsers(desired_users.keys())
def main():
parser = optparse.OptionParser()
parser.add_option(
'-d', '--debug', action='store_true', dest='debug',
help='print debug output to the console.')
(options, _) = parser.parse_args()
instance_config = config_manager.ConfigManager()
if instance_config.GetOptionBool('Daemons', 'accounts_daemon'):
AccountsDaemon(
groups=instance_config.GetOptionString('Accounts', 'groups'),
remove=instance_config.GetOptionBool('Accounts', 'deprovision_remove'),
debug=bool(options.debug))
if __name__ == '__main__':
main()
|
|
def chr_is_hex(c):
return (c >= '0' and c <= '9') or (c >= 'a' and c <= 'f') or (c >= 'A' and c <= 'F')
def chr_is_oct(c):
return c >= '0' and c <= '7'
def chr_is_bin(c):
return c == '0' or c == '1'
def chr_is_identifier_start(c):
return c.isalpha() or c in '_'
def chr_is_identifier(c):
return c.isalnum() or c in '_'
class AsmReader:
def __init__(self, text='', filename=''):
self.text = text
self.lineno = 1
self.filename = filename
def feed(self, text):
self.text += text
def syntax_error(self, message):
read_len = len(self.line_start) - len(self.text)
next_line = self.line_start.find('\n')
line = self.line_start[:(next_line if next_line > -1 else len(self.line_start) - 1)]
raise SyntaxError(message, (self.filename, self.lineno, read_len, line))
def __iter__(self):
return self
def __next__(self):
if not self.text:
raise StopIteration()
char = self.next_interesting_character()
while char == ';' or char == '\n':
if char == ';':
self.skip_comment()
if self.text:
self.read('\n')
self.lineno += 1
char = self.next_interesting_character()
if char == '\n':
self.skip(1)
char = self.next_interesting_character()
self.lineno += 1
self.line_start = self.text
if char == '':
return ('eof', None)
if char == '.':
self.skip(1)
const = self.next_constant()
self.end_of_line()
return const
elif char == '@':
self.skip(1)
ent_local = self.next_entity_local()
self.end_of_line()
return ent_local
elif char == '_':
self.skip(1)
return self.next_local_label()
elif char == '#':
self.skip(1)
directive = self.next_directive()
self.end_of_line()
return directive
else:
text = self.text
# TODO there should be a better way to do this
try:
return ('label', self.read_label())
except SyntaxError as e:
self.text = text
instr = self.next_instruction()
self.end_of_line()
return instr
def next_instruction(self):
instr = self.read_symbol().upper()
if self.head == '.':
instr += self.read('.') + self.read_symbol().upper()
whitespace = self.skip_whitespace()
if self.text and self.head not in '\n;' and not whitespace:
self.syntax_error('Expected newline, got %r' % self.head)
operands = []
if instr in ['CMD', 'TEST']: # special case
operands.append(self.read_at_least_once(lambda c: c != '\n', 'non-newline'))
return ('instruction', (instr, operands))
first = True
while self.text and self.head not in '\n;':
if not first:
self.read(',')
self.skip_whitespace()
operands.append(self.read_ref())
self.skip_whitespace()
first = False
return ('instruction', (instr, operands))
def next_constant(self):
name = self.read_symbol()
self.read_whitespace()
value = self.read_ref()
return ('const', (name, value))
def next_entity_local(self):
name = self.read_symbol()
specific = ''
if self.head == ' ':
self.read_whitespace()
specific = self.read_at_least_once(lambda c: c != '\n', 'non-newline')
return ('entity_local', (name, specific))
def next_local_label(self):
return ('local_label', self.read_label())
def next_directive(self):
name = self.read_symbol()
self.read_whitespace()
value = self.read_at_least_once(lambda c: c != '\n', 'non-newline')
return ('directive', (name, value))
def read_whitespace(self):
self.read_at_least_once(lambda c: c in ' \t', 'whitespace')
def read_ref(self):
head = self.head
if head == '#':
self.skip(1)
return ('literal', self.read_number())
elif head.isnumeric():
return ('address', self.read_number())
elif head == '"':
return ('string', self.read_string())
else:
return ('symbol', self.read_symbol())
def read_number(self):
mul = -1 if self.head == '-' else 1
if mul == -1: # Read negative sign
self.skip(1)
if self.head == '0':
type = self.peek()
if type == 'x':
self.skip(2)
return mul*int(self.read_at_least_once(chr_is_hex, 'hex char'), 16)
elif type == 'b':
self.skip(2)
return mul*int(self.read_at_least_once(chr_is_bin, 'bin char'), 2)
elif type == 'o':
self.skip(2)
return mul*int(self.read_at_least_once(chr_is_oct, 'oct char'), 8)
# fall through to read as decimal number
return mul*int(self.read_at_least_once(str.isdecimal, 'decimal char'))
def read_string(self):
self.read('"')
string = ''
while True:
string += self.read_while(lambda c: c not in '\n\\"')
if self.head == '\n':
self.syntax_error('Unterminated string')
elif self.head == '\\':
self.skip(1)
if self.head == 'n':
string += '\n'
elif self.head == '"':
string += '"'
else:
self.syntax_error('Invalid escape %r' % self.head)
self.skip(1)
else:
break
self.read('"')
return string
def read_label(self):
name = self.read_symbol()
self.read(':')
return name
def read_symbol(self):
symb = self.read(chr_is_identifier_start, 'start of identifier')
symb += self.read_while(chr_is_identifier)
return symb
def read(self, cond, desc=''):
head = self.head
test = cond(head) if callable(cond) else head == cond
if test:
self.skip(1)
return head
if not desc:
desc = '<unknown expectation>'
self.syntax_error('Expected %s, got %r' % (desc if callable(cond) else repr(cond), head))
def read_any(self, options):
return self.read(lambda c: c in options, 'any of %s' % list(options))
def read_at_least_once(self, cond, desc=''):
val = self.read(cond, desc)
val += self.read_while(cond)
return val
def read_while(self, cond):
ptr = 0
while ptr < len(self.text) and cond(self.text[ptr]):
ptr += 1
val = self.text[:ptr]
self.skip(ptr)
return val
def peek(self):
return self.text[1] if len(self.text) > 1 else ''
def skip(self, n):
if n >= len(self.text):
self.text = ''
else:
self.text = self.text[n:]
def skip_comment(self):
ptr = 0
while ptr < len(self.text) and self.text[ptr] != '\n':
ptr += 1
self.skip(ptr)
def skip_whitespace(self):
return self.read_while(lambda c: c in ' \t')
def next_interesting_character(self):
self.skip_whitespace()
return self.head
def end_of_line(self):
self.skip_whitespace()
if self.text:
old = self.text
self.read_any('\n;')
self.text = old # don't read, only peek
head = property(lambda self: self.text[0] if self.text else '')
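if __name__ == '__main__':
    # Demo sketch (not part of the original reader): tokenize a tiny
    # program in the grammar accepted above and print each token.
    source = '.answer #42\nstart:\nMOV #1, 2 ; a comment\n'
    for token in AsmReader(source, 'demo.asm'):
        print(token)
    # Expected output, in order:
    #   ('const', ('answer', ('literal', 42)))
    #   ('label', 'start')
    #   ('instruction', ('MOV', [('literal', 1), ('address', 2)]))
    #   ('eof', None)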
|
|
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The VMware API VM utility module to build SOAP object specs.
"""
import collections
import copy
import functools
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import units
from oslo_vmware import exceptions as vexc
from oslo_vmware.objects import datastore as ds_obj
from oslo_vmware import pbm
from oslo_vmware import vim_util as vutil
import nova.conf
from nova import exception
from nova.i18n import _
from nova.network import model as network_model
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import vim_util
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
ALL_SUPPORTED_NETWORK_DEVICES = ['VirtualE1000', 'VirtualE1000e',
'VirtualPCNet32', 'VirtualSriovEthernetCard',
'VirtualVmxnet', 'VirtualVmxnet3']
CONTROLLER_TO_ADAPTER_TYPE = {
"VirtualLsiLogicController": constants.DEFAULT_ADAPTER_TYPE,
"VirtualBusLogicController": constants.ADAPTER_TYPE_BUSLOGIC,
"VirtualIDEController": constants.ADAPTER_TYPE_IDE,
"VirtualLsiLogicSASController": constants.ADAPTER_TYPE_LSILOGICSAS,
"ParaVirtualSCSIController": constants.ADAPTER_TYPE_PARAVIRTUAL
}
# A simple cache for storing inventory folder references.
# Format: {inventory_path: folder_ref}
_FOLDER_PATH_REF_MAPPING = {}
# A cache for VM references. The key will be the VM name
# and the value is the VM reference. The VM name is unique. This
# is either the UUID of the instance or UUID-rescue in the case
# that this is a rescue VM. This is in order to prevent
# unnecessary communication with the backend.
_VM_REFS_CACHE = {}
class Limits(object):
def __init__(self, limit=None, reservation=None,
shares_level=None, shares_share=None):
"""imits object holds instance limits for convenience."""
self.limit = limit
self.reservation = reservation
self.shares_level = shares_level
self.shares_share = shares_share
def validate(self):
if self.shares_level in ('high', 'normal', 'low'):
if self.shares_share:
reason = _("Share level '%s' cannot have share "
"configured") % self.shares_level
raise exception.InvalidInput(reason=reason)
return
if self.shares_level == 'custom':
return
if self.shares_level:
reason = _("Share '%s' is not supported") % self.shares_level
raise exception.InvalidInput(reason=reason)
def has_limits(self):
return bool(self.limit or
self.reservation or
self.shares_level)
class ExtraSpecs(object):
def __init__(self, cpu_limits=None, hw_version=None,
storage_policy=None, cores_per_socket=None,
memory_limits=None, disk_io_limits=None,
vif_limits=None, firmware=None, hw_video_ram=None):
"""ExtraSpecs object holds extra_specs for the instance."""
self.cpu_limits = cpu_limits or Limits()
self.memory_limits = memory_limits or Limits()
self.disk_io_limits = disk_io_limits or Limits()
self.vif_limits = vif_limits or Limits()
self.hw_version = hw_version
self.storage_policy = storage_policy
self.cores_per_socket = cores_per_socket
self.firmware = firmware
self.hw_video_ram = hw_video_ram
def vm_refs_cache_reset():
global _VM_REFS_CACHE
_VM_REFS_CACHE = {}
def vm_ref_cache_delete(id_):
_VM_REFS_CACHE.pop(id_, None)
def vm_ref_cache_update(id_, vm_ref):
_VM_REFS_CACHE[id_] = vm_ref
def vm_ref_cache_get(id_):
return _VM_REFS_CACHE.get(id_)
def _vm_ref_cache(id_, func, session, data):
vm_ref = vm_ref_cache_get(id_)
if not vm_ref:
vm_ref = func(session, data)
vm_ref_cache_update(id_, vm_ref)
return vm_ref
def vm_ref_cache_from_instance(func):
@functools.wraps(func)
def wrapper(session, instance):
id_ = instance.uuid
return _vm_ref_cache(id_, func, session, instance)
return wrapper
# the config key which stores the VNC port
VNC_CONFIG_KEY = 'config.extraConfig["RemoteDisplay.vnc.port"]'
VmdkInfo = collections.namedtuple('VmdkInfo', ['path', 'adapter_type',
'disk_type',
'capacity_in_bytes',
'device'])
def _iface_id_option_value(client_factory, iface_id, port_index):
opt = client_factory.create('ns0:OptionValue')
opt.key = "nvp.iface-id.%d" % port_index
opt.value = iface_id
return opt
def _get_allocation_info(client_factory, limits, allocation_type):
allocation = client_factory.create(allocation_type)
if limits.limit:
allocation.limit = limits.limit
else:
# Set as 'unlimited'
allocation.limit = -1
if limits.reservation:
allocation.reservation = limits.reservation
else:
allocation.reservation = 0
shares = client_factory.create('ns0:SharesInfo')
if limits.shares_level:
shares.level = limits.shares_level
if (shares.level == 'custom' and
limits.shares_share):
shares.shares = limits.shares_share
else:
shares.shares = 0
else:
shares.level = 'normal'
shares.shares = 0
# The VirtualEthernetCardResourceAllocation has 'share' instead of
# 'shares'.
if hasattr(allocation, 'share'):
allocation.share = shares
else:
allocation.shares = shares
return allocation
def get_vm_create_spec(client_factory, instance, data_store_name,
vif_infos, extra_specs,
os_type=constants.DEFAULT_OS_TYPE,
profile_spec=None, metadata=None):
"""Builds the VM Create spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
config_spec.name = instance.uuid
config_spec.guestId = os_type
# The name is the unique identifier for the VM.
config_spec.instanceUuid = instance.uuid
if metadata:
config_spec.annotation = metadata
# set the Hardware version
config_spec.version = extra_specs.hw_version
# Allow nested hypervisor instances to host 64 bit VMs.
if os_type in ("vmkernel5Guest", "vmkernel6Guest", "vmkernel65Guest",
"windowsHyperVGuest"):
config_spec.nestedHVEnabled = "True"
# Append the profile spec
if profile_spec:
config_spec.vmProfile = [profile_spec]
vm_file_info = client_factory.create('ns0:VirtualMachineFileInfo')
vm_file_info.vmPathName = "[" + data_store_name + "]"
config_spec.files = vm_file_info
tools_info = client_factory.create('ns0:ToolsConfigInfo')
tools_info.afterPowerOn = True
tools_info.afterResume = True
tools_info.beforeGuestStandby = True
tools_info.beforeGuestShutdown = True
tools_info.beforeGuestReboot = True
config_spec.tools = tools_info
config_spec.numCPUs = int(instance.vcpus)
if extra_specs.cores_per_socket:
config_spec.numCoresPerSocket = int(extra_specs.cores_per_socket)
config_spec.memoryMB = int(instance.memory_mb)
# Configure cpu information
if extra_specs.cpu_limits.has_limits():
config_spec.cpuAllocation = _get_allocation_info(
client_factory, extra_specs.cpu_limits,
'ns0:ResourceAllocationInfo')
# Configure memory information
if extra_specs.memory_limits.has_limits():
config_spec.memoryAllocation = _get_allocation_info(
client_factory, extra_specs.memory_limits,
'ns0:ResourceAllocationInfo')
if extra_specs.firmware:
config_spec.firmware = extra_specs.firmware
devices = []
for i, vif_info in enumerate(vif_infos):
vif_spec = _create_vif_spec(client_factory, vif_info,
extra_specs.vif_limits, i)
devices.append(vif_spec)
serial_port_spec = create_serial_port_spec(client_factory)
if serial_port_spec:
devices.append(serial_port_spec)
virtual_device_config_spec = create_video_card_spec(client_factory,
extra_specs)
if virtual_device_config_spec:
devices.append(virtual_device_config_spec)
config_spec.deviceChange = devices
# add vm-uuid and iface-id.x values for Neutron
extra_config = []
opt = client_factory.create('ns0:OptionValue')
opt.key = "nvp.vm-uuid"
opt.value = instance.uuid
extra_config.append(opt)
# enable to provide info needed by udev to generate /dev/disk/by-id
opt = client_factory.create('ns0:OptionValue')
opt.key = "disk.EnableUUID"
opt.value = 'true'
extra_config.append(opt)
port_index = 0
for vif_info in vif_infos:
if vif_info['iface_id']:
extra_config.append(_iface_id_option_value(client_factory,
vif_info['iface_id'],
port_index))
port_index += 1
if (CONF.vmware.console_delay_seconds and
CONF.vmware.console_delay_seconds > 0):
opt = client_factory.create('ns0:OptionValue')
opt.key = 'keyboard.typematicMinDelay'
opt.value = CONF.vmware.console_delay_seconds * 1000000
extra_config.append(opt)
config_spec.extraConfig = extra_config
# Set the VM to be 'managed' by 'OpenStack'
managed_by = client_factory.create('ns0:ManagedByInfo')
managed_by.extensionKey = constants.EXTENSION_KEY
managed_by.type = constants.EXTENSION_TYPE_INSTANCE
config_spec.managedBy = managed_by
return config_spec
def create_video_card_spec(client_factory, extra_specs):
if extra_specs.hw_video_ram:
video_card = client_factory.create('ns0:VirtualMachineVideoCard')
video_card.videoRamSizeInKB = extra_specs.hw_video_ram
video_card.key = -1
virtual_device_config_spec = client_factory.create(
'ns0:VirtualDeviceConfigSpec')
virtual_device_config_spec.operation = "add"
virtual_device_config_spec.device = video_card
return virtual_device_config_spec
def create_serial_port_spec(client_factory):
"""Creates config spec for serial port."""
if not CONF.vmware.serial_port_service_uri:
return
backing = client_factory.create('ns0:VirtualSerialPortURIBackingInfo')
backing.direction = "server"
backing.serviceURI = CONF.vmware.serial_port_service_uri
backing.proxyURI = CONF.vmware.serial_port_proxy_uri
connectable_spec = client_factory.create('ns0:VirtualDeviceConnectInfo')
connectable_spec.startConnected = True
connectable_spec.allowGuestControl = True
connectable_spec.connected = True
serial_port = client_factory.create('ns0:VirtualSerialPort')
serial_port.connectable = connectable_spec
serial_port.backing = backing
# we are using unique negative integers as temporary keys
serial_port.key = -2
serial_port.yieldOnPoll = True
dev_spec = client_factory.create('ns0:VirtualDeviceConfigSpec')
dev_spec.operation = "add"
dev_spec.device = serial_port
return dev_spec
def get_vm_boot_spec(client_factory, device):
"""Returns updated boot settings for the instance.
The boot order for the instance will be changed to have the
input device as the boot disk.
"""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
boot_disk = client_factory.create(
'ns0:VirtualMachineBootOptionsBootableDiskDevice')
boot_disk.deviceKey = device.key
boot_options = client_factory.create('ns0:VirtualMachineBootOptions')
boot_options.bootOrder = [boot_disk]
config_spec.bootOptions = boot_options
return config_spec
def get_vm_resize_spec(client_factory, vcpus, memory_mb, extra_specs,
metadata=None):
"""Provides updates for a VM spec."""
resize_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
resize_spec.numCPUs = vcpus
resize_spec.memoryMB = memory_mb
resize_spec.cpuAllocation = _get_allocation_info(
client_factory, extra_specs.cpu_limits,
'ns0:ResourceAllocationInfo')
if metadata:
resize_spec.annotation = metadata
return resize_spec
def create_controller_spec(client_factory, key,
adapter_type=constants.DEFAULT_ADAPTER_TYPE,
bus_number=0):
"""Builds a Config Spec for the LSI or Bus Logic Controller's addition
which acts as the controller for the virtual hard disk to be attached
to the VM.
"""
# Create a controller for the Virtual Hard Disk
virtual_device_config = client_factory.create(
'ns0:VirtualDeviceConfigSpec')
virtual_device_config.operation = "add"
if adapter_type == constants.ADAPTER_TYPE_BUSLOGIC:
virtual_controller = client_factory.create(
'ns0:VirtualBusLogicController')
elif adapter_type == constants.ADAPTER_TYPE_LSILOGICSAS:
virtual_controller = client_factory.create(
'ns0:VirtualLsiLogicSASController')
elif adapter_type == constants.ADAPTER_TYPE_PARAVIRTUAL:
virtual_controller = client_factory.create(
'ns0:ParaVirtualSCSIController')
else:
virtual_controller = client_factory.create(
'ns0:VirtualLsiLogicController')
virtual_controller.key = key
virtual_controller.busNumber = bus_number
virtual_controller.sharedBus = "noSharing"
virtual_device_config.device = virtual_controller
return virtual_device_config
def convert_vif_model(name):
"""Converts standard VIF_MODEL types to the internal VMware ones."""
if name == network_model.VIF_MODEL_E1000:
return 'VirtualE1000'
if name == network_model.VIF_MODEL_E1000E:
return 'VirtualE1000e'
if name == network_model.VIF_MODEL_PCNET:
return 'VirtualPCNet32'
if name == network_model.VIF_MODEL_SRIOV:
return 'VirtualSriovEthernetCard'
if name == network_model.VIF_MODEL_VMXNET:
return 'VirtualVmxnet'
if name == network_model.VIF_MODEL_VMXNET3:
return 'VirtualVmxnet3'
if name not in ALL_SUPPORTED_NETWORK_DEVICES:
msg = _('%s is not supported.') % name
raise exception.Invalid(msg)
return name
def _create_vif_spec(client_factory, vif_info, vif_limits=None, offset=0):
"""Builds a config spec for the addition of a new network
adapter to the VM.
"""
network_spec = client_factory.create('ns0:VirtualDeviceConfigSpec')
network_spec.operation = "add"
# Keep compatible with other Hyper vif model parameter.
vif_info['vif_model'] = convert_vif_model(vif_info['vif_model'])
vif = 'ns0:' + vif_info['vif_model']
net_device = client_factory.create(vif)
# NOTE(asomya): Only works on ESXi if the portgroup binding is set to
# ephemeral. Invalid configuration if set to static and the NIC does
# not come up on boot if set to dynamic.
network_ref = vif_info['network_ref']
network_name = vif_info['network_name']
mac_address = vif_info['mac_address']
backing = None
if network_ref and network_ref['type'] == 'OpaqueNetwork':
backing = client_factory.create(
'ns0:VirtualEthernetCardOpaqueNetworkBackingInfo')
backing.opaqueNetworkId = network_ref['network-id']
backing.opaqueNetworkType = network_ref['network-type']
# Configure externalId
if network_ref['use-external-id']:
# externalId is only supported from vCenter 6.0 onwards
if hasattr(net_device, 'externalId'):
net_device.externalId = vif_info['iface_id']
else:
dp = client_factory.create('ns0:DynamicProperty')
dp.name = "__externalId__"
dp.val = vif_info['iface_id']
net_device.dynamicProperty = [dp]
elif (network_ref and
network_ref['type'] == "DistributedVirtualPortgroup"):
backing = client_factory.create(
'ns0:VirtualEthernetCardDistributedVirtualPortBackingInfo')
portgroup = client_factory.create(
'ns0:DistributedVirtualSwitchPortConnection')
portgroup.switchUuid = network_ref['dvsw']
portgroup.portgroupKey = network_ref['dvpg']
if 'dvs_port_key' in network_ref:
portgroup.portKey = network_ref['dvs_port_key']
backing.port = portgroup
else:
backing = client_factory.create(
'ns0:VirtualEthernetCardNetworkBackingInfo')
backing.deviceName = network_name
connectable_spec = client_factory.create('ns0:VirtualDeviceConnectInfo')
connectable_spec.startConnected = True
connectable_spec.allowGuestControl = True
connectable_spec.connected = True
net_device.connectable = connectable_spec
net_device.backing = backing
# The Server assigns a Key to the device. Here we pass a -ve temporary key.
# -ve because actual keys are +ve numbers and we don't
# want a clash with the key that server might associate with the device
net_device.key = -47 - offset
net_device.addressType = "manual"
net_device.macAddress = mac_address
net_device.wakeOnLanEnabled = True
# vnic limits are only supported from version 6.0
if vif_limits and vif_limits.has_limits():
if hasattr(net_device, 'resourceAllocation'):
net_device.resourceAllocation = _get_allocation_info(
client_factory, vif_limits,
'ns0:VirtualEthernetCardResourceAllocation')
else:
msg = _('Limits only supported from vCenter 6.0 and above')
raise exception.Invalid(msg)
network_spec.device = net_device
return network_spec
def get_network_attach_config_spec(client_factory, vif_info, index,
vif_limits=None):
"""Builds the vif attach config spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
vif_spec = _create_vif_spec(client_factory, vif_info, vif_limits)
config_spec.deviceChange = [vif_spec]
if vif_info['iface_id'] is not None:
config_spec.extraConfig = [_iface_id_option_value(client_factory,
vif_info['iface_id'],
index)]
return config_spec
def get_network_detach_config_spec(client_factory, device, port_index):
"""Builds the vif detach config spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
virtual_device_config = client_factory.create(
'ns0:VirtualDeviceConfigSpec')
virtual_device_config.operation = "remove"
virtual_device_config.device = device
config_spec.deviceChange = [virtual_device_config]
# If a key is already present then it cannot be deleted, only updated.
# This enables us to reuse this key if there is an additional
# attachment. The keys need to be preserved. This is due to the fact
# that there is logic on the ESX that does the network wiring
# according to these values. If they are changed then this will
# break networking to and from the interface.
config_spec.extraConfig = [_iface_id_option_value(client_factory,
'free',
port_index)]
return config_spec
def update_vif_spec(client_factory, vif_info, device):
"""Updates the backing for the VIF spec."""
network_spec = client_factory.create('ns0:VirtualDeviceConfigSpec')
network_spec.operation = 'edit'
network_ref = vif_info['network_ref']
network_name = vif_info['network_name']
if network_ref and network_ref['type'] == 'OpaqueNetwork':
backing = client_factory.create(
'ns0:VirtualEthernetCardOpaqueNetworkBackingInfo')
backing.opaqueNetworkId = network_ref['network-id']
backing.opaqueNetworkType = network_ref['network-type']
# Configure externalId
if network_ref['use-external-id']:
if hasattr(device, 'externalId'):
device.externalId = vif_info['iface_id']
else:
dp = client_factory.create('ns0:DynamicProperty')
dp.name = "__externalId__"
dp.val = vif_info['iface_id']
device.dynamicProperty = [dp]
elif (network_ref and
network_ref['type'] == "DistributedVirtualPortgroup"):
backing = client_factory.create(
'ns0:VirtualEthernetCardDistributedVirtualPortBackingInfo')
portgroup = client_factory.create(
'ns0:DistributedVirtualSwitchPortConnection')
portgroup.switchUuid = network_ref['dvsw']
portgroup.portgroupKey = network_ref['dvpg']
backing.port = portgroup
else:
backing = client_factory.create(
'ns0:VirtualEthernetCardNetworkBackingInfo')
backing.deviceName = network_name
device.backing = backing
network_spec.device = device
return network_spec
def get_storage_profile_spec(session, storage_policy):
"""Gets the vm profile spec configured for storage policy."""
profile_id = pbm.get_profile_id_by_name(session, storage_policy)
if profile_id:
client_factory = session.vim.client.factory
storage_profile_spec = client_factory.create(
'ns0:VirtualMachineDefinedProfileSpec')
storage_profile_spec.profileId = profile_id.uniqueId
return storage_profile_spec
def get_vmdk_attach_config_spec(client_factory,
disk_type=constants.DEFAULT_DISK_TYPE,
file_path=None,
disk_size=None,
linked_clone=False,
controller_key=None,
unit_number=None,
device_name=None,
disk_io_limits=None):
"""Builds the vmdk attach config spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
device_config_spec = []
virtual_device_config_spec = _create_virtual_disk_spec(client_factory,
controller_key, disk_type, file_path,
disk_size, linked_clone,
unit_number, device_name, disk_io_limits)
device_config_spec.append(virtual_device_config_spec)
config_spec.deviceChange = device_config_spec
return config_spec
def get_cdrom_attach_config_spec(client_factory,
datastore,
file_path,
controller_key,
cdrom_unit_number):
"""Builds and returns the cdrom attach config spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
device_config_spec = []
virtual_device_config_spec = create_virtual_cdrom_spec(client_factory,
datastore,
controller_key,
file_path,
cdrom_unit_number)
device_config_spec.append(virtual_device_config_spec)
config_spec.deviceChange = device_config_spec
return config_spec
def get_vmdk_detach_config_spec(client_factory, device,
destroy_disk=False):
"""Builds the vmdk detach config spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
device_config_spec = []
virtual_device_config_spec = detach_virtual_disk_spec(client_factory,
device,
destroy_disk)
device_config_spec.append(virtual_device_config_spec)
config_spec.deviceChange = device_config_spec
return config_spec
def get_vm_extra_config_spec(client_factory, extra_opts):
"""Builds extra spec fields from a dictionary."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
# add the key value pairs
extra_config = []
for key, value in extra_opts.items():
opt = client_factory.create('ns0:OptionValue')
opt.key = key
opt.value = value
extra_config.append(opt)
config_spec.extraConfig = extra_config
return config_spec
def _get_device_capacity(device):
# Devices pre-vSphere-5.5 only reports capacityInKB, which has
# rounding inaccuracies. Use that only if the more accurate
# attribute is absent.
if hasattr(device, 'capacityInBytes'):
return device.capacityInBytes
else:
return device.capacityInKB * units.Ki
def _get_device_disk_type(device):
if getattr(device.backing, 'thinProvisioned', False):
return constants.DISK_TYPE_THIN
else:
if getattr(device.backing, 'eagerlyScrub', False):
return constants.DISK_TYPE_EAGER_ZEROED_THICK
else:
return constants.DEFAULT_DISK_TYPE
def get_hardware_devices(session, vm_ref):
hardware_devices = session._call_method(vutil,
"get_object_property",
vm_ref,
"config.hardware.device")
return vim_util.get_array_items(hardware_devices)
def get_vmdk_info(session, vm_ref, uuid=None):
"""Returns information for the primary VMDK attached to the given VM."""
hardware_devices = get_hardware_devices(session, vm_ref)
vmdk_file_path = None
vmdk_controller_key = None
disk_type = None
capacity_in_bytes = 0
# Determine if we need to get the details of the root disk
root_disk = None
root_device = None
if uuid:
root_disk = '%s.vmdk' % uuid
vmdk_device = None
adapter_type_dict = {}
for device in hardware_devices:
if device.__class__.__name__ == "VirtualDisk":
if device.backing.__class__.__name__ == \
"VirtualDiskFlatVer2BackingInfo":
path = ds_obj.DatastorePath.parse(device.backing.fileName)
if root_disk and path.basename == root_disk:
root_device = device
vmdk_device = device
elif device.__class__.__name__ in CONTROLLER_TO_ADAPTER_TYPE:
adapter_type_dict[device.key] = CONTROLLER_TO_ADAPTER_TYPE[
device.__class__.__name__]
if root_disk:
vmdk_device = root_device
if vmdk_device:
vmdk_file_path = vmdk_device.backing.fileName
capacity_in_bytes = _get_device_capacity(vmdk_device)
vmdk_controller_key = vmdk_device.controllerKey
disk_type = _get_device_disk_type(vmdk_device)
adapter_type = adapter_type_dict.get(vmdk_controller_key)
return VmdkInfo(vmdk_file_path, adapter_type, disk_type,
capacity_in_bytes, vmdk_device)
scsi_controller_classes = {
'ParaVirtualSCSIController': constants.ADAPTER_TYPE_PARAVIRTUAL,
'VirtualLsiLogicController': constants.DEFAULT_ADAPTER_TYPE,
'VirtualLsiLogicSASController': constants.ADAPTER_TYPE_LSILOGICSAS,
'VirtualBusLogicController': constants.ADAPTER_TYPE_BUSLOGIC,
}
def get_scsi_adapter_type(hardware_devices):
"""Selects a proper iscsi adapter type from the existing
hardware devices
"""
for device in hardware_devices:
if device.__class__.__name__ in scsi_controller_classes:
# find the controllers which still have available slots
if len(device.device) < constants.SCSI_MAX_CONNECT_NUMBER:
# return the first match one
return scsi_controller_classes[device.__class__.__name__]
raise exception.StorageError(
reason=_("Unable to find iSCSI Target"))
def _find_controller_slot(controller_keys, taken, max_unit_number):
for controller_key in controller_keys:
for unit_number in range(max_unit_number):
if unit_number not in taken.get(controller_key, []):
return controller_key, unit_number
def _is_ide_controller(device):
return device.__class__.__name__ == 'VirtualIDEController'
def _is_scsi_controller(device):
return device.__class__.__name__ in ['VirtualLsiLogicController',
'VirtualLsiLogicSASController',
'VirtualBusLogicController',
'ParaVirtualSCSIController']
def _find_allocated_slots(devices):
"""Return dictionary which maps controller_key to list of allocated unit
numbers for that controller_key.
"""
taken = {}
for device in devices:
if hasattr(device, 'controllerKey') and hasattr(device, 'unitNumber'):
unit_numbers = taken.setdefault(device.controllerKey, [])
unit_numbers.append(device.unitNumber)
if _is_scsi_controller(device):
# the SCSI controller sits on its own bus
unit_numbers = taken.setdefault(device.key, [])
unit_numbers.append(device.scsiCtlrUnitNumber)
return taken
def _get_bus_number_for_scsi_controller(devices):
"""Return usable bus number when create new SCSI controller."""
# Every SCSI controller will take a unique bus number
taken = [dev.busNumber for dev in devices if _is_scsi_controller(dev)]
# The max bus number for SCSI controllers is 3
for i in range(constants.SCSI_MAX_CONTROLLER_NUMBER):
if i not in taken:
return i
msg = _('Only %d SCSI controllers are allowed to be '
'created on this instance.') % constants.SCSI_MAX_CONTROLLER_NUMBER
raise vexc.VMwareDriverException(msg)
def allocate_controller_key_and_unit_number(client_factory, devices,
adapter_type):
"""This function inspects the current set of hardware devices and returns
controller_key and unit_number that can be used for attaching a new virtual
disk to adapter with the given adapter_type.
"""
if devices.__class__.__name__ == "ArrayOfVirtualDevice":
devices = devices.VirtualDevice
taken = _find_allocated_slots(devices)
ret = None
if adapter_type == constants.ADAPTER_TYPE_IDE:
ide_keys = [dev.key for dev in devices if _is_ide_controller(dev)]
ret = _find_controller_slot(ide_keys, taken, 2)
elif adapter_type in constants.SCSI_ADAPTER_TYPES:
scsi_keys = [dev.key for dev in devices if _is_scsi_controller(dev)]
ret = _find_controller_slot(scsi_keys, taken, 16)
if ret:
return ret[0], ret[1], None
# create new controller with the specified type and return its spec
controller_key = -101
# Get free bus number for new SCSI controller.
bus_number = 0
if adapter_type in constants.SCSI_ADAPTER_TYPES:
bus_number = _get_bus_number_for_scsi_controller(devices)
controller_spec = create_controller_spec(client_factory, controller_key,
adapter_type, bus_number)
return controller_key, 0, controller_spec
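# Illustrative bookkeeping (standalone example, not from this module): with
# one SCSI controller at key 200 whose units 0 and 7 are taken (unit 7 being
# the controller's own scsiCtlrUnitNumber), the helpers above give
#   _find_controller_slot([200], {200: [0, 7]}, 16) == (200, 1)
# i.e. the new disk lands on the existing controller at unit number 1.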
def get_rdm_disk(hardware_devices, uuid):
"""Gets the RDM disk key."""
for device in hardware_devices:
if (device.__class__.__name__ == "VirtualDisk" and
device.backing.__class__.__name__ ==
"VirtualDiskRawDiskMappingVer1BackingInfo" and
device.backing.lunUuid == uuid):
return device
def get_vmdk_create_spec(client_factory, size_in_kb,
adapter_type=constants.DEFAULT_ADAPTER_TYPE,
disk_type=constants.DEFAULT_DISK_TYPE):
"""Builds the virtual disk create spec."""
create_vmdk_spec = client_factory.create('ns0:FileBackedVirtualDiskSpec')
create_vmdk_spec.adapterType = get_vmdk_adapter_type(adapter_type)
create_vmdk_spec.diskType = disk_type
create_vmdk_spec.capacityKb = size_in_kb
return create_vmdk_spec
def create_virtual_cdrom_spec(client_factory,
datastore,
controller_key,
file_path,
cdrom_unit_number):
"""Builds spec for the creation of a new Virtual CDROM to the VM."""
config_spec = client_factory.create(
'ns0:VirtualDeviceConfigSpec')
config_spec.operation = "add"
cdrom = client_factory.create('ns0:VirtualCdrom')
cdrom_device_backing = client_factory.create(
'ns0:VirtualCdromIsoBackingInfo')
cdrom_device_backing.datastore = datastore
cdrom_device_backing.fileName = file_path
cdrom.backing = cdrom_device_backing
cdrom.controllerKey = controller_key
cdrom.unitNumber = cdrom_unit_number
cdrom.key = -1
connectable_spec = client_factory.create('ns0:VirtualDeviceConnectInfo')
connectable_spec.startConnected = True
connectable_spec.allowGuestControl = False
connectable_spec.connected = True
cdrom.connectable = connectable_spec
config_spec.device = cdrom
return config_spec
def _create_virtual_disk_spec(client_factory, controller_key,
disk_type=constants.DEFAULT_DISK_TYPE,
file_path=None,
disk_size=None,
linked_clone=False,
unit_number=None,
device_name=None,
disk_io_limits=None):
"""Builds spec for the creation of a new/ attaching of an already existing
Virtual Disk to the VM.
"""
virtual_device_config = client_factory.create(
'ns0:VirtualDeviceConfigSpec')
virtual_device_config.operation = "add"
if (file_path is None) or linked_clone:
virtual_device_config.fileOperation = "create"
virtual_disk = client_factory.create('ns0:VirtualDisk')
if disk_type == "rdm" or disk_type == "rdmp":
disk_file_backing = client_factory.create(
'ns0:VirtualDiskRawDiskMappingVer1BackingInfo')
disk_file_backing.compatibilityMode = "virtualMode" \
if disk_type == "rdm" else "physicalMode"
disk_file_backing.diskMode = "independent_persistent"
disk_file_backing.deviceName = device_name or ""
else:
disk_file_backing = client_factory.create(
'ns0:VirtualDiskFlatVer2BackingInfo')
disk_file_backing.diskMode = "persistent"
if disk_type == constants.DISK_TYPE_THIN:
disk_file_backing.thinProvisioned = True
else:
if disk_type == constants.DISK_TYPE_EAGER_ZEROED_THICK:
disk_file_backing.eagerlyScrub = True
disk_file_backing.fileName = file_path or ""
connectable_spec = client_factory.create('ns0:VirtualDeviceConnectInfo')
connectable_spec.startConnected = True
connectable_spec.allowGuestControl = False
connectable_spec.connected = True
if not linked_clone:
virtual_disk.backing = disk_file_backing
else:
virtual_disk.backing = copy.copy(disk_file_backing)
virtual_disk.backing.fileName = ""
virtual_disk.backing.parent = disk_file_backing
virtual_disk.connectable = connectable_spec
# The Server assigns a Key to the device. Here we pass a -ve random key.
# -ve because actual keys are +ve numbers and we don't
# want a clash with the key that server might associate with the device
virtual_disk.key = -100
virtual_disk.controllerKey = controller_key
virtual_disk.unitNumber = unit_number or 0
virtual_disk.capacityInKB = disk_size or 0
if disk_io_limits and disk_io_limits.has_limits():
virtual_disk.storageIOAllocation = _get_allocation_info(
client_factory, disk_io_limits,
'ns0:StorageIOAllocationInfo')
virtual_device_config.device = virtual_disk
return virtual_device_config
def detach_virtual_disk_spec(client_factory, device, destroy_disk=False):
"""Builds spec for the detach of an already existing Virtual Disk from VM.
"""
virtual_device_config = client_factory.create(
'ns0:VirtualDeviceConfigSpec')
virtual_device_config.operation = "remove"
if destroy_disk:
virtual_device_config.fileOperation = "destroy"
virtual_device_config.device = device
return virtual_device_config
def clone_vm_spec(client_factory, location,
power_on=False, snapshot=None, template=False, config=None):
"""Builds the VM clone spec."""
clone_spec = client_factory.create('ns0:VirtualMachineCloneSpec')
clone_spec.location = location
clone_spec.powerOn = power_on
if snapshot:
clone_spec.snapshot = snapshot
if config is not None:
clone_spec.config = config
clone_spec.template = template
return clone_spec
def relocate_vm_spec(client_factory, res_pool=None, datastore=None, host=None,
disk_move_type="moveAllDiskBackingsAndAllowSharing",
devices=None):
rel_spec = client_factory.create('ns0:VirtualMachineRelocateSpec')
rel_spec.datastore = datastore
rel_spec.host = host
rel_spec.pool = res_pool
rel_spec.diskMoveType = disk_move_type
if devices is not None:
rel_spec.deviceChange = devices
return rel_spec
def relocate_vm(session, vm_ref, res_pool=None, datastore=None, host=None,
disk_move_type="moveAllDiskBackingsAndAllowSharing",
devices=None):
client_factory = session.vim.client.factory
rel_spec = relocate_vm_spec(client_factory, res_pool, datastore, host,
disk_move_type, devices)
relocate_task = session._call_method(session.vim, "RelocateVM_Task",
vm_ref, spec=rel_spec)
session._wait_for_task(relocate_task)
def get_machine_id_change_spec(client_factory, machine_id_str):
"""Builds the machine id change config spec."""
virtual_machine_config_spec = client_factory.create(
'ns0:VirtualMachineConfigSpec')
opt = client_factory.create('ns0:OptionValue')
opt.key = "machine.id"
opt.value = machine_id_str
virtual_machine_config_spec.extraConfig = [opt]
return virtual_machine_config_spec
def get_vnc_config_spec(client_factory, port):
"""Builds the vnc config spec."""
virtual_machine_config_spec = client_factory.create(
'ns0:VirtualMachineConfigSpec')
opt_enabled = client_factory.create('ns0:OptionValue')
opt_enabled.key = "RemoteDisplay.vnc.enabled"
opt_enabled.value = "true"
opt_port = client_factory.create('ns0:OptionValue')
opt_port.key = "RemoteDisplay.vnc.port"
opt_port.value = port
opt_keymap = client_factory.create('ns0:OptionValue')
opt_keymap.key = "RemoteDisplay.vnc.keyMap"
opt_keymap.value = CONF.vmware.vnc_keymap
extras = [opt_enabled, opt_port, opt_keymap]
virtual_machine_config_spec.extraConfig = extras
return virtual_machine_config_spec
def get_vnc_port(session):
"""Return VNC port for an VM or None if there is no available port."""
min_port = CONF.vmware.vnc_port
port_total = CONF.vmware.vnc_port_total
allocated_ports = _get_allocated_vnc_ports(session)
max_port = min_port + port_total
for port in range(min_port, max_port):
if port not in allocated_ports:
return port
raise exception.ConsolePortRangeExhausted(min_port=min_port,
max_port=max_port)
def _get_allocated_vnc_ports(session):
"""Return an integer set of all allocated VNC ports."""
# TODO(rgerganov): bug #1256944
# The VNC port should be unique per host, not per vCenter
vnc_ports = set()
result = session._call_method(vim_util, "get_objects",
"VirtualMachine", [VNC_CONFIG_KEY])
with vutil.WithRetrieval(session.vim, result) as objects:
for obj in objects:
if not hasattr(obj, 'propSet') or not obj.propSet:
continue
dynamic_prop = obj.propSet[0]
option_value = dynamic_prop.val
vnc_port = option_value.value
vnc_ports.add(int(vnc_port))
return vnc_ports
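# Illustrative sketch (not part of the original module): get_vnc_port() scans
# [vnc_port, vnc_port + vnc_port_total) and returns the first port missing
# from the allocated set. A hypothetical pure-Python equivalent:
def _example_pick_free_vnc_port(min_port, port_total, allocated_ports):
    for port in range(min_port, min_port + port_total):
        if port not in allocated_ports:
            return port
    return None  # the real code raises ConsolePortRangeExhausted instead
# e.g. _example_pick_free_vnc_port(5900, 4, {5900, 5901}) == 5902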
def _get_object_for_value(objects, value):
    # 'obj' avoids shadowing the builtin 'object'
    for obj in objects:
        if obj.propSet[0].val == value:
            return obj.obj
def _get_object_for_optionvalue(objects, value):
    for obj in objects:
        if hasattr(obj, "propSet") and obj.propSet:
            if obj.propSet[0].val.value == value:
                return obj.obj
def _get_object_from_results(session, results, value, func):
with vutil.WithRetrieval(session.vim, results) as objects:
return func(objects, value)
def _get_vm_ref_from_name(session, vm_name):
"""Get reference to the VM with the name specified.
This method reads all of the names of the VM's that are running
on the backend, then it filters locally the matching vm_name.
It is far more optimal to use _get_vm_ref_from_vm_uuid.
"""
vms = session._call_method(vim_util, "get_objects",
"VirtualMachine", ["name"])
return _get_object_from_results(session, vms, vm_name,
_get_object_for_value)
def _get_vm_ref_from_vm_uuid(session, instance_uuid):
"""Get reference to the VM.
The method will make use of FindAllByUuid to get the VM reference.
This method finds all VM's on the backend that match the
instance_uuid, more specifically all VM's on the backend that have
'config_spec.instanceUuid' set to 'instance_uuid'.
"""
vm_refs = session._call_method(
session.vim,
"FindAllByUuid",
session.vim.service_content.searchIndex,
uuid=instance_uuid,
vmSearch=True,
instanceUuid=True)
if vm_refs:
return vm_refs[0]
def _get_vm_ref_from_extraconfig(session, instance_uuid):
"""Get reference to the VM with the uuid specified."""
vms = session._call_method(vim_util, "get_objects",
"VirtualMachine", ['config.extraConfig["nvp.vm-uuid"]'])
return _get_object_from_results(session, vms, instance_uuid,
_get_object_for_optionvalue)
@vm_ref_cache_from_instance
def get_vm_ref(session, instance):
"""Get reference to the VM through uuid or vm name."""
uuid = instance.uuid
vm_ref = (search_vm_ref_by_identifier(session, uuid) or
_get_vm_ref_from_name(session, instance.name))
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=uuid)
return vm_ref
def search_vm_ref_by_identifier(session, identifier):
"""Searches VM reference using the identifier.
This method is primarily meant to separate out part of the logic for
vm_ref search that could be use directly in the special case of
migrating the instance. For querying VM linked to an instance always
use get_vm_ref instead.
"""
vm_ref = (_get_vm_ref_from_vm_uuid(session, identifier) or
_get_vm_ref_from_extraconfig(session, identifier) or
_get_vm_ref_from_name(session, identifier))
return vm_ref
def get_host_ref_for_vm(session, instance):
"""Get a MoRef to the ESXi host currently running an instance."""
vm_ref = get_vm_ref(session, instance)
return session._call_method(vutil, "get_object_property",
vm_ref, "runtime.host")
def get_host_name_for_vm(session, instance):
"""Get the hostname of the ESXi host currently running an instance."""
host_ref = get_host_ref_for_vm(session, instance)
return session._call_method(vutil, "get_object_property",
host_ref, "name")
def get_vm_state(session, instance):
vm_ref = get_vm_ref(session, instance)
vm_state = session._call_method(vutil, "get_object_property",
vm_ref, "runtime.powerState")
return constants.POWER_STATES[vm_state]
def get_stats_from_cluster(session, cluster):
"""Get the aggregate resource stats of a cluster."""
vcpus = 0
max_vcpus_per_host = 0
used_mem_mb = 0
total_mem_mb = 0
max_mem_mb_per_host = 0
# Get the Host and Resource Pool Managed Object Refs
prop_dict = session._call_method(vutil,
"get_object_properties_dict",
cluster,
["host"])
if prop_dict:
host_ret = prop_dict.get('host')
if host_ret:
host_mors = host_ret.ManagedObjectReference
result = session._call_method(vim_util,
"get_properties_for_a_collection_of_objects",
"HostSystem", host_mors,
["summary.hardware", "summary.runtime",
"summary.quickStats"])
with vutil.WithRetrieval(session.vim, result) as objects:
for obj in objects:
host_props = propset_dict(obj.propSet)
runtime_summary = host_props['summary.runtime']
if (runtime_summary.inMaintenanceMode is not False or
runtime_summary.connectionState != "connected"):
continue
hardware_summary = host_props['summary.hardware']
stats_summary = host_props['summary.quickStats']
# Total vcpus is the sum of all pCPUs of individual hosts
# The overcommitment ratio is factored in by the scheduler
threads = hardware_summary.numCpuThreads
vcpus += threads
max_vcpus_per_host = max(max_vcpus_per_host, threads)
used_mem_mb += stats_summary.overallMemoryUsage
mem_mb = hardware_summary.memorySize // units.Mi
total_mem_mb += mem_mb
max_mem_mb_per_host = max(max_mem_mb_per_host, mem_mb)
stats = {'cpu': {'vcpus': vcpus,
'max_vcpus_per_host': max_vcpus_per_host},
'mem': {'total': total_mem_mb,
'free': total_mem_mb - used_mem_mb,
'max_mem_mb_per_host': max_mem_mb_per_host}}
return stats
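# Illustrative sketch (not part of the original module): the aggregation above
# sums CPU threads and memory across connected, non-maintenance hosts while
# tracking per-host maxima. A hypothetical equivalent over plain dicts:
def _example_cluster_stats():
    hosts = [{'threads': 16, 'mem_mb': 65536, 'used_mb': 1024},
             {'threads': 8, 'mem_mb': 32768, 'used_mb': 512}]
    total_mem = sum(h['mem_mb'] for h in hosts)
    used_mem = sum(h['used_mb'] for h in hosts)
    stats = {'cpu': {'vcpus': sum(h['threads'] for h in hosts),
                     'max_vcpus_per_host': max(h['threads'] for h in hosts)},
             'mem': {'total': total_mem,
                     'free': total_mem - used_mem,
                     'max_mem_mb_per_host': max(h['mem_mb'] for h in hosts)}}
    assert stats['cpu']['vcpus'] == 24
    assert stats['mem']['free'] == 96768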
def get_host_ref(session, cluster=None):
"""Get reference to a host within the cluster specified."""
if cluster is None:
results = session._call_method(vim_util, "get_objects",
"HostSystem")
session._call_method(vutil, 'cancel_retrieval',
results)
host_mor = results.objects[0].obj
else:
host_ret = session._call_method(vutil, "get_object_property",
cluster, "host")
if not host_ret or not host_ret.ManagedObjectReference:
msg = _('No host available on cluster')
raise exception.NoValidHost(reason=msg)
host_mor = host_ret.ManagedObjectReference[0]
return host_mor
def propset_dict(propset):
"""Turn a propset list into a dictionary
PropSet is an optional attribute on ObjectContent objects
that are returned by the VMware API.
You can read more about these at:
| http://pubs.vmware.com/vsphere-51/index.jsp
| #com.vmware.wssdk.apiref.doc/
| vmodl.query.PropertyCollector.ObjectContent.html
:param propset: a property "set" from ObjectContent
:return: dictionary representing property set
"""
if propset is None:
return {}
return {prop.name: prop.val for prop in propset}
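# Illustrative sketch (not part of the original module): propset_dict()
# collapses a list of DynamicProperty-like objects into a plain dict. Using a
# hypothetical namedtuple as a stand-in for the VMware property objects:
def _example_propset_dict_usage():
    from collections import namedtuple
    prop = namedtuple('DynamicProperty', ['name', 'val'])
    propset = [prop('summary.runtime', 'connected'),
               prop('numCpuThreads', 8)]
    assert propset_dict(propset) == {'summary.runtime': 'connected',
                                     'numCpuThreads': 8}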
def get_vmdk_backed_disk_device(hardware_devices, uuid):
for device in hardware_devices:
if (device.__class__.__name__ == "VirtualDisk" and
device.backing.__class__.__name__ ==
"VirtualDiskFlatVer2BackingInfo" and
device.backing.uuid == uuid):
return device
def get_vmdk_volume_disk(hardware_devices, path=None):
for device in hardware_devices:
if (device.__class__.__name__ == "VirtualDisk"):
if not path or path == device.backing.fileName:
return device
def get_res_pool_ref(session, cluster):
"""Get the resource pool."""
# Get the root resource pool of the cluster
res_pool_ref = session._call_method(vutil,
"get_object_property",
cluster,
"resourcePool")
return res_pool_ref
def get_all_cluster_mors(session):
"""Get all the clusters in the vCenter."""
try:
results = session._call_method(vim_util, "get_objects",
"ClusterComputeResource", ["name"])
with vutil.WithRetrieval(session.vim, results) as objects:
return list(objects)
except Exception as excep:
LOG.warning("Failed to get cluster references %s", excep)
def get_cluster_ref_by_name(session, cluster_name):
"""Get reference to the vCenter cluster with the specified name."""
    all_clusters = get_all_cluster_mors(session)
    # get_all_cluster_mors() returns None on failure, so guard the iteration
    for cluster in all_clusters or []:
if (hasattr(cluster, 'propSet') and cluster.propSet and
cluster.propSet[0].val == cluster_name):
return cluster.obj
def get_vmdk_adapter_type(adapter_type):
"""Return the adapter type to be used in vmdk descriptor.
Adapter type in vmdk descriptor is same for LSI-SAS, LSILogic & ParaVirtual
because Virtual Disk Manager API does not recognize the newer controller
types.
"""
if adapter_type in [constants.ADAPTER_TYPE_LSILOGICSAS,
constants.ADAPTER_TYPE_PARAVIRTUAL]:
vmdk_adapter_type = constants.DEFAULT_ADAPTER_TYPE
else:
vmdk_adapter_type = adapter_type
return vmdk_adapter_type
@loopingcall.RetryDecorator(
max_retry_count=20, inc_sleep_time=2, max_sleep_time=20,
exceptions=(vexc.VimFaultException,))
def create_vm(session, instance, vm_folder, config_spec, res_pool_ref):
"""Create VM on ESX host."""
LOG.debug("Creating VM on the ESX host", instance=instance)
vm_create_task = session._call_method(
session.vim,
"CreateVM_Task", vm_folder,
config=config_spec, pool=res_pool_ref)
try:
task_info = session._wait_for_task(vm_create_task)
except vexc.VMwareDriverException:
# An invalid guestId will result in an error with no specific fault
# type and the generic error 'A specified parameter was not correct'.
# As guestId is user-editable, we try to help the user out with some
# additional information if we notice that guestId isn't in our list of
# known-good values.
# We don't check this in advance or do anything more than warn because
# we can't guarantee that our list of known-good guestIds is complete.
# Consequently, a value which we don't recognise may in fact be valid.
with excutils.save_and_reraise_exception():
if config_spec.guestId not in constants.VALID_OS_TYPES:
LOG.warning('vmware_ostype from image is not recognised: '
'\'%(ostype)s\'. An invalid os type may be '
'one cause of this instance creation failure',
{'ostype': config_spec.guestId})
LOG.debug("Created VM on the ESX host", instance=instance)
return task_info.result
def destroy_vm(session, instance, vm_ref=None):
"""Destroy a VM instance. Assumes VM is powered off."""
try:
if not vm_ref:
vm_ref = get_vm_ref(session, instance)
LOG.debug("Destroying the VM", instance=instance)
destroy_task = session._call_method(session.vim, "Destroy_Task",
vm_ref)
session._wait_for_task(destroy_task)
LOG.info("Destroyed the VM", instance=instance)
except Exception:
LOG.exception('Destroy VM failed', instance=instance)
def create_virtual_disk(session, dc_ref, adapter_type, disk_type,
virtual_disk_path, size_in_kb):
# Create a Virtual Disk of the size of the flat vmdk file. This is
# done just to generate the meta-data file whose specifics
# depend on the size of the disk, thin/thick provisioning and the
# storage adapter type.
LOG.debug("Creating Virtual Disk of size "
"%(vmdk_file_size_in_kb)s KB and adapter type "
"%(adapter_type)s on the data store",
{"vmdk_file_size_in_kb": size_in_kb,
"adapter_type": adapter_type})
vmdk_create_spec = get_vmdk_create_spec(
session.vim.client.factory,
size_in_kb,
adapter_type,
disk_type)
vmdk_create_task = session._call_method(
session.vim,
"CreateVirtualDisk_Task",
session.vim.service_content.virtualDiskManager,
name=virtual_disk_path,
datacenter=dc_ref,
spec=vmdk_create_spec)
session._wait_for_task(vmdk_create_task)
LOG.debug("Created Virtual Disk of size %(vmdk_file_size_in_kb)s"
" KB and type %(disk_type)s",
{"vmdk_file_size_in_kb": size_in_kb,
"disk_type": disk_type})
def copy_virtual_disk(session, dc_ref, source, dest):
"""Copy a sparse virtual disk to a thin virtual disk.
This is also done to generate the meta-data file whose specifics
depend on the size of the disk, thin/thick provisioning and the
storage adapter type.
:param session: - session for connection
:param dc_ref: - data center reference object
:param source: - source datastore path
:param dest: - destination datastore path
:returns: None
"""
LOG.debug("Copying Virtual Disk %(source)s to %(dest)s",
{'source': source, 'dest': dest})
vim = session.vim
vmdk_copy_task = session._call_method(
vim,
"CopyVirtualDisk_Task",
vim.service_content.virtualDiskManager,
sourceName=source,
sourceDatacenter=dc_ref,
destName=dest)
session._wait_for_task(vmdk_copy_task)
LOG.debug("Copied Virtual Disk %(source)s to %(dest)s",
{'source': source, 'dest': dest})
def reconfigure_vm(session, vm_ref, config_spec):
"""Reconfigure a VM according to the config spec."""
reconfig_task = session._call_method(session.vim,
"ReconfigVM_Task", vm_ref,
spec=config_spec)
session._wait_for_task(reconfig_task)
def power_on_instance(session, instance, vm_ref=None):
"""Power on the specified instance."""
if vm_ref is None:
vm_ref = get_vm_ref(session, instance)
LOG.debug("Powering on the VM", instance=instance)
try:
poweron_task = session._call_method(
session.vim,
"PowerOnVM_Task", vm_ref)
session._wait_for_task(poweron_task)
LOG.debug("Powered on the VM", instance=instance)
except vexc.InvalidPowerStateException:
LOG.debug("VM already powered on", instance=instance)
def _get_vm_port_indices(session, vm_ref):
extra_config = session._call_method(vutil,
'get_object_property',
vm_ref,
'config.extraConfig')
ports = []
if extra_config is not None:
options = extra_config.OptionValue
for option in options:
if (option.key.startswith('nvp.iface-id.') and
option.value != 'free'):
ports.append(int(option.key.split('.')[2]))
return ports
def get_attach_port_index(session, vm_ref):
"""Get the first free port index."""
ports = _get_vm_port_indices(session, vm_ref)
# No ports are configured on the VM
if not ports:
return 0
ports.sort()
configured_ports_len = len(ports)
# Find the first free port index
for port_index in range(configured_ports_len):
if port_index != ports[port_index]:
return port_index
return configured_ports_len
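# Illustrative sketch (not part of the original module): because the port list
# is sorted, the first index whose value differs from its position is the
# first gap. A hypothetical worked example:
def _example_first_free_port_index():
    for ports, expected in [([], 0), ([0, 1, 3], 2), ([0, 1, 2], 3)]:
        free = next((i for i in range(len(ports)) if i != ports[i]),
                    len(ports))
        assert free == expected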
def get_vm_detach_port_index(session, vm_ref, iface_id):
extra_config = session._call_method(vutil,
'get_object_property',
vm_ref,
'config.extraConfig')
if extra_config is not None:
options = extra_config.OptionValue
for option in options:
if (option.key.startswith('nvp.iface-id.') and
option.value == iface_id):
return int(option.key.split('.')[2])
def power_off_instance(session, instance, vm_ref=None):
"""Power off the specified instance."""
if vm_ref is None:
vm_ref = get_vm_ref(session, instance)
LOG.debug("Powering off the VM", instance=instance)
try:
poweroff_task = session._call_method(session.vim,
"PowerOffVM_Task", vm_ref)
session._wait_for_task(poweroff_task)
LOG.debug("Powered off the VM", instance=instance)
except vexc.InvalidPowerStateException:
LOG.debug("VM already powered off", instance=instance)
def find_rescue_device(hardware_devices, instance):
"""Returns the rescue device.
The method will raise an exception if the rescue device does not
exist. The resuce device has suffix '-rescue.vmdk'.
:param hardware_devices: the hardware devices for the instance
:param instance: nova.objects.instance.Instance object
:return: the rescue disk device object
"""
for device in hardware_devices:
if (device.__class__.__name__ == "VirtualDisk" and
device.backing.__class__.__name__ ==
'VirtualDiskFlatVer2BackingInfo' and
device.backing.fileName.endswith('-rescue.vmdk')):
return device
msg = _('Rescue device does not exist for instance %s') % instance.uuid
raise exception.NotFound(msg)
def get_ephemeral_name(id_):
return 'ephemeral_%d.vmdk' % id_
def _detach_and_delete_devices_config_spec(client_factory, devices):
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
device_config_spec = []
for device in devices:
virtual_device_config = client_factory.create(
'ns0:VirtualDeviceConfigSpec')
virtual_device_config.operation = "remove"
virtual_device_config.device = device
virtual_device_config.fileOperation = "destroy"
device_config_spec.append(virtual_device_config)
config_spec.deviceChange = device_config_spec
return config_spec
def detach_devices_from_vm(session, vm_ref, devices):
"""Detach specified devices from VM."""
client_factory = session.vim.client.factory
config_spec = _detach_and_delete_devices_config_spec(
client_factory, devices)
reconfigure_vm(session, vm_ref, config_spec)
def get_ephemerals(session, vm_ref):
devices = []
hardware_devices = get_hardware_devices(session, vm_ref)
for device in hardware_devices:
if device.__class__.__name__ == "VirtualDisk":
if (device.backing.__class__.__name__ ==
"VirtualDiskFlatVer2BackingInfo"):
if 'ephemeral' in device.backing.fileName:
devices.append(device)
return devices
def get_swap(session, vm_ref):
hardware_devices = get_hardware_devices(session, vm_ref)
for device in hardware_devices:
if (device.__class__.__name__ == "VirtualDisk" and
device.backing.__class__.__name__ ==
"VirtualDiskFlatVer2BackingInfo" and
'swap' in device.backing.fileName):
return device
def create_folder(session, parent_folder_ref, name):
"""Creates a folder in vCenter
A folder of 'name' will be created under the parent folder.
The moref of the folder is returned.
"""
LOG.debug("Creating folder: %(name)s. Parent ref: %(parent)s.",
{'name': name,
'parent': vutil.get_moref_value(parent_folder_ref)})
try:
folder = session._call_method(session.vim, "CreateFolder",
parent_folder_ref, name=name)
LOG.info("Created folder: %(name)s in parent %(parent)s.",
{'name': name,
'parent': vutil.get_moref_value(parent_folder_ref)})
except vexc.DuplicateName as e:
LOG.debug("Folder already exists: %(name)s. Parent ref: %(parent)s.",
{'name': name,
'parent': vutil.get_moref_value(parent_folder_ref)})
val = e.details['object']
folder = vutil.get_moref(val, 'Folder')
return folder
def folder_ref_cache_update(path, folder_ref):
_FOLDER_PATH_REF_MAPPING[path] = folder_ref
def folder_ref_cache_get(path):
return _FOLDER_PATH_REF_MAPPING.get(path)
def _get_vm_name(display_name, id_):
if display_name:
return '%s (%s)' % (display_name[:41], id_[:36])
return id_[:36]
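# Illustrative sketch (not part of the original module): display names are
# capped at 41 characters and UUIDs at 36, so "<name> (<uuid>)" never exceeds
# 80 characters (presumably the vSphere VM-name limit). Hypothetical check:
def _example_vm_name_truncation():
    name = _get_vm_name('a' * 60, '0' * 36)
    assert name == '%s (%s)' % ('a' * 41, '0' * 36)
    assert len(name) == 80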
def rename_vm(session, vm_ref, instance):
vm_name = _get_vm_name(instance.display_name, instance.uuid)
rename_task = session._call_method(session.vim, "Rename_Task", vm_ref,
newName=vm_name)
session._wait_for_task(rename_task)
def _create_fcd_id_obj(client_factory, fcd_id):
id_obj = client_factory.create('ns0:ID')
id_obj.id = fcd_id
return id_obj
def attach_fcd(
session, vm_ref, fcd_id, ds_ref_val, controller_key, unit_number
):
client_factory = session.vim.client.factory
disk_id = _create_fcd_id_obj(client_factory, fcd_id)
ds_ref = vutil.get_moref(ds_ref_val, 'Datastore')
LOG.debug("Attaching fcd (id: %(fcd_id)s, datastore: %(ds_ref_val)s) to "
"vm: %(vm_ref)s.",
{'fcd_id': fcd_id,
'ds_ref_val': ds_ref_val,
'vm_ref': vm_ref})
task = session._call_method(
session.vim, "AttachDisk_Task", vm_ref, diskId=disk_id,
datastore=ds_ref, controllerKey=controller_key, unitNumber=unit_number)
session._wait_for_task(task)
def detach_fcd(session, vm_ref, fcd_id):
client_factory = session.vim.client.factory
disk_id = _create_fcd_id_obj(client_factory, fcd_id)
LOG.debug("Detaching fcd (id: %(fcd_id)s) from vm: %(vm_ref)s.",
{'fcd_id': fcd_id, 'vm_ref': vm_ref})
task = session._call_method(
session.vim, "DetachDisk_Task", vm_ref, diskId=disk_id)
session._wait_for_task(task)
"""
Test a bunch of functions that serve as an interface to standard stellar data
table
"""
import numpy as np
import logging
from astropy.io import fits
from astropy.table import Table
# from astropy.units.core import UnitConversionError
try:
import exceptions
except ImportError:
import builtins as exceptions # python 3 consistent
import sys
sys.path.insert(0, '..')
from chronostar.component import SphereComponent
from chronostar.synthdata import SynthData
from chronostar import tabletool
from chronostar import coordinate
from chronostar import transform
HIST_FILE_NAME = 'sample_data/small_historical_sample_table.fits'
CURR_FILE_NAME = 'sample_data/small_current_sample_table.fits'
# -----------------------------------------------
# -- To check correctness, have copied over --
# -- previous implementation into this script --
# -----------------------------------------------
# retired function, put here for comparison reasons
def transformAstrCovsToCartesian(astr_covs, astr_arr):
"""
Converts a covariance matrix from astrometric coords to LSR XYZUVW
Parameters
----------
astr_covs: ([nstars, 6, 6] array)
        values in the diagonal are the squared errors of
        (ra, dec, plx, pm_ra, pm_dec, rv), with the off-diagonals the
        product of the correlation (valued between -1 and 1) and the two
        intersecting errors.
astr_arr: ([nstars, 6] array)
the measured (mean) astrometric values
        (ra, dec, plx, pm_ra, pm_dec, rv)
"""
nstars = astr_arr.shape[0]
xyzuvw_covs = np.zeros((nstars, 6, 6))
for ix in range(nstars):
xyzuvw_covs[ix] = transform.transform_covmatrix(
astr_covs[ix], coordinate.convert_astrometry2lsrxyzuvw, astr_arr[ix],
dim=6
)
return xyzuvw_covs
# retired function, put here for comparison reasons
def convertAstrErrsToCovs(err_arr):
"""
Converts astrometry errors for each star into covariance matrices
Note that a negligible error is inserted for ra and dec
Parameters
----------
err_arr : ([nstars, 6] float array), astrometry errors with placeholder
0's for ra and dec: (ra, dec, pi, pm_ra, pm_dec, rv)
Returns
-------
astr_covs : ([nstars, 6, 6] float array), covariance matrix made by
inserting the square of the errors into the diagonal
"""
err_arr_cp = np.copy(err_arr)
nstars = err_arr_cp.shape[0]
err_arr_cp[:, :2] = 1e-1
logging.info("Angular position error is: {}".format(err_arr_cp[0,0]))
print("Angular position error is: {}".format(err_arr_cp[0,0]))
print("Changed!")
astr_covs = np.zeros((nstars, 6, 6))
for ix in range(nstars):
astr_covs[ix] = np.eye(6) * np.tile(err_arr_cp[ix], (6, 1))**2
return astr_covs
# Retired function, only here for comparison reasons
def convertTableToArray(star_table):
nstars = star_table['radeg'].shape[0]
measured_vals = np.vstack((
star_table['radeg'],
star_table['dedeg'],
star_table['plx'],
star_table['pmra'],
star_table['pmde'],
star_table['rv'],
)).T
errors = np.vstack((
np.zeros(nstars),
np.zeros(nstars),
star_table['e_plx'],
star_table['e_pmra'],
star_table['e_pmde'],
star_table['e_rv'],
)).T
return measured_vals, errors
# Retired function, only here for comparison reasons
def buildMeanAndCovMatFromRow(row):
"""
Build a covariance matrix from a row
Paramters
---------
row : astropy Table row
Entries: {X, Y, Z, U, V, W, dX, dY, ..., cXY, cXZ, ...}
Return
------
cov_mat : [6,6] numpy array
Diagonal elements are dX^2, dY^2, ...
Off-diagonal elements are cXY*dX*dY, cXZ*dX*dZ, ...
"""
dim = 6
CART_COL_NAMES = ['X', 'Y', 'Z', 'U', 'V', 'W',
'dX', 'dY', 'dZ', 'dU', 'dV', 'dW',
'c_XY', 'c_XZ', 'c_XU', 'c_XV', 'c_XW',
'c_YZ', 'c_YU', 'c_YV', 'c_YW',
'c_ZU', 'c_ZV', 'c_ZW',
'c_UV', 'c_UW',
'c_VW']
mean = np.zeros(dim)
for i, col_name in enumerate(CART_COL_NAMES[:6]):
mean[i] = row[col_name]
std_vec = np.zeros(dim)
for i, col_name in enumerate(CART_COL_NAMES[6:12]):
std_vec[i] = row[col_name]
corr_tri = np.zeros((dim,dim))
# Insert upper triangle (top right) correlations
for i, col_name in enumerate(CART_COL_NAMES[12:]):
corr_tri[np.triu_indices(dim,1)[0][i],np.triu_indices(dim,1)[1][i]] \
=row[col_name]
# Build correlation matrix
corr_mat = np.eye(6) + corr_tri + corr_tri.T
# Multiply through by standard deviations
cov_mat = corr_mat * std_vec * std_vec.reshape(6,1)
return mean, cov_mat
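# Illustrative sketch (not part of the original tests): the reconstruction
# above is cov[i, j] = corr[i, j] * std[i] * std[j], which the broadcast
# `corr_mat * std_vec * std_vec.reshape(6, 1)` implements. A hypothetical
# 2-D check:
def _example_corr_to_cov():
    std = np.array([2.0, 3.0])
    corr = np.array([[1.0, 0.5],
                     [0.5, 1.0]])
    cov = corr * std * std.reshape(2, 1)
    assert np.allclose(cov, [[4.0, 3.0],
                             [3.0, 9.0]])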
# Retired function, put here for comparison reasons
def loadDictFromTable(table):
"""
Takes the data in the table, builds dict with array of mean and cov matrices
    Parameters
    ----------
table : Astropy.Table (or str)
assoc_name : str
One of the labels in Moving group column:
118 Tau, 32 Orionis, AB Doradus, Carina, Carina-Near, Columba,
Coma Ber, Corona Australis, Hyades, IC 2391, IC 2602,
Lower Centaurus-Crux, Octans, Platais 8, Pleiades, TW Hya, Taurus,
Tucana-Horologium, Upper Centaurus Lupus, Upper CrA, Upper Scorpius,
        Ursa Major, beta Pictoris, chi 1 For (Alessi 13), epsilon Cha,
        eta Cha, rho Ophiuchi
Returns
-------
dict :
xyzuvw : [nstars,6] array of xyzuvw means
xyzuvw_cov : [nstars,6,6] array of xyzuvw covariance matrices
file_name : str
if table loaded from file, the pathway is stored here
indices : [nstars] int array
the table row indices of data converted into arrays
gaia_ids : [nstars] int array
the gaia ids of stars successfully converted
"""
star_pars = {}
if type(table) is str:
file_name = table
star_pars['file_name'] = file_name
table = Table.read(table)
gaia_col_name = 'source_id'
if gaia_col_name not in table.colnames:
gaia_col_name = 'gaia_dr2'
xyzuvw = []
xyzuvw_cov = []
indices = []
gaia_ids = []
nrows = len(table[gaia_col_name])
for ix, row in enumerate(table):
if nrows > 10000 and ix % 1000==0:
print("Done {:7} of {} | {:.2}%".format(ix, nrows,
float(ix)/nrows*100))
if np.isfinite(row['U']):
mean, cov = buildMeanAndCovMatFromRow(row)
xyzuvw.append(mean)
xyzuvw_cov.append(cov)
indices.append(ix)
gaia_ids.append(row[gaia_col_name])
star_pars['xyzuvw'] = np.array(xyzuvw).astype(np.float64)
star_pars['xyzuvw_cov'] = np.array(xyzuvw_cov).astype(np.float64)
star_pars['indices'] = np.array(indices)
star_pars['gaia_ids'] = np.array(gaia_ids)
star_pars['table'] = table
return star_pars
# Retired function, put here for comparison reasons
def convertManyRecToArray(data):
"""
Convert many Fits Records in astrometry into mean and covs (astro)
Note: ra_error and dec_error are in 'mas' while ra and dec
are given in degrees. Everything else is standard:
plx [mas], pm [mas/yr], rv [km/s]
Parameters
----------
data: [nstars] array of Recs
'source_id', 'ra', 'ra_error', 'dec', 'dec_error', 'parallax',
'parallax_error', 'pmra', 'pmra_error', 'pmdec', 'pmdec_error',
'ra_dec_corr', 'ra_parallax_corr', 'ra_pmra_corr',
'ra_pmdec_corr', 'dec_parallax_corr', 'dec_pmra_corr',
'dec_pmdec_corr', 'parallax_pmra_corr', 'parallax_pmdec_corr',
'pmra_pmdec_corr', 'astrometric_primary_flag', 'phot_g_mean_mag',
'radial_velocity', 'radial_velocity_error', 'teff_val'
Returns
-------
means: [nstars, 6] array
    covs: [nstars, 6, 6] array
"""
nstars = data.shape[0]
means = np.zeros((nstars,6))
means[:,0] = data['ra']
means[:,1] = data['dec']
means[:,2] = data['parallax']
means[:,3] = data['pmra']
means[:,4] = data['pmdec']
means[:,5] = data['radial_velocity']
# Array of dictionary keys to aid construction of cov matrices
cls = np.array([
['ra_error', 'ra_dec_corr', 'ra_parallax_corr',
'ra_pmra_corr', 'ra_pmdec_corr', None],
['ra_dec_corr', 'dec_error', 'dec_parallax_corr',
'dec_pmra_corr', 'dec_pmdec_corr', None],
['ra_parallax_corr', 'dec_parallax_corr', 'parallax_error',
'parallax_pmra_corr', 'parallax_pmdec_corr', None],
['ra_pmra_corr', 'dec_pmra_corr', 'parallax_pmra_corr',
'pmra_error', 'pmra_pmdec_corr', None],
['ra_pmdec_corr', 'dec_pmdec_corr', 'parallax_pmdec_corr',
'pmra_pmdec_corr', 'pmdec_error', None],
[None, None, None,
None, None, 'radial_velocity_error']
])
# Construct an [nstars,6,6] array of identity matrices
covs = np.zeros((nstars,6,6))
idx = np.arange(6)
covs[:, idx, idx] = 1.0
# Insert correlations into off diagonals
for i in range(0,5):
for j in range(i+1,5):
covs[:,i,j] = covs[:,j,i] = data[cls[i,j]]
# multiply each row and each column by appropriate error
for i in range(6):
covs[:,i,:] *= np.tile(data[cls[i,i]], (6,1)).T
covs[:,:,i] *= np.tile(data[cls[i,i]], (6,1)).T
# Might need to introduce some artificial uncertainty in
# ra and dec so as to avoid indefinite matrices (too narrow)
# RA and DEC errors are actually quoted in mas, so we convert cov
# entries into degrees
covs[:,:,:2] /= 3600000.
covs[:,:2,:] /= 3600000.
return means, covs
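# Illustrative sketch (not part of the original tests): one degree is
# 3,600,000 mas, so after dividing both the ra/dec rows and columns, the
# ra/dec diagonal entries are scaled twice (mas^2 -> deg^2) while their cross
# terms are scaled once (mas*X -> deg*X). A hypothetical 2-D check:
def _example_mas_to_deg_scaling():
    mas_per_deg = 3600000.
    cov = np.array([[4.0, 2.0],
                    [2.0, 9.0]])  # first axis plays the role of 'ra' [mas]
    cov[:, :1] /= mas_per_deg
    cov[:1, :] /= mas_per_deg
    assert np.isclose(cov[0, 0], 4.0 / mas_per_deg ** 2)
    assert np.isclose(cov[0, 1], 2.0 / mas_per_deg)
    assert np.isclose(cov[1, 1], 9.0)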
# Retired function, put here for comparison reasons
def convertGaiaToXYZUVWDict(astr_file):
"""
    Supposed to generate an XYZUVW dictionary for input to GroupFitter
Doesn't work on whole Gaia catalogue... too much memory I think
TODO: Sort out a more consistent way to handle file names...
"""
hdul = fits.open(astr_file)#, memmap=True)
means, covs = convertManyRecToArray(hdul[1].data)
astr_dict = {'astr_mns': means, 'astr_covs': covs}
cart_dict = convertMeasurementsToCartesian(astr_dict=astr_dict)
return cart_dict
# retired, put here for comparison reasons
def convertMeasurementsToCartesian(t=None, loadfile='', astr_dict=None):
"""
Parameters
----------
t : astropy Table with the following columns:
name : id or name of star
radeg : right ascension in degrees
dedeg : declination in degrees
plx : parallax in mas
e_plx : error of parallax in mas
pmra : proper motion in right ascension in mas/yr
e_pmra : error of pm in right ascension in mas/yr
pmde : proper motion in declination in mas/yr
e_pmde : error of pm in declination in mas/yr
rv : radial velocity in km/s
e_rv : error of radial velocity in km/s
loadfile : (String {''})
if t is None, try and load table from loadfile
Returns
-------
dict :
t : astropy table
xyzuvw : [nstars, 6] array
space positions and velocities of each star
xyzuvw_cov : [nstars, 6, 6] array
covariance of positions and velocities of each star
"""
    if t:
        astr_arr, err_arr = convertTableToArray(t)
        astr_covs = convertAstrErrsToCovs(err_arr)
    elif loadfile:
        t = Table.read(loadfile, format='ascii')
        astr_arr, err_arr = convertTableToArray(t)
        astr_covs = convertAstrErrsToCovs(err_arr)
    elif astr_dict:
        astr_arr = astr_dict['astr_mns']
        astr_covs = astr_dict['astr_covs']
    else:
        # StandardError no longer exists in python 3; raise a descriptive
        # ValueError instead
        raise ValueError('Must provide one of t, loadfile or astr_dict')
xyzuvw = coordinate.convert_many_astrometry2lsrxyzuvw(astr_arr, mas=True)
xyzuvw_cov = transformAstrCovsToCartesian(astr_covs, astr_arr)
xyzuvw_dict = {'table':t, 'xyzuvw':xyzuvw, 'xyzuvw_cov':xyzuvw_cov}
return xyzuvw_dict
def alternativeBuildCovMatrix(data):
nstars = len(data)
cov_labels = np.array([
['X', 'c_XY', 'c_XZ', 'c_XU', 'c_XV', 'c_XW'],
['c_XY', 'Y', 'c_YZ', 'c_YU', 'c_YV', 'c_YW'],
['c_XZ', 'c_YZ', 'Z', 'c_ZU', 'c_ZV', 'c_ZW'],
['c_XU', 'c_YU', 'c_ZU', 'U', 'c_UV', 'c_UW'],
['c_XV', 'c_YV', 'c_ZV', 'c_UV', 'V', 'c_VW'],
['c_XW', 'c_YW', 'c_ZW', 'c_UW', 'c_VW', 'W'],
])
# Construct an [nstars,6,6] array of identity matrices
covs = np.zeros((nstars,6,6))
idx = np.arange(6)
covs[:, idx, idx] = 1.0
    # Insert correlations into off diagonals. Unlike the astrometry case
    # (where the radial-velocity cross terms are absent) every dimension has
    # a correlation column here, so the loops must run over all 6 dimensions.
    for i in range(6):
        for j in range(i + 1, 6):
            covs[:, i, j] = covs[:, j, i] = data[cov_labels[i, j]]
# multiply each row and each column by appropriate error
for i in range(6):
covs[:,i,:] *= np.tile(data[cov_labels[i,i]], (6,1)).T
covs[:,:,i] *= np.tile(data[cov_labels[i,i]], (6,1)).T
return covs
# -----------------------------------------------
# -----------------------------------------------
# -- TESTS BEGIN HERE ---------------------
# -----------------------------------------------
# -----------------------------------------------
def test_transform_astrocart():
orig_astr_data = \
tabletool.build_data_dict_from_table(table=HIST_FILE_NAME, cartesian=False,
historical=False)
orig_cart_data = \
tabletool.build_data_dict_from_table(table=HIST_FILE_NAME, cartesian=True,
historical=True)
astr_mean = orig_astr_data['means'][0]
astr_cov = orig_astr_data['covs'][0]
cart_mean = orig_cart_data['means'][0] # [67.568, -5.279, 16.888, -3.871, 1.713, 1.997]
# helio: [67.568, -5.279, -8.112, -14.971, -10.527, -5.253]
cart_cov = orig_cart_data['covs'][0]
    calc_cart_mean, calc_cart_cov = tabletool.convert_astro2cart(astr_mean, astr_cov)
    # Previously these values were computed but never checked; compare
    # against the stored cartesian data so the test actually asserts
    # something (the tolerance is a judgement call)
    assert np.allclose(calc_cart_mean, cart_mean, atol=1e-3)
    assert np.allclose(calc_cart_cov, cart_cov, atol=1e-3)
def test_convertTableXYZUVWToArray():
"""
Check that generating cartesian means and covariance matrices matches
previous implementation
"""
orig_star_pars = loadDictFromTable(HIST_FILE_NAME)
main_colnames, error_colnames, corr_colnames =\
tabletool.get_historical_cart_colnames()
data = tabletool.build_data_dict_from_table(
orig_star_pars['table'][orig_star_pars['indices']],
main_colnames=main_colnames,
error_colnames=error_colnames,
corr_colnames=corr_colnames
)
assert np.allclose(orig_star_pars['xyzuvw'], data['means'])
assert np.allclose(orig_star_pars['xyzuvw_cov'], data['covs'])
def test_convertSynthTableToCart():
"""
    Checks that current day measured cartesian values (with negligible
measurement error) match the true current day cartesian values
"""
AGE = 40.
PARS = np.array([
[0., 0., 0., 0., 0., 0., 10., 5., AGE],
])
STARCOUNTS = [50] #, 30]
COMPONENTS = SphereComponent
MEASUREMENT_ERROR = 1e-10
# Generate synthetic data
synth_data = SynthData(pars=PARS, starcounts=STARCOUNTS,
Components=COMPONENTS,
measurement_error=MEASUREMENT_ERROR,
)
synth_data.synthesise_everything()
# Convert (inplace) astrometry to cartesian
tabletool.convert_table_astro2cart(synth_data.table)
# Check consistency between true current-day kinematics and measured
    # current-day kinematics (with negligible error)
for dim in 'XYZUVW':
dim_now = dim.lower() + '_now'
assert np.allclose(synth_data.table[dim_now],
synth_data.table[dim])
def test_convertAstrTableToCart():
"""
    Using a historical table, confirm that the cartesian conversion yields
    the same results by checking that the cartesian means and covariance
    matrices are identical.
Gets historical cartesian data from building data from table cart cols.
Gets updated cartesian data from building astro data from table cols,
converting to cartesian (stored back into table) then building data
from newly inserted table cart cols.
"""
# hist_filename = '../data/paper1/historical_beta_Pictoris_with_gaia_small_everything_final.fits'
hist_table = Table.read(HIST_FILE_NAME)
# curr_filename = '../data/paper1/beta_Pictoris_with_gaia_small_everything_final.fits'
curr_table = Table.read(CURR_FILE_NAME)
# Drop stars that have gone through any binary checking
# hist_table = Table(hist_table[100:300])
# curr_table = Table(curr_table[100:300])
# load in original means and covs
orig_cart_data =\
tabletool.build_data_dict_from_table(table=hist_table, cartesian=True,
historical=True)
tabletool.convert_table_astro2cart(table=curr_table, write_table=False)
cart_data = tabletool.build_data_dict_from_table(curr_table, cartesian=True)
assert np.allclose(orig_cart_data['means'], cart_data['means'])
assert np.allclose(hist_table['dX'], curr_table['X_error'])
assert np.allclose(orig_cart_data['covs'], cart_data['covs'])
def test_badColNames():
"""
Check that columns have consistent (or absent) units across measurements
and errors
    First test that comparing a column in degrees to a column in mas/yr
    raises a UserWarning.
    Then test that comparing a column in degrees to a column without units
    raises no issue.
"""
main_colnames, error_colnames, corr_colnames = \
tabletool.get_colnames(cartesian=False)
# main_colnames[5] = 'radial_velocity_best'
# error_colnames[5] = 'radial_velocity_error_best'
# corrupt ordering of column names
corrupted_error_colnames = list(error_colnames)
corrupted_error_colnames[0], corrupted_error_colnames[3] =\
error_colnames[3], error_colnames[0]
# filename = '../data/paper1/beta_Pictoris_with_gaia_small_everything_final.fits'
table = Table.read(CURR_FILE_NAME)
# Only need a handful of rows
table = Table(table[:10])
# Catch when units are inconsistent
try:
tabletool.convert_table_astro2cart(
table,
astr_main_colnames=main_colnames,
astr_error_colnames=corrupted_error_colnames,
astr_corr_colnames=corr_colnames
)
except Exception as e:
assert type(e) == exceptions.UserWarning
# In the case where units have not been provided, then just leave it be
try:
error_colnames[0] = 'ra_dec_corr'
tabletool.convert_table_astro2cart(table,
astr_main_colnames=main_colnames,
astr_error_colnames=error_colnames,
astr_corr_colnames=corr_colnames)
    except Exception:
        assert False
def test_build_data_from_incomplete_table():
"""
Sometimes rows will be missing data, e.g. from when binaries
have been merged. build_data_from_dict should detect the
presence of nans and skip them
"""
# build a dummy table of data
NSTARS = 10
NDIM = 6
missing_row_ix = (np.array([0,3,4]),)
means = np.random.rand(NSTARS,NDIM)
covs = np.array(NSTARS*[np.eye(NDIM,NDIM)])
nan_mask = np.array(NSTARS*[False])
# check bad data are within index range
assert np.all(missing_row_ix[0] < NSTARS)
nan_mask[missing_row_ix] = True
covs[nan_mask] = np.nan
names = np.arange(NSTARS)
dummy_table = Table()
dummy_table['names'] = names
tabletool.append_cart_cols_to_table(dummy_table)
for row, mean, cov in zip(dummy_table, means, covs):
tabletool.insert_data_into_row(row, mean, cov)
star_pars = tabletool.build_data_dict_from_table(dummy_table)
assert not np.any(np.isnan(star_pars['means']))
assert not np.any(np.isnan(star_pars['covs']))
# check the correct number of rows have been returned
assert len(star_pars['means']) == np.sum(np.logical_not(nan_mask))
assert len(star_pars['covs']) == np.sum(np.logical_not(nan_mask))
if __name__ == '__main__':
test_transform_astrocart()
test_convertAstrTableToCart()
"""
Code to manage IAM roles, groups, and policies.
"""
from __future__ import print_function
import json
import os
import os.path
import urllib
import urllib2
from collections import defaultdict
import logging
from datetime import datetime
import boto
import boto.iam
import boto3
import botocore
from .disco_config import read_config
from .disco_aws_util import is_truthy
from .resource_helper import throttled_call
logger = logging.getLogger(__name__)
IAM_USER_POLICY_DIR = "iam/user_policies"
IAM_INSTANCE_POLICY_DIR = "iam/instance_policies"
IAM_GROUP_DIR = "iam/group_membership"
IAM_ARPD_PATH = "iam/federation/AssumeRolePolicyDocument.iam"
IAM_EXT = ".iam"
TR_EXT = ".tr"
GROUP_EXT = ".grp"
GROUP_PREFIX = "disco"
IAM_SECTION = "iam"
class DiscoIAM(object):
'''Class orchestrating identity and access management on AWS (IAM)'''
def __init__(self, config=None, environment=None, boto2_connection=None, boto3_connection=None):
if config:
self._config = config
else:
self._config = read_config()
self._boto2_connection = boto2_connection
self._boto3_connection = boto3_connection
self._environment = environment
self._now = datetime.utcnow().isoformat('T').replace(':', '.')
@property
def connection(self):
"""
Lazily creates boto2 IAM connection
"""
if not self._boto2_connection:
self._boto2_connection = boto.connect_iam()
return self._boto2_connection
@property
def boto3_iam(self):
"""
Lazily creates boto3 IAM connection
"""
if not self._boto3_connection:
self._boto3_connection = boto3.client('iam')
return self._boto3_connection
def set_environment(self, environment):
"""Sets the environment"""
self._environment = environment
def option(self, key):
"""Fetch a configuration option"""
if self._environment:
env_key = "{0}@{1}".format(key, self._environment)
if self._config.has_option(IAM_SECTION, env_key):
return self._config.get(IAM_SECTION, env_key)
if self._config.has_option(IAM_SECTION, key):
return self._config.get(IAM_SECTION, key)
return None
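    # Illustrative sketch (not part of the original class): option() prefers
    # an environment-scoped entry over the bare key. With hypothetical
    # config entries
    #     [iam]
    #     role_prefix@prod = prod_disco
    #     role_prefix = disco
    # option('role_prefix') returns 'prod_disco' when the environment is
    # 'prod' and falls back to 'disco' for any other environment.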
def option_list(self, key):
"""Fetch a configuration option as a list"""
value = self.option(key)
return value.split() if value else []
def get_certificate_arn(self, dns_name):
"""Returns a Server Certificate ARN from IAM given the DNS name"""
certs = self.boto3_iam.list_server_certificates()["ServerCertificateMetadataList"]
cert = [cert['Arn'] for cert in certs if cert['ServerCertificateName'] == dns_name]
return cert[0] if cert else None
def list_groups(self):
'''Lists IAM User Groups'''
groups = throttled_call(
self.connection.get_all_groups,
max_items=500
).list_groups_response.list_groups_result.groups
return [
group.group_name
for group in groups
]
def create_group(self, group_name):
'''Creates an IAM User Group'''
throttled_call(self.connection.create_group, group_name)
def remove_group(self, group_name):
'''Deletes an IAM User Group (this removes users and policies from group first)'''
logger.debug("Removing group %s.", group_name)
for user in self.list_group_members(group_name):
self.remove_user_from_group(user, group_name)
for policy in self.list_group_policies(group_name):
self.remove_group_policy(group_name, policy)
throttled_call(self.connection.delete_group, group_name)
def list_group_policies(self, group_name):
'''Lists all policies attached to an IAM User Group'''
return throttled_call(
self.connection.get_all_group_policies,
group_name,
max_items=500
).list_group_policies_response.list_group_policies_result.policy_names
def get_group_policy(self, group_name, policy_name):
"""Returns one of a group's IAM policies by name"""
resp = throttled_call(
self.connection.get_group_policy,
group_name,
policy_name
).get_group_policy_response
return urllib.unquote(resp.get_group_policy_result.policy_document)
def remove_group_policy(self, group_name, policy_name):
"""Removes one of a group's IAM policies by name"""
throttled_call(self.connection.delete_group_policy, group_name, policy_name)
def _format_policy_name(self, policy_name):
return policy_name + '_' + self._now
def set_group_policy(self, group_name, policy_name, policy_file):
"""Sets one of a groups IAM policies by name"""
logger.debug("Applying policy %s to group %s from %s.", policy_name, group_name, policy_file)
with open(policy_file) as infile:
policy = infile.read()
policy_json = json.dumps(json.loads(policy), indent=4) # indentation is important
throttled_call(
self.connection.put_group_policy,
group_name,
self._format_policy_name(policy_name),
policy_json
)
def list_users(self):
'''List IAM Users'''
users = throttled_call(
self.connection.get_all_users,
max_items=500
)
return [
user.user_name
for user in users.list_users_response.list_users_result.users
]
def print_users(self):
'''Pretty Prints IAM Users to standard output'''
users = self.list_users()
if not users:
return
fmt = "{0:<" + str(max([len(user) for user in users])) + "} {1}"
for user in users:
groups = throttled_call(
self.connection.get_groups_for_user,
user
).list_groups_for_user_response.list_groups_for_user_result.groups
groups_str = ",".join(sorted([group.group_name for group in groups]))
print((fmt.format(user, groups_str)))
def create_user(self, user_name):
'''Creates an IAM User'''
logger.debug("Creating user %s.", user_name)
throttled_call(self.connection.create_user, user_name)
def remove_user(self, user_name):
'''Deletes an IAM User'''
logger.debug("Removing user %s.", user_name)
for key in self.list_access_keys(user_name):
self.remove_access_key(user_name, key.access_key_id)
iam = boto3.resource('iam')
user = iam.User(user_name)
attached_policies = user.attached_policies.all()
for policy in attached_policies:
user.detach_policy(PolicyArn=policy.arn)
login_profile = iam.LoginProfile(user_name)
try:
login_profile.delete()
except botocore.exceptions.ClientError as error:
logger.debug("%s doesnt have a login profile. Error Response: %s", user_name, error.response)
throttled_call(self.connection.delete_user, user_name)
def list_user_groups(self, user_name):
'''Lists groups that IAM User is a member of'''
groups_response = throttled_call(
self.connection.get_groups_for_user,
user_name,
max_items=500
)
groups = groups_response.list_groups_for_user_response.list_groups_for_user_result.groups
return [group.group_name for group in groups]
def add_user_to_group(self, user_name, group_name):
'''Adds an IAM User to a group'''
throttled_call(self.connection.add_user_to_group, group_name, user_name)
def remove_user_from_group(self, user_name, group_name):
'''Removes an IAM User from a group'''
throttled_call(self.connection.remove_user_from_group, group_name, user_name)
def list_access_keys(self, user_name):
'''Lists the AWS access keys attached to an IAM user'''
keys = throttled_call(
self.connection.get_all_access_keys,
user_name
).list_access_keys_response.list_access_keys_result.access_key_metadata
return [key for key in keys]
def create_access_key(self, user_name):
'''Creates an AWS access key for an IAM user'''
access_key = throttled_call(
self.connection.create_access_key,
user_name
).create_access_key_response.create_access_key_result
print("[Credentials]")
print(("aws_access_key_id = {0}".format(access_key.access_key_id)))
print(("aws_secret_access_key = {0}".format(access_key.secret_access_key)))
def activate_access_key(self, user_name, access_key_id):
'''Activates an AWS access key for an IAM user'''
throttled_call(self.connection.update_access_key, access_key_id, 'Active', user_name)
def deactivate_access_key(self, user_name, access_key_id):
'''Deactivates an AWS access key for an IAM user'''
throttled_call(self.connection.update_access_key, access_key_id, 'Inactive', user_name)
def remove_access_key(self, user_name, access_key_id):
'''Deletes an AWS access key from an IAM user'''
logger.debug("Removing %s's key %s", user_name, access_key_id)
throttled_call(self.connection.delete_access_key, access_key_id, user_name)
# TODO refactor instance profile functions
# 1. There is at most one role per instance profile which wasn't known by us when
# we wrote these functions.
# 2. Some of the function names bear little relation to what the functions actually do.
# 3. These do not use the snake case function naming convention.
def listinstanceprofiles(self):
'''Lists all instance profiles (each contains one instance role of the same name)'''
profiles = throttled_call(self.connection.list_instance_profiles)
return [
profile.instance_profile_name
for profile in (
profiles.list_instance_profiles_response.list_instance_profiles_result.instance_profiles
)
]
def list_roles_instance_profiles(self, role):
'''Lists the 0 or 1 instance roles associated with an instance profile'''
profiles = throttled_call(self.connection.list_instance_profiles_for_role, role)
return [
profile.role_name
for profile in (
profiles.list_instance_profiles_for_role_response
.list_instance_profiles_for_role_result.instance_profiles
)
]
def getinstanceprofile(self, instance_profile_name):
'''Returns instance role associated with the instance profile name'''
response = throttled_call(self.connection.get_instance_profile, instance_profile_name)
        # despite the plural attribute name, .roles returns a single element
role = response.get_instance_profile_response.get_instance_profile_result.roles
return role if role else None
def createinstanceprofile(self, instance_profile_name):
'''Creates an instance profile'''
throttled_call(self.connection.create_instance_profile, instance_profile_name)
def removeinstanceprofile(self, instance_profile_name):
'''Deletes an instance profile'''
throttled_call(self.connection.delete_instance_profile, instance_profile_name)
def listroles(self):
"""
Return all roles with deserialized Assume Role Policy Document
"""
roles = throttled_call(
self.connection.list_roles,
max_items=500
).list_roles_response.list_roles_result.roles
for role in roles:
role.assume_role_policy_document = AssumeRolePolicyDocument(role.assume_role_policy_document)
return roles
def createrole(self, role_name, arpd=None):
'''Creates an IAM Role'''
logger.debug("Creating role %s", role_name)
throttled_call(self.connection.create_role, role_name, arpd)
def removerole(self, role_name):
'''Deletes an IAM Role and any linked policies or instance profile'''
for policy in self.listrolepolicies(role_name):
self.removerolepolicy(role_name, policy)
for profile in self.list_roles_instance_profiles(role_name):
self.removerolefrominstanceprofile(role_name, profile)
self.removeinstanceprofile(profile)
throttled_call(self.connection.delete_role, role_name)
def listrolepolicies(self, role_name):
'''Lists policies attached to an IAM Role'''
response = throttled_call(
self.connection.list_role_policies,
role_name,
max_items=500
)
return [
policy_name
for policy_name in response.list_role_policies_response.list_role_policies_result.policy_names
]
def getrolepolicy(self, role_name, policy_name):
"""Return a role's IAM policy"""
response = throttled_call(self.connection.get_role_policy, role_name, policy_name)
policy = response.get_role_policy_response.get_role_policy_result.policy_document
return urllib.unquote(policy)
def createrolepolicy(self, role_name, policy_name, policy_file):
'''Creates an IAM Role Policy given an input file containing the appropriate json'''
logger.debug("Applying policy %s to role %s from %s.", policy_name, role_name, policy_file)
with open(policy_file) as infile:
policy = infile.read()
policy_json = json.dumps(json.loads(policy), indent=4) # indentation is important
throttled_call(
self.connection.put_role_policy,
role_name,
self._format_policy_name(policy_name),
policy_json
)
def removerolepolicy(self, role_name, policy_name):
'''Deletes an IAM role policy'''
throttled_call(self.connection.delete_role_policy, role_name, policy_name)
def addroletoinstanceprofile(self, role_name, instance_profile_name):
'''Adds an IAM Role to an IAM Instance Profile'''
throttled_call(self.connection.add_role_to_instance_profile, instance_profile_name, role_name)
def removerolefrominstanceprofile(self, role_name, instance_profile_name):
        '''Removes an IAM Role from an IAM Instance Profile'''
throttled_call(self.connection.remove_role_from_instance_profile, instance_profile_name, role_name)
def decode_message(self, message):
        '''Decodes an encrypted AWS error message'''
from boto.sts import STSConnection
sts_connection = STSConnection()
print("---------- Decoded message ----------")
print((sts_connection.decode_authorization_message(message).decoded_message))
def account_id(self):
"""
Current Account ID
"""
user = throttled_call(self.connection.get_user)
return user.get_user_response.get_user_result.user.arn.split(":")[4]
def reapply_user_policies(self):
'''Reapplies all IAM and federated user policies from configuration in IAM_USER_POLICY_DIR'''
policies = self._list_role_configs(IAM_USER_POLICY_DIR)
self.reapply_user_groups(policies)
self.reapply_trust_roles(policies)
def _list_role_configs(self, directory):
policy_files = os.listdir(directory)
return [policy[:-len(IAM_EXT)] for policy in policy_files if policy.endswith(IAM_EXT)]
def _list_roles_by_type(self):
roles = self.listroles()
federated_roles = [
role.role_name for role in roles
if role.assume_role_policy_document.is_federated()
]
unfederated_roles = [
role.role_name for role in roles
if not role.assume_role_policy_document.is_federated()
]
return federated_roles, unfederated_roles
def _prune_role_policies(self, role_name, keep_policy):
existing_policies = (set(self.listrolepolicies(role_name)) -
set([self._format_policy_name(keep_policy)]))
for policy in existing_policies:
self.removerolepolicy(role_name, policy)
def _cleanup_roles(self, old_roles, updated_roles):
deleted_roles = []
for role in old_roles:
if role not in updated_roles:
logger.debug("Cleaning up role: %s", role)
self.removerole(role)
deleted_roles.append(role)
return deleted_roles
def _get_federated_trust_relationship_json(self):
with open(IAM_ARPD_PATH) as arpd_file:
arpd = json.load(arpd_file)
try:
arpd["Statement"][0]["Principal"]["Federated"] = self.list_saml_providers()[0].arn
except (KeyError, IndexError):
raise RuntimeError("Failed to look up provider ARN. Make sure SAML provider is configured.")
return json.dumps(arpd, indent=4) # indentation is important
def _get_trust_relationship_json(self, policy):
tr_filename = "{0}/{1}{2}".format(IAM_USER_POLICY_DIR, policy, TR_EXT)
if not os.path.isfile(tr_filename):
return None
with open(tr_filename) as tr_file:
json_data = json.load(tr_file)
return json.dumps(json_data, indent=4) # indentation is important
def _create_role_name(self, role_prefix, policy, naked_roles=None):
if not naked_roles:
naked_roles = self.option_list("naked_roles")
if policy in naked_roles:
return policy
parts = []
if role_prefix:
parts.append(role_prefix)
if self._environment:
parts.append(self._environment)
return "_".join(parts + [policy])
# Allow >15 variables
# pylint: disable=R0914
def reapply_trust_roles(self, all_policies):
'''
Creates and updates roles which are assumed via a trust.
The trust may be either federated trust (SSO), or a specific trust ".tr" policy
document defined for a particular role.
These are not instance roles which have an associated instance profile.
'''
naked_roles = self.option_list("naked_roles")
role_prefix = self.option("role_prefix")
policy_blacklist = self.option_list("policy_blacklist")
federated_roles, unfederated_roles = self._list_roles_by_type()
existing_roles = set(federated_roles) | set(unfederated_roles)
non_instance_roles = [
role for role in existing_roles
if role.startswith(role_prefix)
]
try:
federated_trust = self._get_federated_trust_relationship_json()
except IOError:
federated_trust = None
logger.debug("Not federating trust, no trust document found at %s", IAM_ARPD_PATH)
policies = set(all_policies) - set(policy_blacklist)
updated_roles = []
for policy in policies:
role_name = self._create_role_name(role_prefix, policy, naked_roles)
specific_trust = self._get_trust_relationship_json(policy)
trust = specific_trust if specific_trust else federated_trust
if role_name in existing_roles:
if trust:
throttled_call(self.connection.update_assume_role_policy, role_name, trust)
else:
self.createrole(role_name, trust)
self.createrolepolicy(
role_name, policy,
"{0}/{1}{2}".format(IAM_USER_POLICY_DIR, policy, IAM_EXT)
)
self._prune_role_policies(role_name, keep_policy=policy)
updated_roles.append(role_name)
deleted_roles = self._cleanup_roles(non_instance_roles, updated_roles)
logger.debug("Updated federated user roles: %s.", updated_roles)
logger.debug("Deleted federated user roles: %s.", deleted_roles)
return (updated_roles, deleted_roles)
def reapply_instance_policies(self):
'''
Creates and updates roles which are assumed by instances.
The roles always begin with "instance_" and have an associated instance
profile of the same name.
'''
policies = self._list_role_configs(IAM_INSTANCE_POLICY_DIR)
role_prefix = "instance"
federated_roles, unfederated_roles = self._list_roles_by_type()
instance_roles = [role for role in unfederated_roles if role.startswith(role_prefix)]
updated_roles = []
for policy in policies:
role_name = "_".join([role_prefix, policy])
if role_name in instance_roles:
self._prune_role_policies(role_name, keep_policy=policy)
# TODO recreate the instance profile or make sure it exists.
elif role_name in federated_roles:
self.removerole(role_name)
self.createrole(role_name)
self.createinstanceprofile(role_name)
self.addroletoinstanceprofile(role_name, role_name)
else:
self.createrole(role_name)
self.createinstanceprofile(role_name)
self.addroletoinstanceprofile(role_name, role_name)
self.createrolepolicy(
role_name, policy,
"{0}/{1}{2}".format(IAM_INSTANCE_POLICY_DIR, policy, IAM_EXT)
)
updated_roles.append(role_name)
deleted_roles = self._cleanup_roles(instance_roles, updated_roles)
logger.debug("Updated instance roles: %s.", updated_roles)
logger.debug("Deleted instance roles: %s.", deleted_roles)
return (updated_roles, deleted_roles)
def _prune_group_policies(self, group_name, keep_policy):
existing_policies = (set(self.list_group_policies(group_name)) -
set([self._format_policy_name(keep_policy)]))
for existing_policy in existing_policies:
self.remove_group_policy(group_name, existing_policy)
def reapply_user_groups(self, policies):
'''Updates IAM User Groups from configuration (not including group membership)'''
groups = self.list_groups()
updated_groups = []
for policy in policies:
group_name = "{0}_{1}".format(GROUP_PREFIX, policy)
if group_name not in groups:
self.create_group(group_name)
self.set_group_policy(
group_name, policy,
"{0}/{1}{2}".format(IAM_USER_POLICY_DIR, policy, IAM_EXT)
)
if group_name in groups:
self._prune_group_policies(group_name, policy)
updated_groups.append(group_name)
deleted_groups = []
for group in groups:
if group not in updated_groups:
self.remove_group(group)
deleted_groups.append(group)
logger.debug("Updated policies on groups: %s.", updated_groups)
logger.debug("Deleted groups: %s.", deleted_groups)
return (updated_groups, deleted_groups)
def list_group_members(self, group):
'''Returns list of IAM Users in IAM Group'''
group = throttled_call(self.connection.get_group, group)
return [
user.user_name
for user in group.get_group_response.get_group_result.users
]
def _list_users_in_config(self, environment):
users = os.listdir("/".join([IAM_GROUP_DIR, environment]))
return [user[:-len(GROUP_EXT)] for user in users if user.endswith(GROUP_EXT)]
def reapply_group_members(self):
'''Updates IAM User Group membership from configuration'''
users = set(self._list_users_in_config(self._environment))
existing_users = set(self.list_users())
# Create users before we attempt to add them to groups
for user in users.difference(existing_users):
logger.debug("Creating user %s.", user)
self.create_user(user)
# Update group members
groups = defaultdict(set)
for user in users:
with open("{0}/{1}/{2}{3}".format(IAM_GROUP_DIR, self._environment, user, GROUP_EXT)) as userfile:
usergroups = userfile.read().split()
for group in usergroups:
groups["{0}_{1}".format(GROUP_PREFIX, group)].add(user)
        for group in set(groups) | set(self.list_groups()):
existing_members = set(self.list_group_members(group))
for user in groups[group].difference(existing_members):
logger.debug("Adding user %s to group %s", user, group)
self.add_user_to_group(user, group)
for user in existing_members.difference(groups[group]):
logger.debug("Removing user %s from group %s", user, group)
self.remove_user_from_group(user, group)
# Delete users after they've been purged from groups
for user in existing_users.difference(users):
self.remove_user(user)
# Delete groups without users
if is_truthy(self.option("prune_empty_groups")):
empty_groups = [group
for group in self.list_groups()
if not self.list_group_members(group)]
for group in empty_groups:
self.remove_group(group)
logger.debug("Deleted empty groups: %s.", empty_groups)
def create_saml_provider(self):
"""
Create SAML providers from configuration
"""
name = self.option("saml_provider_name")
url = self.option("saml_provider_url")
if not name or not url:
logger.debug("No SAML provider")
return None
metadata_response = urllib2.urlopen(url)
federation_metadata = metadata_response.read()
throttled_call(self.connection.create_saml_provider, federation_metadata, name)
logger.debug("Created SAML provider: %s.", name)
return name
def list_saml_providers(self):
"""
List all SAML providers
"""
providers = throttled_call(self.connection.list_saml_providers)
return providers.list_saml_providers_response.list_saml_providers_result.saml_provider_list
def delete_saml_providers(self):
"""
Delete all SAML providers
"""
deleted = []
for provider in self.list_saml_providers():
deleted.append(provider.arn)
throttled_call(self.connection.delete_saml_provider, provider.arn)
logger.debug("Deleted SAML providers: %s.", deleted)
return deleted
def get_role_arn(self, policy_name):
"""
Returns the ARN of the role associated with the policy
"""
role_prefix = self.option("role_prefix")
role_name = self._create_role_name(role_prefix, policy_name)
role = self.boto3_iam.get_role(RoleName=role_name).get("Role")
return role["Arn"] if role else ""
class AssumeRolePolicyDocument(object):
"""
Assume Role Policy Document of a role.
"""
def __init__(self, document):
self.document = json.loads(urllib.unquote(document))
def is_federated(self):
"""Returns true iff a role is federated with Microsoft Active Directory"""
try:
return "Federated" in self.document["Statement"][0]["Principal"]
except (KeyError, IndexError):
pass
return False
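

# Minimal usage sketch (illustrative; assumes ``role`` is a boto IAM role whose
# ``assume_role_policy_document`` attribute holds the URL-encoded JSON string
# returned by the IAM API):
#
#   arpd = AssumeRolePolicyDocument(role.assume_role_policy_document)
#   if arpd.is_federated():
#       ...  # assumed via SSO federation rather than a role-specific trust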
|
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset object, which makes it easier to train NQL."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
import nql
import numpy as np
import tensorflow.compat.v2 as tf
def tuple_generator_builder(
    context,
    tuple_input,
    type_specs,
    normalize_outputs=True,
    field_separator='\t',
    entity_separator=' || ',
    start_line=0,
    end_line=None):
"""Create iterator over tuples produced by parsing lines from a file.
  The lines are delimited by field_separator, with each field holding a
  different type of feature. By convention the last field is the desired
  output of the model given the first n-1 fields as input; if
  normalize_outputs is True, this field will be L1-normalized.
The types of each field are given by the list of type_specs. The possible
type_specs are
1) The string name of an entity type declared in context. In this case the
corresponding part of 'lines' should be a set of entity names, of the provided
type, separated by the 'entity_separator' string. This will be converted to a
k-hot representation of that set of entities.
2) The python type str. In this case the corresponding part of 'lines' will
be passed on as a tf.string.
Args:
context: a NeuralQueryContext
    tuple_input: Either a filename or an iterator over lines of data.
type_specs: list of specifications for how to parse each tab-separated field
of the line. The possible specifications are listed above.
    normalize_outputs: treat the last field as a label and L1-normalize it
field_separator: string to separate fields
entity_separator: string to separate entity names
start_line: Begin returning values at this row.
end_line: Stop returning values before this row.
Returns:
a function taking no arguments that returns an Iterable[Tuple[Any]]
Raises:
ValueError, for incorrectly formatted lines
"""
def tuple_generator():
"""Closure produced by tuple_generator_builder."""
line_iter = tf.io.gfile.GFile(tuple_input) if isinstance(
tuple_input, str) else tuple_input
line_number = 0
for line_number, line in enumerate(line_iter):
if line_number < start_line:
continue
if end_line and line_number >= end_line:
break
line = line.strip('\r\n')
parts = line.split(field_separator)
if len(parts) != len(type_specs):
raise ValueError('line (%d) does not have %d fields: %r' %
(line_number, len(type_specs), line))
buf = []
try:
for i in range(len(parts)):
spec = type_specs[i]
if isinstance(spec, str) and context.is_type(spec):
parsed_val = k_hot_array_from_string_list(
context, spec, parts[i].split(entity_separator))
elif spec == str:
parsed_val = parts[i]
else:
raise ValueError('illegal type_spec %r' % spec)
buf.append(parsed_val)
if normalize_outputs:
buf_sum = np.sum(buf[-1])
if buf_sum:
buf[-1] /= buf_sum
yield tuple(buf)
except (nql.EntityNameError, nql.TypeNameError) as ex:
        logging.warning('Problem %r on line (%d): %r', ex, line_number, line)
raise
return tuple_generator
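# Illustrative call (assumes ``ctx`` is a NeuralQueryContext that declares an
# entity type 'city_t'; the in-memory list stands in for a file of lines):
#
#   gen = tuple_generator_builder(ctx, ['q1\tparis || london'], [str, 'city_t'])
#   question, label_k_hot = next(iter(gen()))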
def tuple_dataset(context,
                  tuple_input,
                  type_specs,
                  normalize_outputs=True,
                  field_separator='\t',
                  entity_separator=' || ',
                  start_line=0,
                  end_line=None):
"""Produce a dataset by parsing lines from a file.
  Lines are formatted as described in the documentation for tuple_generator_builder.
Args:
context: passed to tuple_generator_builder
tuple_input: passed to tuple_generator_builder
type_specs: passed to tuple_generator_builder
normalize_outputs: passed to tuple_generator_builder
field_separator: passed to tuple_generator_builder
entity_separator: passed to tuple_generator_builder
start_line: passed to tuple_generator_builder
end_line: passed to tuple_generator_builder
Returns:
    tf.data.Dataset over tuples, with one component per tab-separated field
"""
return tf.data.Dataset.from_generator(
tuple_generator_builder(context, tuple_input, type_specs,
normalize_outputs, field_separator,
entity_separator, start_line, end_line),
tuple([spec_as_tf_type(spec) for spec in type_specs]),
tuple([spec_as_shape(spec, context) for spec in type_specs]))
def spec_as_tf_type(spec):
"""Convert a type_spec to a tf type.
Args:
spec: a single specification for tuple_generator_builder
Returns:
type specification required by tf.data.Dataset.from_generator
"""
if spec == str:
return tf.string
elif isinstance(spec, int):
return tf.int32
else:
return tf.float32
def spec_as_shape(spec, context):
"""Convert a type_spec to a tf shape.
Args:
spec: a single specification for tuple_generator_builder
context: a NQL context
Returns:
tensor shape specification, as required by tf.data.Dataset.from_generator
"""
if spec == str:
return tf.TensorShape([])
elif isinstance(spec, int):
return tf.TensorShape([spec])
else:
return tf.TensorShape([context.get_max_id(spec)])
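# For example, type_specs [str, 'city_t'] map to the tf types
# (tf.string, tf.float32) and the shapes (TensorShape([]),
# TensorShape([context.get_max_id('city_t')])) respectively.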
# GOOGLE_INTERNAL: TODO(b/124102056) Consider moving into nql.
def k_hot_array_from_string_list(context,
                                 typename,
                                 entity_names,
                                 ignore_unknowns=False):
"""Create a numpy array encoding a k-hot set.
Args:
    context: a NeuralQueryContext
typename: type of entity_names
entity_names: list of names of type typename
ignore_unknowns: whether or not to ignore unknown entity names
Returns:
    A k-hot-array representation of the set of entity_names. For frozen
    dictionaries, unknown entity names are mapped to the unknown_id of their
    type, or discarded if the unknown_value of the type is None and
    ignore_unknowns is set.
    Unknown entity names raise an nql.EntityNameError for non-frozen
    dictionaries, and for frozen dictionaries where the unknown_value is None
    and ignore_unknowns is not set.
It is possible for this method to return an all-zeros array.
"""
# Empty string is not a valid entity_name.
ids = []
for entity_name in entity_names:
if not entity_name:
continue # Ignore empty names
entity_id = context.get_id(entity_name, typename)
    if not ignore_unknowns and entity_id is None:
raise nql.EntityNameError(entity_name, typename,
'Cannot make k-hot vector')
ids.append(entity_id)
# None is not a valid id.
valid_ids = [x for x in ids if x is not None]
max_id = context.get_max_id(typename)
result = np.zeros((max_id,), dtype='float32')
if valid_ids:
result[valid_ids] = 1.
return result
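# Sketch (assumes ``ctx`` declares an entity type 'city_t' containing 'paris'
# and 'tokyo'):
#   k_hot_array_from_string_list(ctx, 'city_t', ['paris', 'tokyo'])
# returns a float32 vector of length ctx.get_max_id('city_t') with ones at
# the ids of the two cities.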
def placeholder_for_type(context,
                         type_spec,
                         name=None):
"""Produce a Tensorflow placeholder for this type_spec.
Args:
context: a NeuralQueryContext
type_spec: a single type_spec (see tuple_dataset)
name: a name to use for the placeholder
Returns:
a Tensorflow placeholder
Raises:
ValueError, if the type_spec is invalid
"""
if type_spec == str:
return tf.compat.v1.placeholder(tf.string, shape=[None], name=name)
elif isinstance(type_spec, str) and context.is_type(type_spec):
name = name or ('%s_ph' % type_spec)
return context.placeholder(name, type_spec).tf
else:
raise ValueError('bad type spec %r' % type_spec)
def build_feature_dict_mapper(feature_names):
"""Build a function for tf.data.Dataset.map.
Args:
feature_names: List of feature names.
Returns:
A function converting tuples into (dictionary of features, label).
"""
def mapper(*tuple_args):
d = {}
for i in range(len(feature_names)):
d[feature_names[i]] = tuple_args[i]
return d, tuple_args[-1]
return mapper
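

def example_feature_dict_pipeline(context, filename):
  """Minimal end-to-end sketch tying the helpers above together.

  Illustrative only: assumes ``context`` declares an entity type 'person_t'
  and ``filename`` holds two tab-separated fields per line, a question
  string and a 'person_t' entity set.
  """
  dataset = tuple_dataset(context, filename, [str, 'person_t'])
  return dataset.map(build_feature_dict_mapper(['question', 'answer']))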
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Helpful routines for regression testing."""
from base64 import b64encode
from binascii import hexlify, unhexlify
from decimal import Decimal, ROUND_DOWN
import hashlib
import inspect
import json
import logging
import os
import random
import re
from subprocess import CalledProcessError
import time
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
logger = logging.getLogger("TestFramework.utils")
# Assert functions
##################
def assert_fee_amount(fee, tx_size, fee_per_kB):
"""Assert the fee was in range"""
target_fee = round(tx_size * fee_per_kB / 1000, 8)
if fee < target_fee:
raise AssertionError("Fee of %s LTC too low! (Should be %s LTC)" % (str(fee), str(target_fee)))
# allow the wallet's estimation to be at most 2 bytes off
if fee > (tx_size + 2) * fee_per_kB / 1000:
raise AssertionError("Fee of %s LTC too high! (Should be %s LTC)" % (str(fee), str(target_fee)))
def assert_equal(thing1, thing2, *args):
if thing1 != thing2 or any(thing1 != arg for arg in args):
raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s" % (str(thing1), str(thing2)))
def assert_greater_than_or_equal(thing1, thing2):
if thing1 < thing2:
raise AssertionError("%s < %s" % (str(thing1), str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
try:
fun(*args, **kwds)
except JSONRPCException:
raise AssertionError("Use assert_raises_rpc_error() to test RPC failures")
except exc as e:
if message is not None and message not in e.error['message']:
raise AssertionError("Expected substring not found:" + e.error['message'])
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
raise AssertionError("No exception raised")
def assert_raises_process_error(returncode, output, fun, *args, **kwds):
"""Execute a process and asserts the process return code and output.
Calls function `fun` with arguments `args` and `kwds`. Catches a CalledProcessError
and verifies that the return code and output are as expected. Throws AssertionError if
no CalledProcessError was raised or if the return code and output are not as expected.
Args:
returncode (int): the process return code.
output (string): [a substring of] the process output.
fun (function): the function to call. This should execute a process.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
try:
fun(*args, **kwds)
except CalledProcessError as e:
if returncode != e.returncode:
raise AssertionError("Unexpected returncode %i" % e.returncode)
if output not in e.output:
raise AssertionError("Expected substring not found:" + e.output)
else:
raise AssertionError("No exception raised")
def assert_raises_rpc_error(code, message, fun, *args, **kwds):
"""Run an RPC and verify that a specific JSONRPC exception code and message is raised.
Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
and verifies that the error code and message are as expected. Throws AssertionError if
no JSONRPCException was raised or if the error code/message are not as expected.
Args:
code (int), optional: the error code returned by the RPC call (defined
in src/rpc/protocol.h). Set to None if checking the error code is not required.
message (string), optional: [a substring of] the error string returned by the
RPC call. Set to None if checking the error string is not required.
fun (function): the function to call. This should be the name of an RPC.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
assert try_rpc(code, message, fun, *args, **kwds), "No exception raised"
def try_rpc(code, message, fun, *args, **kwds):
"""Tries to run an rpc command.
Test against error code and message if the rpc fails.
Returns whether a JSONRPCException was raised."""
try:
fun(*args, **kwds)
except JSONRPCException as e:
# JSONRPCException was thrown as expected. Check the code and message values are correct.
if (code is not None) and (code != e.error["code"]):
raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
if (message is not None) and (message not in e.error['message']):
raise AssertionError("Expected substring not found:" + e.error['message'])
return True
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
return False
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError(
"Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, str):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
raise AssertionError(
"String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError(
"String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find=False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found
in object_array
"""
if should_not_find:
assert_equal(expected, {})
num_matched = 0
for item in object_array:
all_match = True
for key, value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
elif should_not_find:
num_matched = num_matched + 1
for key, value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
num_matched = num_matched + 1
if num_matched == 0 and not should_not_find:
raise AssertionError("No objects matched %s" % (str(to_match)))
if num_matched > 0 and should_not_find:
raise AssertionError("Objects were found %s" % (str(to_match)))
# Utility functions
###################
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n))) * 1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
return hexlify(byte_str).decode('ascii')
def hash256(byte_str):
sha256 = hashlib.sha256()
sha256.update(byte_str)
sha256d = hashlib.sha256()
sha256d.update(sha256.digest())
return sha256d.digest()[::-1]
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
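# e.g. satoshi_round("0.1234567899") == Decimal('0.12345678'): amounts are
# truncated (ROUND_DOWN) to eight decimal places, one satoshi.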
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None):
if attempts == float('inf') and timeout == float('inf'):
timeout = 60
attempt = 0
time_end = time.time() + timeout
while attempt < attempts and time.time() < time_end:
if lock:
with lock:
if predicate():
return
else:
if predicate():
return
attempt += 1
time.sleep(0.05)
# Print the cause of the timeout
predicate_source = inspect.getsourcelines(predicate)
logger.error("wait_until() failed. Predicate: {}".format(predicate_source))
if attempt >= attempts:
raise AssertionError("Predicate {} not true after {} attempts".format(predicate_source, attempts))
elif time.time() >= time_end:
raise AssertionError("Predicate {} not true after {} seconds".format(predicate_source, timeout))
raise RuntimeError('Unreachable')
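# Typical usage (illustrative): block until a node has mined past the
# coinbase maturity window, failing the test after 30 seconds:
#   wait_until(lambda: node.getblockcount() >= 101, timeout=30)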
# RPC/P2P connection constants and functions
############################################
# The maximum number of nodes a single test can spawn
MAX_NODES = 8
# Don't assign rpc or p2p ports lower than this
PORT_MIN = 11000
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
class PortSeed:
# Must be initialized with a unique integer for each process
n = None
def get_rpc_proxy(url, node_number, timeout=None, coveragedir=None):
"""
Args:
url (str): URL of the RPC server to call
        node_number (int): the node number (or id) that this call is made to
Kwargs:
timeout (int): HTTP timeout in seconds
Returns:
AuthServiceProxy. convenience object for making RPC calls.
"""
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = timeout
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(
coveragedir, node_number) if coveragedir else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
assert(n <= MAX_NODES)
return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_port(n):
return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
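# Example port math (assuming PortSeed.n == 1): node 0 gets p2p port
# 11000 + 0 + (8 * 1) % 4991 = 11008 and rpc port 16008, so concurrent test
# runs seeded with distinct PortSeed values cannot collide.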
def rpc_url(datadir, i, rpchost=None):
rpc_u, rpc_p = get_auth_cookie(datadir)
host = '127.0.0.1'
port = rpc_port(i)
if rpchost:
parts = rpchost.split(':')
if len(parts) == 2:
host, port = parts
else:
host = rpchost
return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
# Node functions
################
def initialize_datadir(dirname, n):
datadir = get_datadir_path(dirname, n)
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "litecoin.conf"), 'w', encoding='utf8') as f:
f.write("regtest=1\n")
f.write("[regtest]\n")
f.write("port=" + str(p2p_port(n)) + "\n")
f.write("rpcport=" + str(rpc_port(n)) + "\n")
f.write("server=1\n")
f.write("keypool=1\n")
f.write("discover=0\n")
f.write("listenonion=0\n")
f.write("printtoconsole=0\n")
os.makedirs(os.path.join(datadir, 'stderr'), exist_ok=True)
os.makedirs(os.path.join(datadir, 'stdout'), exist_ok=True)
return datadir
def get_datadir_path(dirname, n):
return os.path.join(dirname, "node" + str(n))
def append_config(datadir, options):
with open(os.path.join(datadir, "litecoin.conf"), 'a', encoding='utf8') as f:
for option in options:
f.write(option + "\n")
def get_auth_cookie(datadir):
user = None
password = None
if os.path.isfile(os.path.join(datadir, "litecoin.conf")):
with open(os.path.join(datadir, "litecoin.conf"), 'r', encoding='utf8') as f:
for line in f:
if line.startswith("rpcuser="):
assert user is None # Ensure that there is only one rpcuser line
user = line.split("=")[1].strip("\n")
if line.startswith("rpcpassword="):
assert password is None # Ensure that there is only one rpcpassword line
password = line.split("=")[1].strip("\n")
if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")):
with open(os.path.join(datadir, "regtest", ".cookie"), 'r', encoding="ascii") as f:
userpass = f.read()
split_userpass = userpass.split(':')
user = split_userpass[0]
password = split_userpass[1]
if user is None or password is None:
raise ValueError("No RPC credentials")
return user, password
# If a cookie file exists in the given datadir, delete it.
def delete_cookie_file(datadir):
if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")):
logger.debug("Deleting leftover cookie file")
os.remove(os.path.join(datadir, "regtest", ".cookie"))
def get_bip9_status(node, key):
info = node.getblockchaininfo()
return info['bip9_softforks'][key]
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def disconnect_nodes(from_connection, node_num):
for peer_id in [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']]:
try:
from_connection.disconnectnode(nodeid=peer_id)
except JSONRPCException as e:
# If this node is disconnected between calculating the peer id
# and issuing the disconnect, don't worry about it.
# This avoids a race condition if we're mass-disconnecting peers.
if e.error['code'] != -29: # RPC_CLIENT_NODE_NOT_CONNECTED
raise
# wait to disconnect
wait_until(lambda: [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']] == [], timeout=5)
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:" + str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
wait_until(lambda: all(peer['version'] != 0 for peer in from_connection.getpeerinfo()))
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def sync_blocks(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same tip.
    sync_blocks needs to be called with an rpc_connections set that has at
    least one node already synced to the latest, stable tip, otherwise there's
    a chance it might return before all nodes are stably synced.
"""
stop_time = time.time() + timeout
while time.time() <= stop_time:
best_hash = [x.getbestblockhash() for x in rpc_connections]
if best_hash.count(best_hash[0]) == len(rpc_connections):
return
time.sleep(wait)
raise AssertionError("Block sync timed out:{}".format("".join("\n {!r}".format(b) for b in best_hash)))
def sync_mempools(rpc_connections, *, wait=1, timeout=60, flush_scheduler=True):
"""
Wait until everybody has the same transactions in their memory
pools
"""
stop_time = time.time() + timeout
while time.time() <= stop_time:
pool = [set(r.getrawmempool()) for r in rpc_connections]
if pool.count(pool[0]) == len(rpc_connections):
if flush_scheduler:
for r in rpc_connections:
r.syncwithvalidationinterfacequeue()
return
time.sleep(wait)
raise AssertionError("Mempool sync timed out:{}".format("".join("\n {!r}".format(m) for m in pool)))
# Transaction/Block functions
#############################
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >= 0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({"txid": t["txid"], "vout": t["vout"], "address": t["address"]})
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d" % (amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out + fee
change = amount_in - amount
if change > amount * 2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment * random.randint(0, fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount + fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransactionwithwallet(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
to_generate = int(0.5 * count) + 101
while to_generate > 0:
node.generate(min(25, to_generate))
to_generate -= 25
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
for i in range(iterations):
t = utxos.pop()
inputs = []
inputs.append({"txid": t["txid"], "vout": t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr1] = satoshi_round(send_value / 2)
outputs[addr2] = satoshi_round(send_value / 2)
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransactionwithwallet(raw_tx)["hex"]
node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
node.generate(1)
utxos = node.listunspent()
assert(len(utxos) >= count)
return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
# Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
# So we have big transactions (and therefore can't fit very many into each block)
# create one script_pubkey
script_pubkey = "6a4d0200" # OP_RETURN OP_PUSH2 512 bytes
for i in range(512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
txouts = "81"
for k in range(128):
# add txout value
txouts = txouts + "0000000000000000"
# add length of script_pubkey
txouts = txouts + "fd0402"
# add script_pubkey
txouts = txouts + script_pubkey
return txouts
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
addr = node.getnewaddress()
txids = []
for _ in range(num):
t = utxos.pop()
inputs = [{"txid": t["txid"], "vout": t["vout"]}]
outputs = {}
change = t['amount'] - fee
outputs[addr] = satoshi_round(change)
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + txouts
newtx = newtx + rawtx[94:]
signresult = node.signrawtransactionwithwallet(newtx, None, "NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
txids.append(txid)
return txids
def mine_large_block(node, utxos=None):
    # generate transactions of roughly 66kB each;
    # 14 of them come close to the 1MB block limit
num = 14
txouts = gen_return_txouts()
utxos = utxos if utxos is not None else []
if len(utxos) < num:
utxos.clear()
utxos.extend(node.listunspent())
fee = 100 * node.getnetworkinfo()["relayfee"]
create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
node.generate(1)
def find_vout_for_address(node, txid, addr):
"""
Locate the vout index of the given transaction sending to the
given address. Raises runtime error exception if not found.
"""
tx = node.getrawtransaction(txid, True)
for i in range(len(tx["vout"])):
if any([addr == a for a in tx["vout"][i]["scriptPubKey"]["addresses"]]):
return i
raise RuntimeError("Vout not found for address: txid=%s, addr=%s" % (txid, addr))
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.cloud.talent_v4beta1.services.profile_service import pagers
from google.cloud.talent_v4beta1.types import common
from google.cloud.talent_v4beta1.types import histogram
from google.cloud.talent_v4beta1.types import profile
from google.cloud.talent_v4beta1.types import profile as gct_profile
from google.cloud.talent_v4beta1.types import profile_service
from google.protobuf import timestamp_pb2 # type: ignore
from google.protobuf import wrappers_pb2 # type: ignore
from .transports.base import ProfileServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import ProfileServiceGrpcTransport
from .transports.grpc_asyncio import ProfileServiceGrpcAsyncIOTransport
class ProfileServiceClientMeta(type):
"""Metaclass for the ProfileService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[ProfileServiceTransport]]
_transport_registry["grpc"] = ProfileServiceGrpcTransport
_transport_registry["grpc_asyncio"] = ProfileServiceGrpcAsyncIOTransport
def get_transport_class(cls, label: str = None,) -> Type[ProfileServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
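                For example, label "grpc_asyncio" selects
                ProfileServiceGrpcAsyncIOTransport.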
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class ProfileServiceClient(metaclass=ProfileServiceClientMeta):
"""A service that handles profile management, including profile
CRUD, enumeration and search.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "jobs.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ProfileServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ProfileServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> ProfileServiceTransport:
"""Returns the transport used by the client instance.
Returns:
ProfileServiceTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def profile_path(project: str, tenant: str, profile: str,) -> str:
"""Returns a fully-qualified profile string."""
return "projects/{project}/tenants/{tenant}/profiles/{profile}".format(
project=project, tenant=tenant, profile=profile,
)
@staticmethod
def parse_profile_path(path: str) -> Dict[str, str]:
"""Parses a profile path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/tenants/(?P<tenant>.+?)/profiles/(?P<profile>.+?)$",
path,
)
return m.groupdict() if m else {}
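    # Round-trip example (illustrative):
    #   p = ProfileServiceClient.profile_path("my-proj", "my-tenant", "p1")
    #   ProfileServiceClient.parse_profile_path(p)
    #   # -> {'project': 'my-proj', 'tenant': 'my-tenant', 'profile': 'p1'}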
@staticmethod
def tenant_path(project: str, tenant: str,) -> str:
"""Returns a fully-qualified tenant string."""
return "projects/{project}/tenants/{tenant}".format(
project=project, tenant=tenant,
)
@staticmethod
def parse_tenant_path(path: str) -> Dict[str, str]:
"""Parses a tenant path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/tenants/(?P<tenant>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[client_options_lib.ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` is provided, use the provided one.
        (2) if the `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
        default mTLS endpoint; if the environment variable is "never", use the default API
endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
if client_options is None:
client_options = client_options_lib.ClientOptions()
use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_client_cert not in ("true", "false"):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
if use_mtls_endpoint not in ("auto", "never", "always"):
raise MutualTLSChannelError(
"Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
)
# Figure out the client cert source to use.
client_cert_source = None
if use_client_cert == "true":
if client_options.client_cert_source:
client_cert_source = client_options.client_cert_source
elif mtls.has_default_client_cert_source():
client_cert_source = mtls.default_client_cert_source()
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
elif use_mtls_endpoint == "always" or (
use_mtls_endpoint == "auto" and client_cert_source
):
api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = cls.DEFAULT_ENDPOINT
return api_endpoint, client_cert_source
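    # Resolution sketch (illustrative): with client_options.api_endpoint unset
    # and GOOGLE_API_USE_MTLS_ENDPOINT="auto", a discoverable client
    # certificate selects DEFAULT_MTLS_ENDPOINT, otherwise DEFAULT_ENDPOINT:
    #
    #   endpoint, cert_src = ProfileServiceClient.get_mtls_endpoint_and_cert_source()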
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, ProfileServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the profile service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ProfileServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
client_options
)
api_key_value = getattr(client_options, "api_key", None)
if api_key_value and credentials:
raise ValueError(
"client_options.api_key and credentials are mutually exclusive"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, ProfileServiceTransport):
# transport is a ProfileServiceTransport instance.
if credentials or client_options.credentials_file or api_key_value:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
import google.auth._default # type: ignore
if api_key_value and hasattr(
google.auth._default, "get_api_key_credentials"
):
credentials = google.auth._default.get_api_key_credentials(
api_key_value
)
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
)
def list_profiles(
self,
request: Union[profile_service.ListProfilesRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListProfilesPager:
r"""Lists profiles by filter. The order is unspecified.
.. code-block:: python
from google.cloud import talent_v4beta1
def sample_list_profiles():
# Create a client
client = talent_v4beta1.ProfileServiceClient()
# Initialize request argument(s)
request = talent_v4beta1.ListProfilesRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_profiles(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.talent_v4beta1.types.ListProfilesRequest, dict]):
The request object. List profiles request.
parent (str):
Required. The resource name of the tenant under which
the profile is created.
The format is
"projects/{project_id}/tenants/{tenant_id}". For
example, "projects/foo/tenants/bar".
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.talent_v4beta1.services.profile_service.pagers.ListProfilesPager:
The List profiles response object.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a profile_service.ListProfilesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, profile_service.ListProfilesRequest):
request = profile_service.ListProfilesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_profiles]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListProfilesPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def create_profile(
self,
request: Union[profile_service.CreateProfileRequest, dict] = None,
*,
parent: str = None,
profile: gct_profile.Profile = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gct_profile.Profile:
r"""Creates and returns a new profile.
.. code-block:: python
from google.cloud import talent_v4beta1
def sample_create_profile():
# Create a client
client = talent_v4beta1.ProfileServiceClient()
# Initialize request argument(s)
request = talent_v4beta1.CreateProfileRequest(
parent="parent_value",
)
# Make the request
response = client.create_profile(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.talent_v4beta1.types.CreateProfileRequest, dict]):
The request object. Create profile request.
parent (str):
Required. The name of the tenant this profile belongs
to.
The format is
"projects/{project_id}/tenants/{tenant_id}". For
example, "projects/foo/tenants/bar".
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
profile (google.cloud.talent_v4beta1.types.Profile):
Required. The profile to be created.
This corresponds to the ``profile`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.talent_v4beta1.types.Profile:
A resource that represents the
profile for a job candidate (also
referred to as a "single-source
profile").
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, profile])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a profile_service.CreateProfileRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, profile_service.CreateProfileRequest):
request = profile_service.CreateProfileRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if profile is not None:
request.profile = profile
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_profile]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def get_profile(
self,
request: Union[profile_service.GetProfileRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> profile.Profile:
r"""Gets the specified profile.
.. code-block:: python
from google.cloud import talent_v4beta1
def sample_get_profile():
# Create a client
client = talent_v4beta1.ProfileServiceClient()
# Initialize request argument(s)
request = talent_v4beta1.GetProfileRequest(
name="name_value",
)
# Make the request
response = client.get_profile(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.talent_v4beta1.types.GetProfileRequest, dict]):
The request object. Get profile request.
name (str):
Required. Resource name of the profile to get.
The format is
"projects/{project_id}/tenants/{tenant_id}/profiles/{profile_id}".
For example, "projects/foo/tenants/bar/profiles/baz".
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.talent_v4beta1.types.Profile:
A resource that represents the
profile for a job candidate (also
referred to as a "single-source
profile").
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a profile_service.GetProfileRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, profile_service.GetProfileRequest):
request = profile_service.GetProfileRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_profile]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def update_profile(
self,
request: Union[profile_service.UpdateProfileRequest, dict] = None,
*,
profile: gct_profile.Profile = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gct_profile.Profile:
r"""Updates the specified profile and returns the updated
result.
.. code-block:: python
from google.cloud import talent_v4beta1
def sample_update_profile():
# Create a client
client = talent_v4beta1.ProfileServiceClient()
# Initialize request argument(s)
request = talent_v4beta1.UpdateProfileRequest(
)
# Make the request
response = client.update_profile(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.talent_v4beta1.types.UpdateProfileRequest, dict]):
The request object. Update profile request
profile (google.cloud.talent_v4beta1.types.Profile):
Required. Profile to be updated.
This corresponds to the ``profile`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.talent_v4beta1.types.Profile:
A resource that represents the
profile for a job candidate (also
referred to as a "single-source
profile").
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([profile])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a profile_service.UpdateProfileRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, profile_service.UpdateProfileRequest):
request = profile_service.UpdateProfileRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if profile is not None:
request.profile = profile
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_profile]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("profile.name", request.profile.name),)
),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def delete_profile(
self,
request: Union[profile_service.DeleteProfileRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes the specified profile.
Prerequisite: The profile has no associated applications
or assignments associated.
.. code-block:: python
from google.cloud import talent_v4beta1
def sample_delete_profile():
# Create a client
client = talent_v4beta1.ProfileServiceClient()
# Initialize request argument(s)
request = talent_v4beta1.DeleteProfileRequest(
name="name_value",
)
# Make the request
client.delete_profile(request=request)
Args:
request (Union[google.cloud.talent_v4beta1.types.DeleteProfileRequest, dict]):
The request object. Delete profile request.
name (str):
Required. Resource name of the profile to be deleted.
The format is
"projects/{project_id}/tenants/{tenant_id}/profiles/{profile_id}".
For example, "projects/foo/tenants/bar/profiles/baz".
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a profile_service.DeleteProfileRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, profile_service.DeleteProfileRequest):
request = profile_service.DeleteProfileRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_profile]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
def search_profiles(
self,
request: Union[profile_service.SearchProfilesRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.SearchProfilesPager:
r"""Searches for profiles within a tenant.
For example, search by raw queries "software engineer in
Mountain View" or search by structured filters (location filter,
education filter, etc.).
See
[SearchProfilesRequest][google.cloud.talent.v4beta1.SearchProfilesRequest]
for more information.
.. code-block:: python
from google.cloud import talent_v4beta1
def sample_search_profiles():
# Create a client
client = talent_v4beta1.ProfileServiceClient()
# Initialize request argument(s)
request = talent_v4beta1.SearchProfilesRequest(
parent="parent_value",
)
# Make the request
page_result = client.search_profiles(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.talent_v4beta1.types.SearchProfilesRequest, dict]):
The request object. The request body of the
`SearchProfiles` call.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.talent_v4beta1.services.profile_service.pagers.SearchProfilesPager:
Response of SearchProfiles method.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a profile_service.SearchProfilesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, profile_service.SearchProfilesRequest):
request = profile_service.SearchProfilesRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.search_profiles]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.SearchProfilesPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-talent",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("ProfileServiceClient",)
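# A minimal usage sketch (not part of the generated client; the resource name
# below is a placeholder in the documented format). Using the client as a
# context manager ensures the underlying transport is closed on exit:
#
#   from google.cloud import talent_v4beta1
#
#   with talent_v4beta1.ProfileServiceClient() as client:
#       profile = client.get_profile(
#           name="projects/foo/tenants/bar/profiles/baz"
#       )
#       print(profile.name)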
|
|
##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import Gaffer
import GafferUI
import GafferScene
import GafferSceneUI
##########################################################################
# Metadata
##########################################################################
## \todo This is getting used in a few places now - maybe put it in one
# place? Maybe a static method on NumericWidget?
def __floatToString( f ) :
return ( "%.4f" % f ).rstrip( '0' ).rstrip( '.' )
def __cameraSummary( plug ) :
info = []
if plug["renderCamera"]["enabled"].getValue() :
info.append( plug["renderCamera"]["value"].getValue() )
if plug["renderResolution"]["enabled"].getValue() :
resolution = plug["renderResolution"]["value"].getValue()
info.append( "%dx%d" % ( resolution[0], resolution[1] ) )
if plug["pixelAspectRatio"]["enabled"].getValue() :
pixelAspectRatio = plug["pixelAspectRatio"]["value"].getValue()
info.append( "Aspect %s" % __floatToString( pixelAspectRatio ) )
if plug["resolutionMultiplier"]["enabled"].getValue() :
resolutionMultiplier = plug["resolutionMultiplier"]["value"].getValue()
info.append( "Mult %s" % __floatToString( resolutionMultiplier ) )
if plug["renderCropWindow"]["enabled"].getValue() :
crop = plug["renderCropWindow"]["value"].getValue()
info.append( "Crop %s,%s-%s,%s" % tuple( __floatToString( x ) for x in ( crop.min.x, crop.min.y, crop.max.x, crop.max.y ) ) )
if plug["overscan"]["enabled"].getValue() :
info.append( "Overscan %s" % ( "On" if plug["overscan"]["value"].getValue() else "Off" ) )
return ", ".join( info )
def __motionBlurSummary( plug ) :
info = []
if plug["cameraBlur"]["enabled"].getValue() :
info.append( "Camera " + ( "On" if plug["cameraBlur"]["value"].getValue() else "Off" ) )
if plug["transformBlur"]["enabled"].getValue() :
info.append( "Transform " + ( "On" if plug["transformBlur"]["value"].getValue() else "Off" ) )
if plug["deformationBlur"]["enabled"].getValue() :
info.append( "Deformation " + ( "On" if plug["deformationBlur"]["value"].getValue() else "Off" ) )
if plug["shutter"]["enabled"].getValue() :
info.append( "Shutter " + str( plug["shutter"]["value"].getValue() ) )
return ", ".join( info )
Gaffer.Metadata.registerNode(
GafferScene.StandardOptions,
"description",
"""
Specifies the standard options (global settings) for the
scene. These should be respected by all renderers.
""",
plugs = {
# section summaries
"options" : [
"layout:section:Camera:summary", __cameraSummary,
"layout:section:Motion Blur:summary", __motionBlurSummary,
],
# camera plugs
"options.renderCamera" : [
"description",
"""
The primary camera to be used for rendering. If this
is not specified, then a default orthographic camera
positioned at the origin is used.
""",
"layout:section", "Camera",
"label", "Camera",
],
"options.renderResolution" : [
"description",
"""
The resolution of the image to be rendered. Use the
resolution multiplier as a convenient way to temporarily
render at multiples of this resolution.
""",
"layout:section", "Camera",
"label", "Resolution",
],
"options.pixelAspectRatio" : [
"description",
"""
The aspect ratio (x/y) of the pixels in the rendered image.
""",
"layout:section", "Camera",
],
"options.resolutionMultiplier" : [
"description",
"""
Multiplier applied to the render resolution.
""",
"layout:section", "Camera",
],
"options.renderCropWindow" : [
"description",
"""
Limits the render to a region of the image. The rendered
image will have the same resolution as usual, but areas
outside the crop will be rendered black. Coordinates
range from 0,0 at the top left of the image to 1,1 at the
bottom right. The crop window tool in the viewer may be
used to set this interactively.
""",
"layout:section", "Camera",
"label", "Crop Window",
],
"options.overscan" : [
"description",
"""
Adds extra pixels to the sides of the rendered image.
This can be useful when camera shake or blur will be
added as a post process. This plug just enables overscan
as a whole - use the overscanTop, overscanBottom, overscanLeft
and overscanRight plugs to specify the amount of overscan
on each side of the image.
""",
"layout:section", "Camera",
],
"options.overscanTop" : [
"description",
"""
The amount of overscan at the top of the image. Specified
as a 0-1 proportion of the original image height.
""",
"layout:section", "Camera",
],
"options.overscanBottom" : [
"description",
"""
The amount of overscan at the bottom of the image. Specified
as a 0-1 proportion of the original image height.
""",
"layout:section", "Camera",
],
"options.overscanLeft" : [
"description",
"""
The amount of overscan at the left of the image. Specified
as a 0-1 proportion of the original image width.
""",
"layout:section", "Camera",
],
"options.overscanRight" : [
"description",
"""
The amount of overscan at the right of the image. Specified
as a 0-1 proportion of the original image width.
""",
"layout:section", "Camera",
],
# motion blur plugs
"options.cameraBlur" : [
"description",
"""
Whether or not camera motion is taken into
account in the rendered image. To specify the
number of segments to use for camera motion, use
a StandardAttributes node filtered for the camera.
""",
"layout:section", "Motion Blur",
"label", "Camera",
],
"options.transformBlur" : [
"description",
"""
Whether or not transform motion is taken into
account in the rendered image. To specify the
number of transform segments to use for each
object in the scene, use a StandardAttributes node
with appropriate filters.
""",
"layout:section", "Motion Blur",
"label", "Transform",
],
"options.deformationBlur" : [
"description",
"""
Whether or not deformation motion is taken into
account in the rendered image. To specify the
number of deformation segments to use for each
object in the scene, use a StandardAttributes node
with appropriate filters.
""",
"layout:section", "Motion Blur",
"label", "Deformation",
],
"options.shutter" : [
"description",
"""
The interval over which the camera shutter is open.
Measured in frames, and specified relative to the
frame being rendered.
""",
"layout:section", "Motion Blur",
],
}
)
##########################################################################
# PlugValueWidgets
##########################################################################
GafferUI.PlugValueWidget.registerCreator(
GafferScene.StandardOptions,
"options.renderCamera.value",
GafferSceneUI.ScenePathPlugValueWidget
)
Gaffer.Metadata.registerPlugValue(
GafferScene.StandardOptions,
"options.renderCamera.value",
"scenePathPlugValueWidget:setNames", IECore.StringVectorData( [ "__cameras" ] )
)
Gaffer.Metadata.registerPlugValue(
GafferScene.StandardOptions,
"options.renderCamera.value",
"scenePathPlugValueWidget:setsLabel", "Show only cameras"
)
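# A minimal sketch (not part of this module) of reading back one of the values
# registered above, assuming the same-era Metadata API that pairs
# registerPlugValue() with plugValue():
#
#   node = GafferScene.StandardOptions()
#   label = Gaffer.Metadata.plugValue(
#       node["options"]["renderCamera"]["value"],
#       "scenePathPlugValueWidget:setsLabel",
#   )
#   # label == "Show only cameras"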
|
|
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for generating the C4 dataset."""
import functools
import gzip
import hashlib
import heapq
import io
import re
import threading
from absl import logging
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
# WET file constants
_PAGE_DELIMITER = "WARC/1.0"
_URL_KEY = "WARC-Target-URI:"
_URL_DATE = "WARC-Date:"
_CONTENT_TYPE = "Content-Type:"
_CONTENT_LEN = "Content-Length:"
_METADATA_PREFIXES = ("WARC", "CONTENT-", "Content-")
# Filters
_MIN_WORDS_PER_LINE = 5
_MIN_NUM_SENTENCES = 3
_MAX_WORD_LENGTH = 1000
_END_MARKS = (".", "?", "!", "\"")
_ELLIPSIS = "..."
_POLICY_SUBSTRINGS = [
"terms of use", "privacy policy", "cookie policy", "uses cookies",
"use of cookies", "use cookies"
]
# Memoized sentence tokenizer.
_SENTENCE_TOKENIZER = None
UNKNOWN_LANGUAGE = "und"
def get_counter_inc_fn(namespace):
def counter_inc_fn(counter, amt=1):
tfds.core.lazy_imports.apache_beam.metrics.Metrics.counter(
namespace, counter).inc(amt)
return counter_inc_fn
def get_hashed_url_filter_fn(predicate_fn):
def filter_fn(el):
url, _ = el
val = int(
hashlib.md5(tf.compat.as_text(url).encode("utf-8")).hexdigest(), 16)
return predicate_fn(val)
return filter_fn
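# Example (illustrative): deterministically routing ~1/1000 of pages to a
# validation split by bucketing the URL hash.
#
#   is_validation = get_hashed_url_filter_fn(lambda v: v % 1000 == 0)
#   is_validation(("http://example.com/a", {"text": "..."}))
#   # -> True or False, stable across runs for the same URL.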
_nltk_lock = threading.Lock()
def _load_sentence_tokenizer():
"""Returns a sentence tokenization function."""
nltk = tfds.core.lazy_imports.nltk
# Lock to avoid a race-condition in the creation of the download directory.
with _nltk_lock:
nltk.download("punkt")
return nltk.data.load("nltk:tokenizers/punkt/english.pickle")
def _get_sentences(text):
global _SENTENCE_TOKENIZER
if not _SENTENCE_TOKENIZER:
_SENTENCE_TOKENIZER = _load_sentence_tokenizer()
return list(_SENTENCE_TOKENIZER.tokenize(tf.compat.as_text(text)))
# Global lock used for language detection modules that aren't threadsafe.
langdetect_lock = threading.Lock()
def detect_english(page, min_probability=0.99):
"""Yields page iff text is 'en' with at least `min_probability`."""
url, features = page
text = features["text"]
counter_inc_fn = get_counter_inc_fn("english-filter")
langdetect = tfds.core.lazy_imports.langdetect
# Make langdetect predictions deterministic.
with langdetect_lock:
langdetect.DetectorFactory.seed = 0
try:
predictions = langdetect.detect_langs(text)
except langdetect.lang_detect_exception.LangDetectException:
counter_inc_fn("filtered:langdetect_exception")
return
if not predictions:
counter_inc_fn("filtered:no_predictions")
return
best_prediction = predictions[0]
if best_prediction.prob < min_probability:
counter_inc_fn("filtered:low_confidence")
return
if best_prediction.lang != "en":
counter_inc_fn("filtered:ignored_language")
counter_inc_fn("filtered:ignored_language-%s" % (best_prediction.lang,))
return
counter_inc_fn("passed")
features = dict(features)
features["language"] = "en"
yield url, features
def detect_languages(pages, valid_languages):
"""Predicts page language using cld3 and adds to features."""
beam = tfds.core.lazy_imports.apache_beam
class _PredictLanguageFn(beam.DoFn):
"""Predicts page's language using cld3 and adds to features."""
def __init__(self, valid_languages, min_probability=0.7):
self._valid_languages = set(valid_languages)
self._counter_inc_fn = get_counter_inc_fn("language-filter")
self._min_probability = min_probability
def start_bundle(self):
with langdetect_lock:
self._detector = tfds.core.lazy_imports.gcld3.NNetLanguageIdentifier(
# CLD3 is not expected to work well on very short documents.
min_num_bytes=100,
max_num_bytes=10000)
def process(self, page):
url, features = page
features = dict(features)
with langdetect_lock:
result = self._detector.FindLanguage(features["text"])
if not result.is_reliable:
self._counter_inc_fn("filtered:no_predictions")
lang = UNKNOWN_LANGUAGE
elif result.probability < self._min_probability:
self._counter_inc_fn("filtered:low_confidence")
lang = UNKNOWN_LANGUAGE
else:
lang = result.language
if lang not in self._valid_languages:
self._counter_inc_fn("filtered:ignored_language")
return
self._counter_inc_fn("passed")
self._counter_inc_fn("passed:%s" % lang)
features["language"] = lang
yield url, features
return pages | beam.ParDo(_PredictLanguageFn(valid_languages=valid_languages))
def get_clean_page_fn():
"""Returns `clean_page` with pre-compiled badword and citation regexes."""
# Used to filter citation from Wikipedia pages (among others).
citation_regex = re.compile(r"\[\d*\]|\[edit\]|\[citation needed\]")
return functools.partial(clean_page, citation_regex=citation_regex)
def clean_page(url_and_features,
citation_regex,
counter_inc_fn=None,
min_words_per_line=_MIN_WORDS_PER_LINE,
min_num_sentences=_MIN_NUM_SENTENCES,
max_word_length=_MAX_WORD_LENGTH):
"""Cleans a CommonCrawl page, yielding nothing if it should be skipped.
Cleaning removes lines with no end marks or with too few words. After line
filtering, pages are filtered out if they have too few sentences based on a
simple count of end marks.
Args:
url_and_features: tuple(string, dict), the url and features of the page.
citation_regex: Regex to use for finding Wikipedia-like citations to filter.
counter_inc_fn: function, a function taking the name of a counter to be
incremented and the (optional) amount. Defaults to a beam Metric counter.
min_words_per_line: int, the minimum number of words a line needs to not be
removed.
min_num_sentences: int, the minimum number of sentences a page needs to not
be skipped.
max_word_length: int, the maximum number of characters allowed in a word.
Lines containing a word with too many characters are removed.
Yields:
The url and cleaned text for the page.
"""
url, features = url_and_features
text = features["text"]
if not counter_inc_fn:
counter_inc_fn = get_counter_inc_fn("clean-page")
lines = text.splitlines()
valid_lines = []
num_sentences = 0
def line_has_too_long_word(line):
for word in line.split():
if len(word) > max_word_length:
return True
return False
for line in lines:
line = line.strip()
if line_has_too_long_word(line):
counter_inc_fn("line-filtered:too_long_word")
continue
line = citation_regex.sub("", line)
if not line.endswith(_END_MARKS) or line.endswith(_ELLIPSIS):
counter_inc_fn("line-filtered:no_endmark")
continue
if len(line.split()) < min_words_per_line:
counter_inc_fn("line-filtered:too_short")
continue
line_lower = line.lower()
# Remove documents which contain lorem ipsum
if "lorem ipsum" in line_lower:
counter_inc_fn("filtered:loremipsum")
return
# Remove "javascript must be enabled" notices
if "javascript" in line_lower:
counter_inc_fn("line-filtered:javascript")
continue
# Remove docs which probably contain javascript code
if "{" in line:
counter_inc_fn("filtered:squigglybracket")
return
# Remove policy lines
if any(p in line_lower for p in _POLICY_SUBSTRINGS):
counter_inc_fn("line-filtered:policy")
continue
num_sentences += len(_get_sentences(line))
valid_lines.append(line)
counter_inc_fn("line-passed")
if num_sentences < min_num_sentences:
counter_inc_fn("filtered:too_few_sentences")
return
counter_inc_fn("passed")
features["text"] = "\n".join(valid_lines).strip()
yield url, features
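# Example (illustrative): cleaning a single page outside of a Beam pipeline.
# Note that sentence counting goes through _get_sentences(), which loads the
# NLTK punkt tokenizer via tfds lazy imports.
#
#   page = ("http://example.com",
#           {"text": "This line has more than five words in it."})
#   cleaned = list(clean_page(
#       page,
#       citation_regex=re.compile(r"\[\d*\]"),
#       counter_inc_fn=lambda counter, amt=1: None,  # no-op counter
#       min_num_sentences=1,
#   ))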
def _hash_text(text):
return hashlib.md5(tf.compat.as_text(text).encode("utf-8")).hexdigest()
def _emit_url_to_lines(page):
"""Emits url to all (lower-cased, hashed) lines."""
url, features = page
text = features["text"]
for line in text.split("\n"):
yield _hash_text(line.strip().lower()), url
def _remove_lines_from_text(el, counter_inc_fn, min_num_sentences):
"""Removes all lines from the page that do not match the given set of hashes.
Process the result of a join containing a single value for 'features' and zero
or more values for 'lines'. Each value in 'lines' is a lower-cased, hashed
line that has been selected to be kept.
Args:
el: `(string, {'features': features_dict, 'lines': [string]})`, element
containing the result of a join on key with both the page text and the
lower-cased, hashed lines to keep.
counter_inc_fn: function, a function taking the name of a counter to be
incremented and the (optional) amount.
min_num_sentences: int, the minimum number of sentences a page needs to not
be skipped.
Yields:
url: The URL of the page.
features: The page features with lines removed from text.
"""
url, join_values = el
features = join_values["features"]
assert len(features) == 1, "Invalid page count (%d) for %s" % (len(features),
url)
features = features[0]
text = features["text"]
lines_to_keep = set(join_values["lines"])
new_lines = []
hashed_lines = set()
for line in text.split("\n"):
hashed_line = _hash_text(line.strip().lower())
if hashed_line not in lines_to_keep:
counter_inc_fn("line-filtered:global_duplicate")
elif hashed_line in hashed_lines:
counter_inc_fn("line-filtered:local_duplicate")
else:
counter_inc_fn("line-passed")
new_lines.append(line)
hashed_lines.add(hashed_line)
new_text = "\n".join(new_lines)
if not new_text:
counter_inc_fn("filtered:empty")
return
if min_num_sentences and len(_get_sentences(new_text)) < min_num_sentences:
counter_inc_fn("filtered:too_few_sentences")
return
counter_inc_fn("passed")
new_features = features.copy()
new_features["text"] = new_text
yield (url, new_features)
def remove_duplicate_text(pages, min_num_sentences=_MIN_NUM_SENTENCES):
"""Utility to remove duplicate lines across text documents."""
# Output: url, lines
beam = tfds.core.lazy_imports.apache_beam
# Select a single URL for each line in the input pages.
# Hash before comparison to avoid biasing by domain.
# line, [url]
line_to_selected_url = (
pages
| beam.FlatMap(_emit_url_to_lines)
| beam.combiners.Top.PerKey(1, key=_hash_text, reverse=True))
# url, line
lines_to_keep = line_to_selected_url | beam.Map(lambda x: (x[1][0], x[0]))
# Output: url, text
final_docs = ({
"features": pages,
"lines": lines_to_keep
}
| "group_features_and_lines_by_url" >> beam.CoGroupByKey()
| beam.FlatMap(
_remove_lines_from_text,
counter_inc_fn=get_counter_inc_fn("dedupe-lines"),
min_num_sentences=min_num_sentences))
return final_docs
def split_wet_file(wet_file_path, counter_inc_fn=None):
"""Split a WET file into separate pages."""
logging.info("Splitting file: %s", wet_file_path)
if not counter_inc_fn:
counter_inc_fn = get_counter_inc_fn("split-wet-file")
counter_inc_fn("wet-file")
with tf.io.gfile.GFile(wet_file_path,
"rb") as f, gzip.GzipFile(fileobj=f) as g:
url = None
content = None
content_len = None
content_type = None
timestamp = None
def _maybe_get_page():
"""Generate a (url, {features}) page."""
if not url and url is not None:
counter_inc_fn("filtered:no_url")
if not content and content is not None:
counter_inc_fn("filtered:no_content")
if not content_type and content_type is not None:
counter_inc_fn("filtered:no_content_type")
if not content_len and content_len is not None:
counter_inc_fn("filtered:no_content_len")
if not timestamp and timestamp is not None:
counter_inc_fn("filtered:no_timestamp")
if content and url:
counter_inc_fn("passed")
return (url, {
"text": "\n".join(content),
"content-type": content_type,
"content-length": content_len,
"timestamp": timestamp,
"url": url
})
return None
for line in io.TextIOWrapper(g, encoding="utf-8"): # pytype: disable=wrong-arg-types
line = line.strip()
if not line:
continue
if line == _PAGE_DELIMITER:
page = _maybe_get_page()
if page:
yield page
url = ""
content = []
content_len = ""
content_type = ""
timestamp = ""
if line.startswith(_URL_KEY):
url = line[len(_URL_KEY):].strip()
if line.startswith(_URL_DATE):
timestamp = line[len(_URL_DATE):].strip()
if line.startswith(_CONTENT_TYPE):
content_type = line[len(_CONTENT_TYPE):].strip()
if line.startswith(_CONTENT_LEN):
content_len = line[len(_CONTENT_LEN):].strip()
if line.startswith(_METADATA_PREFIXES):
continue
content.append(line) # pytype: disable=attribute-error
page = _maybe_get_page()
if page:
yield page
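# Example (illustrative; the path below is a placeholder): iterating the
# pages of a local gzipped WET file.
#
#   for url, features in split_wet_file("/path/to/file.warc.wet.gz"):
#       print(url, features["timestamp"])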
def dedupe_urls(el):
"""Deterministically return as random page for a given URL."""
counter_inc_fn = get_counter_inc_fn("duplicate-url-filter")
url, pages = el
cnt = 0
page, page_hash = None, None
for p in pages:
cnt += 1
p_hash = _hash_text(p["text"])
if not page_hash or p_hash > page_hash:
page = p
page_hash = p_hash
counter_inc_fn("filtered", cnt - 1)
counter_inc_fn("passed")
return url, page
def is_valid_length(el, max_length=1.9e5):
"""Returns False iff page's text is too long."""
counter_inc_fn = get_counter_inc_fn("too-long-filter")
_, page = el
if len(page["text"]) > max_length:
counter_inc_fn("filtered")
return False
counter_inc_fn("passed")
return True
def is_realnews_domain(el, realnews_domains):
"""Returns False iff page's (sub)domain is not allowed."""
counter_inc_fn = get_counter_inc_fn("realnews-domain-filter")
url, _ = el
ext = tfds.core.lazy_imports.tldextract.extract(url)
main_domain = ext.domain + "." + ext.suffix
if main_domain not in realnews_domains:
counter_inc_fn("filtered:bad_domain")
return False
allowed_subdomains = realnews_domains[main_domain]
if (isinstance(allowed_subdomains, list) and
ext.subdomain not in allowed_subdomains):
counter_inc_fn("filtered:bad_subdomain")
return False
counter_inc_fn("passed")
return True
def filter_by_webtextlike(el):
"""Yields only pages with a matching WebText-like URL."""
counter_inc_fn = get_counter_inc_fn("webtextlike-filter")
url, join_values = el
text = join_values["text"]
webtextlike = join_values["webtextlike_urls"]
if not webtextlike:
counter_inc_fn("filtered")
return
if not text:
counter_inc_fn("missing-page")
return
assert len(text) == 1
counter_inc_fn("passed")
yield url, text[0]
def normalize_url(el):
url, val = el
url = tf.compat.as_text(url)
url = re.sub(r"https?:\/\/(www\.)?", "", url)
url = re.sub(r"\?(utm_|ref|feed).*", "", url)
url = url.rstrip("/")
return url, val
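# Example (illustrative): the scheme, "www.", tracking query parameters, and
# trailing slashes are stripped.
#
#   normalize_url(("https://www.example.com/a/?utm_source=x", 1))
#   # -> ("example.com/a", 1)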
def get_badwords_filter_fn(badwords):
"""Filters pages that contain any language-specific bad words."""
badwords_regex = { # pylint:disable=g-complex-comprehension
lang: (
# For Chinese and Thai, match bad words regardless of context.
re.compile("|".join(words)) if lang in ("th", "zh")
# For other languages, match only when flanked by non-word chars.
else re.compile(r"(?:\W|^)({})(?:\W|$)".format("|".join(words))))
for lang, words in badwords.items()
}
def badwords_filter(page):
_, features = page
lang = features["language"].split("-")[0] # remove suffix if present
if lang in badwords_regex:
text = features["text"]
badwords_found = badwords_regex[lang].search(text.lower())
if badwords_found is not None:
get_counter_inc_fn("badwords-filter")("filtered")
get_counter_inc_fn("badwords-filter-%s" % lang)("filtered")
return False
get_counter_inc_fn("badwords-filter-%s" % lang)("passed")
get_counter_inc_fn("badwords-filter")("passed")
return True
return badwords_filter
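# Example (illustrative; placeholder word list): pages whose text contains a
# listed word, flanked by non-word characters, are dropped.
#
#   badwords_filter = get_badwords_filter_fn({"en": ["badword"]})
#   badwords_filter(("url", {"language": "en", "text": "a clean sentence."}))
#   # -> True (page kept)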
def paragraph_filter(page, min_paragraphs=3, min_paragraph_len=200):
"""Returns False iff a page has too few or too short paragraphs."""
_, features = page
lines = features["text"].split("\n")
# Filter out docs that don't have at least three "paragraphs"
# (lines >= `min_paragraph_len` chars).
if (len(lines) < min_paragraphs or
min(heapq.nlargest(3, [len(l) for l in lines])) < min_paragraph_len):
get_counter_inc_fn("paragraph-filter")("filtered")
return False
get_counter_inc_fn("paragraph-filter")("passed")
return True
|
|
#! /usr/bin/python3
import sys
import time
import threading
import select
import signal
import fcntl
import os
import socket
import struct
import math
import argparse
import functools
from gpiozero import MCP3008
###
### Python debug
###
### If debug is needed, uncomment the next line
#import pdb
### and place a pdb.set_trace() call before the line where you want to stop execution
###
### Global constants and variables
###
SPI_PORT = 0
SPI_DEVICE = 0
DEF_PORT = 10000
RECV_BUFSIZ = 4096
PROTO_VER = 1
PROTO_AUTHTYPE_NONE = 1
PROTO_UNUSED = 0
PROTO_HDRSIZ = 4
PROTO_CMD_GETLAST = 1
PROTO_CMD_RETLAST = 2
SYSFPARAM_RPI_PROC_TEMP_KEY = 0
SYSFPARAM_RPI_PROC_TEMP_FNAME = '/sys/class/thermal/thermal_zone0/temp'
MIB_BASE = '.1.3.6.1.3.999'
MIB_DEVTYPE_MCP3008 = 1 # ADC device type of Microchip MCP3008
MIB_MAX_ACCESS_NA = 0 # not-accessible
MIB_MAX_ACCESS_AN = 1 # accessible-for-notify
MIB_MAX_ACCESS_RO = 2 # read-only
MIB_MAX_ACCESS_RW = 3 # read-write
MIB_MAX_ACCESS_RC = 4 # read-create
MIB_SYNTAX_INT = 1 # INTEGER (or Integer32)
MIB_SYNTAX_STRING = 2 # OCTET STRING
MIB_SYNTAX_OID = 3 # OBJECT IDENTIFIER
MIB_SYNTAX_BITS = 4 # BITS construct
MIB_SYNTAX_IP = 5 # IpAddress
MIB_SYNTAX_COUNT = 6 # Counter32
MIB_SYNTAX_GAUGE = 7 # Gauge32
MIB_SYNTAX_TT = 8 # TimeTicks
MIB_SYNTAX_COUNT64 = 9 # Counter64
MIB_SYNTAX_UINT = 10 # Unsigned32
MIB_SYNTAX_SEQ = 11 # SEQUENCE
MIB_SYNTAX_NAMES = {
MIB_SYNTAX_INT: 'integer',
MIB_SYNTAX_STRING: 'string',
MIB_SYNTAX_OID: 'objectid',
MIB_SYNTAX_BITS: 'integer',
MIB_SYNTAX_IP: 'ipaddress',
MIB_SYNTAX_COUNT: 'counter',
MIB_SYNTAX_GAUGE: 'gauge',
MIB_SYNTAX_TT: 'timeticks',
MIB_SYNTAX_COUNT64: 'counter',
MIB_SYNTAX_UINT: 'integer',
MIB_SYNTAX_SEQ: 'string'
}
VREF = 3324 # voltage reference fed into VREF (pin 15) of MCP3008, in mV
R_PU = 9920 # pull-up resistor of voltage divider, in Ohms
R_PD = 9930 # pull-down resistor of voltage divider, in Ohms
DEF_AVG_SAMPLES = 5 # default number of samples taken out of each channel
DEF_AVG_DELTA = .001 # default delta between samples, in seconds
DEF_FACTOR = 1 # default scale factor for channel data
MIN_AVG_SAMPLES = 1 # minimal number of samples
MIN_AVG_DELTA = 0 # minimal delta between samples
DEF_MEASURE_INT = 10 # default interval between measurements, in seconds
MIVAL_SHIFT = .1 # constant time shift subtracted from the calculated start time of a measurement cycle, in seconds
TS_FORMAT = '%Y-%m-%d %H:%M:%S' # output date/time format
DEBUG_TS_FORMAT = '%Y-%m-%d %H:%M:%S' # output date/time format for debug output
SIG_WAKEUP_FD_RLEN = 8 # length of data read from signal wakeup file descriptor
channels_conf = { # MCP3008 channels list
0: 'MAIN',
1: 'REG',
2: 'BAT',
3: '+5V'
}
mib = {}
oids = []
sysfparams = {}
pid = 0
ts_base = float(0)
###
### Debug output
###
def dbg(message):
if debug:
caller = sys._getframe(1).f_code.co_name
ts = round(time.time(), 3)
ts_string = time.strftime(DEBUG_TS_FORMAT, time.localtime(ts))
ts_fraction = int(math.modf(ts)[0] * 1000)
debug_fileobj.write('DEBUG[{}]: {}.{:03d}: {}: {}\n'.format(pid, ts_string, ts_fraction, caller, message))
debug_fileobj.flush()
###
### MIB var record
###
class MIBVar:
# Constructor
def __init__(self, name, oid, handler=lambda: None, max_access=MIB_MAX_ACCESS_RO, syntax=MIB_SYNTAX_STRING,
timeticks_conv=False):
# Class variables
self._name = name # name
self._oid = oid # OID (relative to MIB_BASE)
self._next_oid = None # OID of next MIBVar object (needed for GETNEXT SNMP request)
self._handler = handler # handler function for processing a GET SNMP request
self._max_access = max_access # access type of MIBVar
self._syntax = syntax # syntax of MIBVar object
self._timeticks_conv = timeticks_conv # timeticks conversion indicator
# Return name
def get_name(self):
return self._name
# Return OID
def get_oid(self):
return self._oid
# Return OID of next MIB variable
def get_successor(self):
return self._next_oid
# Return MAX-ACCESS property
def get_max_access(self):
return self._max_access
# Return SYNTAX property
def get_syntax(self):
return self._syntax
# Return MIB value
def get_value(self):
if not self._handler or self._max_access < MIB_MAX_ACCESS_RO:
return None
else:
val = self._handler()
if val is not None:
if self._syntax == MIB_SYNTAX_TT and self._timeticks_conv and type(val).__name__ == 'float':
val = round((val - ts_base) * 100)
elif type(val).__name__ == 'bool':
val = int(val)
return val
# Set OID of next MIB variable
def set_successor(self, oid):
self._next_oid = oid
###
### Compare two OIDs; return -1, 0, or 1, or None if an OID is malformed
###
def cmp_oids(oid1, oid2):
# Strip leading and trailing dots, split to lists
list1 = oid1.strip('.').split('.')
list2 = oid2.strip('.').split('.')
# Loop synchronously through the both lists until one of them ends
while len(list1) and len(list2):
# Shift first elements from lists
e1 = list1.pop(0)
e2 = list2.pop(0)
# Replace empty strings with zero
if not e1:
e1 = '0'
if not e2:
e2 = '0'
# Check for non digits
if not e1.isdigit() or not e2.isdigit():
return None
# Convert to integer and compare elements
i1 = int(e1)
i2 = int(e2)
if i1 < i2:
return -1
elif i1 > i2:
return 1
# Compare length of the both lists
if len(list1):
return 1
elif len(list2):
return -1
else:
return 0
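# Example (illustrative):
#
#   cmp_oids('.1.2', '.1.10')   # -> -1 (numeric, not lexicographic, order)
#   cmp_oids('.1.2.0', '.1.2')  # -> 1 (longer OID sorts after its prefix)
#   cmp_oids('.1.x', '.1.2')    # -> None (malformed component)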
###
### Init MIB and list of OIDs
###
def mib_init(ch_list):
global mib, oids
# Base tree
mib[''] = MIBVar('snGroup', '', max_access=MIB_MAX_ACCESS_NA, syntax=MIB_SYNTAX_OID)
# ADC subtree
mib['.1'] = MIBVar('snAdc', '.1', max_access=MIB_MAX_ACCESS_NA, syntax=MIB_SYNTAX_OID)
# Devices table
mib['.1.1.0'] = MIBVar('snAdcDevNumber', '.1.1.0', handler=lambda: 1, syntax=MIB_SYNTAX_INT)
mib['.1.2'] = MIBVar('snAdcDevTable', '.1.2', max_access=MIB_MAX_ACCESS_NA, syntax=MIB_SYNTAX_OID)
mib['.1.2.1'] = MIBVar('snAdcDevEntry', '.1.2.1', max_access=MIB_MAX_ACCESS_NA, syntax=MIB_SYNTAX_OID)
p = '.1.2.1.'
# MCP3008 on PCB
s = '.1'
o = p + str(1) + s
mib[o] = MIBVar('devSpiPort', o, handler=lambda: SPI_PORT, syntax=MIB_SYNTAX_INT)
o = p + str(2) + s
mib[o] = MIBVar('devSpiDevice', o, handler=lambda: SPI_DEVICE, syntax=MIB_SYNTAX_INT)
o = p + str(3) + s
mib[o] = MIBVar('devType', o, handler=lambda: MIB_DEVTYPE_MCP3008, syntax=MIB_SYNTAX_INT)
o = p + str(4) + s
mib[o] = MIBVar('devName', o, handler=lambda: 'MCP3008 on PCB', syntax=MIB_SYNTAX_STRING)
o = p + str(5) + s
mib[o] = MIBVar('devChanNumber', o, handler=lambda: len(channels_conf), syntax=MIB_SYNTAX_INT)
# Statistics Table
mib['.1.3.0'] = MIBVar('snAdcStatsNumber', '.1.3.0', handler=lambda: 1, syntax=MIB_SYNTAX_INT)
mib['.1.4'] = MIBVar('snAdcStatsTable', '.1.4', max_access=MIB_MAX_ACCESS_NA, syntax=MIB_SYNTAX_OID)
mib['.1.4.1'] = MIBVar('snAdcStatsEntry', '.1.4.1', max_access=MIB_MAX_ACCESS_NA, syntax=MIB_SYNTAX_OID)
p = '.1.4.1.'
# Single statistics block
s = '.1'
o = p + str(1) + s
mib[o] = MIBVar('statsValid', o, handler=ch_list.valid, syntax=MIB_SYNTAX_INT)
o = p + str(2) + s
mib[o] = MIBVar('statsTsStart', o, handler=ch_list.ts_start, syntax=MIB_SYNTAX_TT, timeticks_conv=True)
o = p + str(3) + s
mib[o] = MIBVar('statsTsComplete', o, handler=ch_list.ts_complete, syntax=MIB_SYNTAX_TT, timeticks_conv=True)
# Channels table
mib['.1.5.0'] = MIBVar('snAdcChanNumber', '.1.5.0', handler=ch_list.num_of_channels, syntax=MIB_SYNTAX_INT)
mib['.1.6'] = MIBVar('snAdcChanTable', '.1.6', max_access=MIB_MAX_ACCESS_NA, syntax=MIB_SYNTAX_OID)
mib['.1.6.1'] = MIBVar('snAdcChanEntry', '.1.6.1', max_access=MIB_MAX_ACCESS_NA, syntax=MIB_SYNTAX_OID)
p = '.1.6.1.'
# Single channels block
s = '.1.1.'
for i in ch_list.sorted_ch_nums():
ch_instance = ch_list.vec(i)
s1 = s + str(i + 1)
o = p + str(1) + s1
mib[o] = MIBVar('chanNumber', o, handler=ch_instance.get_num, syntax=MIB_SYNTAX_INT)
o = p + str(2) + s1
mib[o] = MIBVar('chanName', o, handler=ch_instance.get_label, syntax=MIB_SYNTAX_STRING)
o = p + str(3) + s1
mib[o] = MIBVar('chanValid', o, handler=ch_instance.get_valid, syntax=MIB_SYNTAX_INT)
o = p + str(4) + s1
mib[o] = MIBVar('chanLast', o, handler=ch_instance.get_last, syntax=MIB_SYNTAX_GAUGE)
o = p + str(5) + s1
mib[o] = MIBVar('chanTs', o, handler=ch_instance.get_ts, syntax=MIB_SYNTAX_TT, timeticks_conv=True)
# Raspberry Pi subtree
mib['.2'] = MIBVar('snRPi', '.2', max_access=MIB_MAX_ACCESS_NA, syntax=MIB_SYNTAX_OID)
# Raspberry Pi processor temperature
mib['.2.1.0'] = MIBVar('snRPiTemperature', '.2.1.0', handler=sysfparams[SYSFPARAM_RPI_PROC_TEMP_KEY].read,
syntax=MIB_SYNTAX_GAUGE)
# Make sorted list of OIDs
oids = sorted(mib.keys(), key=functools.cmp_to_key(cmp_oids))
# Fill in next OIDs
for i, o in enumerate(oids[:-1]):
mib[o].set_successor(oids[i + 1])
###
### Test whether a full OID falls under the MIB base
### If so, return the stripped OID, otherwise None
###
def strip_full_oid(full_oid):
# Set default return value
ret = None
# Strip MIB base
if full_oid.find(MIB_BASE) == 0:
oid = full_oid[len(MIB_BASE):]
# Check that the stripped OID is legal (empty or begins with a dot)
if len(oid) == 0 or oid[0] == '.':
ret = oid
return ret
###
### Find MIBVar object by OID
### Return the MIBVar object, or None if the OID does not exist
###
def find_mibvar(full_oid, mib):
# Strip MIB base
oid = strip_full_oid(full_oid)
if oid is None:
return None
# Get the MIBVar object by OID; it will be None if it does not exist
mibvar = mib.get(oid)
return mibvar
###
### Find next accessible MIBVar object by OID
### Return the MIBVar object, or None if there is no next OID
###
def find_mibvar_next(full_oid, mib, oids):
# Strip MIB base
oid = strip_full_oid(full_oid)
if oid is None:
return None
# Try to get MIBVar object by given OID
existed = mib.get(oid)
# If a MIBVar object with the given OID exists, the candidate object is its successor
if existed:
candidate_oid = existed.get_successor()
candidate = mib.get(candidate_oid)
# If no MIBVar object with the given OID exists ...
else:
# If the given OID is less than the first OID in the current list, the candidate object is the first one
if cmp_oids(oid, oids[0]) < 0:
candidate = mib.get(oids[0])
# If the given OID is greater than the last OID in the current list, there is no next object; return None
elif cmp_oids(oid, oids[-1]) > 0:
return None
# Otherwise run a binary search to find the candidate object
else:
else:
p_low = 0
p_high = len(oids) - 1
p_delta = p_high - p_low
while p_delta > 1:
p_check = p_low + int(p_delta / 2)
if cmp_oids(oid, oids[p_check]) < 0:
p_high = p_check
else:
p_low = p_check
p_delta = p_high - p_low
candidate = mib.get(oids[p_high])
# The next object is the first accessible one in the chain of successors starting from the candidate
while candidate and candidate.get_max_access() < MIB_MAX_ACCESS_RO:
candidate = candidate.get_successor()
return candidate
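# Example (illustrative): a full GETNEXT-style walk of the MIB, assuming
# mib_init() has been called.
#
#   var = find_mibvar_next(MIB_BASE, mib, oids)
#   while var:
#       print(MIB_BASE + var.get_oid(), var.get_value())
#       var = find_mibvar_next(MIB_BASE + var.get_oid(), mib, oids)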
###
### Channel record
###
class Channel:
# Constructor
def __init__(self, num=None, label=''):
# Class variables
self._num = num # channel number on MCP3008
self._label = label # symbolic channel name
self._adc = None # instance of MCP3008 class
self._acc = 0 # accumulator for consecutive measurements
self._samples = 0 # current number of samples in one measurement cycle
self._valid = False # valid flag
self._last = None # last calculated average value
self._ts = None # timestamp of last calculation
dbg('Calling MCP3008 class constructor for channel {}, port {} device {}'.format(self._num, SPI_PORT, SPI_DEVICE))
self._adc = MCP3008(channel=self._num, port=SPI_PORT, device=SPI_DEVICE)
dbg('MCP3008 object created for channel {}'.format(self._num))
# Reset accumulator
def reset_acc(self):
self._acc = 0
self._samples = 0
# Read current value and add it to accumulator
def add_acc(self):
self._acc += self._adc.value
self._samples += 1
# Calculate average of accumulator and store it
def avg_acc(self, factor=1):
self._valid = False
self._last = round(self._acc * factor / self._samples)
self._ts = time.time()
self._valid = True
# Return last value and its properties (valid indicator and timestamp)
def get_lastprop(self):
return self._valid, self._last, self._ts
# Return channel number
def get_num(self):
return self._num
# Return channel label
def get_label(self):
return self._label
# Return last value
def get_last(self):
return self._last
# Return valid indicator
def get_valid(self):
return self._valid
# Return timestamp
def get_ts(self):
return self._ts
# Clean-up
def destroy(self):
self._adc.close()
###
### Channel list record
###
class ChannelList:
# Constructor
def __init__(self, samples=DEF_AVG_SAMPLES, delta=DEF_AVG_DELTA, factor=DEF_FACTOR):
# Class variables
self._vec = {} # instances of Channel class
self._sorted_ch_nums = [] # sorted list of channel numbers
self._samples = DEF_AVG_SAMPLES # number of samples in one measurement cycle
self._delta = DEF_AVG_DELTA # interval between consecutive measurements in one cycle
self._factor = DEF_FACTOR # scale factor for channel data
self._valid = False # valid flag
self._ts_start = None # timestamp of the start of the last measurement cycle
self._ts_complete = None # timestamp of the completion of the last measurement cycle
self._lock = None # locked while a measurement cycle runs
self._last_values = {} # last values
self.set_parms(samples=samples, delta=delta, factor=factor)
self._lock = threading.Lock()
# Check and set parameters
def set_parms(self, samples=None, delta=None, factor=None):
if samples and type(samples).__name__ == 'int' and samples > MIN_AVG_SAMPLES:
self._samples = samples
if delta and (type(delta).__name__ in ['int', 'float']) and delta > MIN_AVG_DELTA:
self._delta = delta
if factor and (type(factor).__name__ in ['int', 'float']):
self._factor = factor
# Add channel to list
def add_ch(self, num=None, label=''):
dbg('Start adding channel {} with label {}'.format(num, label))
if type(num).__name__ == 'int' and num in range(0, 8) and num not in self._vec.keys():
self._vec[num] = Channel(num=num, label=label)
self._sorted_ch_nums = sorted(self._vec.keys())
self._last_values[num] = self._vec[num].get_lastprop()
return 0
else:
return -1
# Remove channel from list
def rem_ch(self, num=None):
dbg('Start removing channel {}'.format(num))
if type(num).__name__ == 'int' and num in self._vec.keys():
self._vec[num].destroy()
del self._vec[num]
self._sorted_ch_nums = sorted(self._vec.keys())
del self._last_values[num]
return 0
else:
return -1
# Reset all channel accumulators
def reset(self):
for ch in self._sorted_ch_nums:
self._vec[ch].reset_acc()
# Read all channels
def read(self):
for ch in self._sorted_ch_nums:
self._vec[ch].add_acc()
# Calculate the average value for all channels and store the results
def average(self):
for ch in self._sorted_ch_nums:
self._vec[ch].avg_acc(factor=self._factor)
self._last_values[ch] = self._vec[ch].get_lastprop()
# Run one measurement cycle
def measure(self):
# Acquire lock (nonblocking)
if self._lock.acquire(blocking=False):
try:
# Reset valid flag, save starting timestamp, reset all channel accumulators
self._valid = False
self._ts_start = time.time()
self.reset()
# Start the measurement cycle
i = self._samples
while i:
# Read all channels
self.read()
# Decrement the iteration counter; sleep between samples, or average and stop after the last one
i -= 1
if i:
time.sleep(self._delta)
else:
self.average()
break
finally:
# Set valid flag, save final timestamp and release lock
self._ts_complete = time.time()
self._valid = True
self._lock.release()
return 0
else:
return -1
# Return last values
def last(self):
return self._valid, self._ts_start, self._ts_complete, self._last_values
# Return number of channels
def num_of_channels(self):
return len(self._sorted_ch_nums)
# Return sorted list of channel numbers
def sorted_ch_nums(self):
return self._sorted_ch_nums
# Return Channel object
def vec(self, ch):
return self._vec.get(ch)
# Return valid indicator
def valid(self):
return self._valid
# Return timestamp of the start of the last measurement cycle
def ts_start(self):
return self._ts_start
# Return timestamp of the completion of the last measurement cycle
def ts_complete(self):
return self._ts_complete
# Clean-up
def destroy(self):
for ch in self._sorted_ch_nums.copy():
self.rem_ch(num=ch)
###
### System parameter record (parameter value read from file)
###
class SysFParam:
# Constructor
def __init__(self, fname=None, ptype='str'):
# Class variables
self._fname = fname # path to file
self._f = None # file object
self._opened = False # opened flag
self._ptype = ptype # parameter type, default: 'str'
def open(self):
if self._fname and not self._opened:
try:
self._f = open(self._fname, 'r')
self._opened = True
except OSError as err:
sys.stderr.write('WARN: Error opening file "{}": {}\n'.format(self._fname, err))
def read(self):
ret = None
if self._opened:
try:
self._f.seek(0)
line = self._f.readline()
if line:
line = line.rstrip('\n')
if self._ptype == 'int':
try:
ret = int(line)
except (TypeError, ValueError):
ret = 0
else:
ret = line
except OSError as err:
sys.stderr.write('WARN: Error reading file "{}": {}\n'.format(self._fname, err))
return ret
def destroy(self):
if self._opened:
self._f.close()
self._opened = False
###
### Signal handler
###
def signal_handler(signum, frame):
dbg('Signal caught: ' + str(signum))
return
###
### Cleanup
###
def cleanup():
dbg('Clean-up called')
sys.stderr.write('INFO: Clean-up\n')
signal.setitimer(signal.ITIMER_REAL, 0)
for v in sysfparams.values():
v.destroy()
ch_list.destroy()
poller.close()
os.close(pipe_r)
os.close(pipe_w)
if debug:
debug_fileobj.close()
sock.close()
###
### Run a measurement cycle
###
def run_measure_circle(print_table=False):
# Run the measurement cycle
if ch_list.measure():
sys.stderr.write('ERROR: Measurement cycle failed\n')
# Print channel values
elif print_table:
valid, ts_start, ts_complete, last_values = ch_list.last()
if valid:
ts_complete = round(ts_complete, 6)
ts_string = time.strftime(TS_FORMAT, time.localtime(ts_complete))
ts_fraction = int(math.modf(ts_complete)[0] * 1000000)
ts_diff = round((ts_complete - ts_start) * 1000000)
sys.stdout.write('{}.{:06d}, {:7d} us, '.format(ts_string, ts_fraction, ts_diff))
for ch in sorted_channels:
valid, last, ts = last_values[ch]
if valid:
sys.stdout.write('{}: {:+.2f}, '.format(channels_conf[ch], last/1000))
else:
sys.stdout.write('{}: NaN, '.format(channels_conf[ch]))
sys.stdout.write('\n')
###
### Process request
###
def process_message(data, client_address):
global in_pkts_total, in_pkts_valid, in_pkts_bad, in_pkts_bad_ver, in_pkts_bad_len, in_pkts_bad_cmd
global out_pkts_total, out_pkts_success, out_pkts_failed
in_pkts_total += 1
# Check if message header has correct length
if len(data) < PROTO_HDRSIZ:
in_pkts_bad += 1
in_pkts_bad_len += 1
return
# Parse message header
start = 0
ver, authtype, unused, cmd = struct.unpack('>BBBB', data[start:PROTO_HDRSIZ])
start += PROTO_HDRSIZ
if ver != PROTO_VER:
in_pkts_bad += 1
in_pkts_bad_ver += 1
return
# Received request for last values
if cmd == PROTO_CMD_GETLAST:
if data[start:]:
in_pkts_bad += 1
in_pkts_bad_len += 1
else:
in_pkts_valid += 1
# Prepare last values
valid, ts_start, ts_complete, last_values = ch_list.last()
retlast_pdu = retlast_hdr
retlast_pdu += struct.pack('>BBdd', len(sorted_channels), valid, ts_start, ts_complete)
for ch in sorted_channels:
valid, last, ts = last_values[ch]
retlast_pdu += struct.pack('>BBLd', ch, valid, last, ts)
retlast_len = len(retlast_pdu)
# Send PDU with last values
try:
sent = sock.sendto(retlast_pdu, client_address)
out_pkts_total += 1
if sent == retlast_len:
out_pkts_success += 1
else:
out_pkts_failed += 1
except OSError as err:
sys.stderr.write('Error sending to "{}:{}": {}\n'.format(client_address[0], client_address[1], err))
cleanup()
sys.exit(1)
else:
in_pkts_bad += 1
in_pkts_bad_cmd += 1
###
### Main program starts here
###
# Get PID
pid = os.getpid()
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('--server', '-s', help='name or address to bind to [single,optional]', default='')
parser.add_argument('--port', '-p', help='port number [single,optional]', type=int, default=DEF_PORT)
parser.add_argument('--minterval', '-m', help='measurement interval in seconds [single,optional]', type=int, default=DEF_MEASURE_INT)
parser.add_argument('--verbose', '-v', help='print extra messages [single,optional]', action='store_true', default=False)
parser.add_argument('--snmp-agent', '-a', help='act as SNMP agent [single,optional]', action='store_true', default=False)
parser.add_argument('--debug', '-d', help='debug output to file [single,optional]')
args = parser.parse_args()
bind_address = args.server
bind_port = args.port
mival = args.minterval
verbose = args.verbose
# SNMP agent
snmp_agent = args.snmp_agent
if snmp_agent:
print_table = False
else:
print_table = True
# Debug
if args.debug:
debug = True
debug_filename = args.debug
try:
debug_fileobj = open(debug_filename, mode='a')
except OSError as err:
sys.stderr.write('WARN: Error opening debug file "{}": {}\n'.format(debug_filename, err))
debug = False
else:
debug = False
dbg('Debug started')
dbg('Arguments: bind_address={}, bind_port={}, mival={}, verbose={}, snmp_agent={}, print_table={}'.format(
bind_address, bind_port, mival, verbose, snmp_agent, print_table))
dbg('RUID: {}, EUID: {}, SUID: {}, RGID: {}, EGID:{}, SGID: {}'.format(*(os.getresuid() + os.getresgid())))
dbg('Group IDs: {}'.format(repr(os.getgroups())))
# Save base time
ts_base = time.time()
dbg('Saved base timestamp: {}'.format(ts_base))
# We should pass handshake quickly
# We don't want to block while reading from stdin if acting as SNMP agent
if snmp_agent:
# Handshake
try:
dbg('Waiting handshake')
line = sys.stdin.readline()
dbg('Read line from stdin: {}'.format(repr(line)))
except OSError as err:
sys.stderr.write('Error reading stdin: {}\n'.format(err))
sys.exit(1)
if line == 'PING\n':
dbg('Got PING request, sending PONG reply')
sys.stdout.write('PONG\n')
sys.stdout.flush()
sys.stderr.write('INFO: Passed PING/PONG handshake\n')
else:
sys.stderr.write('Unrecognized handshake received\n')
sys.exit(1)
# Unblock stdin
dbg('Setting stdin to non-blocking')
stdin_fd = sys.stdin.fileno()
flags = fcntl.fcntl(stdin_fd, fcntl.F_GETFL, 0)
fcntl.fcntl(stdin_fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
# Check bind address
if bind_address:
dbg('Getting IP address for "{}"'.format(bind_address))
try:
bind_address = socket.gethostbyname(bind_address)
except OSError as err:
sys.stderr.write('Error getting address for "{}": {}\n'.format(bind_address, err))
sys.exit(1)
# Create a UDP/IP socket
dbg('Opening non-blocking UDP/IP socket')
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM | socket.SOCK_NONBLOCK)
sock_fd = sock.fileno()
except OSError as err:
sys.stderr.write('Error creating socket: {0}\n'.format(err))
sys.exit(1)
# Bind the socket to the port
dbg('Binding socket fd={} to {}:{}'.format(sock_fd, bind_address, bind_port))
try:
sock.bind((bind_address, bind_port))
except OSError as err:
sock.close()
sys.stderr.write('Error binding socket to "{}:{}": {}\n'.format(bind_address, bind_port, err))
sys.exit(1)
# Initialize statistics
in_pkts_total = 0
in_pkts_valid = 0
in_pkts_bad = 0
in_pkts_bad_ver = 0
in_pkts_bad_len = 0
in_pkts_bad_cmd = 0
out_pkts_total = 0
out_pkts_success = 0
out_pkts_failed = 0
# Initialize signal wakeup file descriptor
# The write end of the pipe must be set to non-blocking mode
# We also don't want to block while reading signal numbers from the read end
dbg('Creating pipe for catching signals and making its file descriptors non-blocking')
pipe_r, pipe_w = os.pipe()
flags = fcntl.fcntl(pipe_w, fcntl.F_GETFL, 0)
fcntl.fcntl(pipe_w, fcntl.F_SETFL, flags | os.O_NONBLOCK)
signal.set_wakeup_fd(pipe_w)
flags = fcntl.fcntl(pipe_r, fcntl.F_GETFL, 0)
fcntl.fcntl(pipe_r, fcntl.F_SETFL, flags | os.O_NONBLOCK)
dbg('Pipe created, pipe_r={}, pipe_w={}'.format(pipe_r, pipe_w))
# Redefine signal handlers
dbg('Redefining signal handlers')
signal.signal(signal.SIGALRM, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGHUP, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
# Create poller and register file descriptors
dbg('Creating poller and registering file descriptors')
poller = select.epoll()
poller.register(pipe_r, select.EPOLLIN)
poller.register(sock_fd, select.EPOLLIN)
if snmp_agent:
poller.register(stdin_fd, select.EPOLLIN)
# Calculate coefficient
factor = VREF * (R_PU + R_PD) / R_PD
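# How the factor works (assuming gpiozero's MCP3008.value is normalized to
# the range 0..1): value * VREF gives the divider tap voltage in mV, and
# (R_PU + R_PD) / R_PD scales it back to the measured source voltage.
# E.g. value = 0.5 -> 0.5 * 3324 * (9920 + 9930) / 9930 ≈ 3322 mV.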
# Prepare header for RETLAST PDU
retlast_hdr = struct.pack('>BBBB', PROTO_VER, PROTO_AUTHTYPE_NONE, PROTO_UNUSED, PROTO_CMD_RETLAST)
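# RETLAST PDU layout (big-endian), as assembled in process_message():
#   header:      B version, B authtype, B unused, B command
#   body:        B num_channels, B valid, d ts_start, d ts_complete
#   per channel: B channel, B valid, L last value (mV), d timestamp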
# Initialize channel list
dbg('Start initializing channel list, {} channels in configuration'.format(len(channels_conf)))
sys.stderr.write('INFO: Initializing channels\n')
ch_list = ChannelList(factor=factor)
sorted_channels = sorted(channels_conf.keys())
for ch in sorted_channels:
dbg('Start initializing channel {}'.format(ch))
if ch_list.add_ch(num=ch, label=channels_conf[ch]):
sys.stderr.write('ERROR: Failed to add channel number {}, label {}\n'.format(ch, channels_conf[ch]))
sys.exit(1)
dbg('Channel list initialized, {} channels'.format(ch_list.num_of_channels()))
# Initialize system parameter instances
dbg('Start initializing system parameter instances')
sysfparams[SYSFPARAM_RPI_PROC_TEMP_KEY] = SysFParam(fname=SYSFPARAM_RPI_PROC_TEMP_FNAME, ptype='int')
sysfparams[SYSFPARAM_RPI_PROC_TEMP_KEY].open()
# Initialize MIB objects and list of OIDs
if snmp_agent:
dbg('Start initializing MIB, base={}'.format(MIB_BASE))
mib_init(ch_list)
dbg('MIB initialized, {} variables'.format(len(mib)))
# Set interval timer
# Initial value of timer bounded to measurement interval
dbg('Setting interval timer, {} s'.format(mival))
t = time.time()
t_rest = mival - t % mival - MIVAL_SHIFT
if t_rest < 0:
t_rest += mival
signal.setitimer(signal.ITIMER_REAL, t_rest, mival)
# Main loop
dbg('Starting main loop')
sys.stderr.write('INFO: Entering main loop\n')
while True:
# Wait for events and process them
try:
dbg('Calling poller')
events = poller.poll()
dbg('Poller returned {} events'.format(len(events)))
except InterruptedError:
continue
for fd, flags in events:
dbg('Start processing event, fd={}, flags={}'.format(fd, flags))
# Signal received, extract signal numbers from wakeup fd
if fd == pipe_r and flags & select.EPOLLIN:
dbg('Signal received from wakeup fd, unpacking signal numbers')
data = os.read(pipe_r, SIG_WAKEUP_FD_RLEN)
signums = struct.unpack('{}B'.format(len(data)), data)
dbg('Signal numbers unpacked: {}'.format(signums))
# Process signals
for signum in signums:
if signum == signal.SIGALRM:
dbg('Got SIGALRM, running measure circle')
run_measure_circle(print_table)
elif signum == signal.SIGINT:
dbg('Got SIGINT, terminating')
sys.stderr.write('\nSIGINT received\n')
cleanup()
sys.exit(0)
elif signum == signal.SIGTERM:
dbg('Got SIGTERM, terminating')
sys.stderr.write('\nSIGTERM received\n')
cleanup()
sys.exit(0)
elif signum == signal.SIGHUP:
dbg('Got SIGHUP, ignoring')
sys.stderr.write('SIGHUP received\n')
else:
dbg('Got uncaught signal {}, ignoring'.format(signum))
sys.stderr.write('ERROR: Unexpected signal received: {}\n'.format(signum))
# Data available on stdin if acting as SNMP agent
elif snmp_agent and fd == stdin_fd and flags & select.EPOLLIN:
dbg('Data available on stdin, reading')
lines = sys.stdin.readlines()
dbg('Read {} lines from stdin: {}'.format(len(lines), repr(''.join(lines))))
if not lines:
                sys.stderr.write('ERROR: Caught event on stdin but no lines read\n')
continue
first = lines.pop(0)
# Handshake
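            # The stdin/stdout exchange below matches Net-SNMP's pass_persist
            # line protocol (an assumption based on the request format): snmpd
            # sends 'PING' once and expects 'PONG', then sends 'get' or
            # 'getnext' followed by an OID on the next line, and reads back
            # either 'OID\nTYPE\nVALUE\n' or 'NONE\n'.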
if first == 'PING\n':
dbg('Got PING request, sending PONG reply')
sys.stdout.write('PONG\n')
sys.stdout.flush()
sys.stderr.write('INFO: Passed PING/PONG handshake\n')
# GET request
elif first == 'get\n':
dbg('Start processing GET request')
if lines:
oid = lines.pop(0).rstrip('\n')
dbg('GET: Extracted OID: {}'.format(oid))
mibvar = find_mibvar(oid, mib)
if mibvar:
dbg('GET: Found MIBVar object')
if mibvar.get_max_access() < MIB_MAX_ACCESS_RO:
                            dbg('GET: MIBVar with OID {} not accessible'.format(oid))
sys.stderr.write('WARN: MIB variable with OID {} not accessible\n'.format(oid))
sys.stdout.write('NONE\n')
sys.stdout.flush()
else:
dbg('GET: MIBVar accessible, trying to get value')
val = mibvar.get_value()
if val is not None:
dbg('GET: Got value of MIBVar, constructing reply')
syn = mibvar.get_syntax()
synname = MIB_SYNTAX_NAMES[syn]
reply = '{}\n{}\n{}\n'.format(oid, synname, str(val))
dbg('GET: Sending reply to stdout: {}'.format(repr(reply)))
sys.stdout.write(reply)
sys.stdout.flush()
else:
dbg('GET: Read of MIBVar failed')
sys.stderr.write('WARN: Read of MIB variable with OID {} returned None\n'.format(oid))
sys.stdout.write('NONE\n')
sys.stdout.flush()
else:
dbg('GET: MIBVar object not found')
                        sys.stderr.write('WARN: MIB variable with OID {} does not exist\n'.format(oid))
sys.stdout.write('NONE\n')
sys.stdout.flush()
else:
dbg('Malformed GET request')
sys.stderr.write('WARN: Malformed GET request, no OID\n')
sys.stdout.write('NONE\n')
sys.stdout.flush()
# GETNEXT request
elif first == 'getnext\n':
dbg('Start processing GETNEXT request')
if lines:
oid = lines.pop(0).rstrip('\n')
dbg('GETNEXT: Extracted OID: {}'.format(oid))
mibvar_next = find_mibvar_next(oid, mib, oids)
if mibvar_next:
dbg('GETNEXT: Found next MIBVar object, trying to get value')
val_next = mibvar_next.get_value()
if val_next is not None:
dbg('GETNEXT: Got value of next MIBVar, constructing reply')
oid_next = MIB_BASE + mibvar_next.get_oid()
syn_next = mibvar_next.get_syntax()
synname_next = MIB_SYNTAX_NAMES[syn_next]
reply = '{}\n{}\n{}\n'.format(oid_next, synname_next, str(val_next))
dbg('GETNEXT: Sending reply to stdout: {}'.format(repr(reply)))
sys.stdout.write(reply)
sys.stdout.flush()
else:
dbg('GETNEXT: Read of next MIBVar failed')
                            sys.stderr.write('WARN: Read of MIB variable next to OID {} returned None\n'.format(oid))
sys.stdout.write('NONE\n')
sys.stdout.flush()
else:
dbg('GETNEXT: next MIBVar object not found')
sys.stderr.write('WARN: There is no accessible MIB variable next to OID {}\n'.format(oid))
sys.stdout.write('NONE\n')
sys.stdout.flush()
else:
dbg('Malformed GETNEXT request')
sys.stderr.write('WARN: Malformed GETNEXT request, no OID\n')
sys.stdout.write('NONE\n')
sys.stdout.flush()
# Unknown request
else:
dbg('Unrecognized request received on stdin')
sys.stderr.write('ERROR: Unrecognized request received on stdin\n')
sys.stdout.write('NONE\n')
sys.stdout.flush()
# Data available on socket
elif fd == sock_fd and flags & select.EPOLLIN:
dbg('Data available on socket, trying to read')
try:
data, client_address = sock.recvfrom(RECV_BUFSIZ)
except OSError as err:
            dbg('Error receiving from socket "{}:{}": {}'.format(bind_address, bind_port, err))
            sys.stderr.write('Error receiving from socket "{}:{}": {}\n'.format(bind_address, bind_port, err))
cleanup()
sys.exit(1)
# Process message
if data:
            dbg('Read {} bytes of data from {}:{}'.format(len(data), client_address[0], client_address[1]))
process_message(data, client_address)
# Unexpected event
else:
dbg('Unexpected event on fd {}, flags {}'.format(fd, flags))
sys.stderr.write('ERROR: Unexpected event on fd {}, flags {}\n'.format(fd, flags))
# This point should never be reached
# Cleanup and exit
cleanup()
sys.exit(0)
|
|
# -*- coding: utf8 -*-
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
import re
import os
import sys
import cgi
import codecs
from .core import *
from ._compat import urllib, URLError
def usage():
return """Usage: %(progname)s -s STOPLIST [OPTIONS] [HTML_FILE]
Convert HTML to plain text and remove boilerplate.
-o OUTPUT_FILE if not specified, output is written to stdout
--encoding=... default character encoding to be used if not specified
in the HTML meta tags (default: %(default_encoding)s)
--enc-force force specified encoding, ignore HTML meta tags
--enc-errors=... errors handling for character encoding conversion:
strict: fail on error
ignore: ignore characters which can't be converted
replace: replace characters which can't be converted
with U+FFFD unicode replacement characters
(default: %(default_enc_errors)s)
--format=... output format; possible values:
default: one paragraph per line, each preceded with
<p> or <h> (headings)
boilerplate: same as default, except for boilerplate
paragraphs are included, too, preceded
with <b>
detailed: one paragraph per line, each preceded with
<p> tag containing detailed information
about classification as attributes
krdwrd: KrdWrd compatible format
--no-headings disable special handling of headings
--list-stoplists print a list of inbuilt stoplists and exit
-V, --version print version information and exit
-h, --help display this help and exit
If no HTML_FILE specified, input is read from stdin.
STOPLIST must be one of the following:
- one of the inbuilt stoplists; see:
%(progname)s --list-stoplists
- path to a file with the most frequent words for given language,
one per line, in UTF-8 encoding
- None - this activates a language-independent mode
Advanced options:
--length-low=INT (default %(length_low)i)
--length-high=INT (default %(length_high)i)
--stopwords-low=FLOAT (default %(stopwords_low)f)
--stopwords-high=FLOAT (default %(stopwords_high)f)
--max-link-density=FLOAT (default %(max_link_density)f)
--max-heading-distance=INT (default %(max_heading_distance)i)
""" % {
        'progname': os.path.basename(sys.argv[0]),
'length_low': LENGTH_LOW_DEFAULT,
'length_high': LENGTH_HIGH_DEFAULT,
'stopwords_low': STOPWORDS_LOW_DEFAULT,
'stopwords_high': STOPWORDS_HIGH_DEFAULT,
'max_link_density': MAX_LINK_DENSITY_DEFAULT,
'max_heading_distance': MAX_HEADING_DISTANCE_DEFAULT,
'default_encoding': DEFAULT_ENCODING,
'default_enc_errors': DEFAULT_ENC_ERRORS,
}
def output_default(paragraphs, fp=sys.stdout, no_boilerplate=True):
"""
Outputs the paragraphs as:
<tag> text of the first paragraph
<tag> text of the second paragraph
...
where <tag> is <p>, <h> or <b> which indicates
    standard paragraph, heading or boilerplate respectively.
"""
for paragraph in paragraphs:
if paragraph.class_type == 'good':
if paragraph.heading:
tag = 'h'
else:
tag = 'p'
elif no_boilerplate:
continue
else:
tag = 'b'
print('<%s> %s' % (tag, cgi.escape(paragraph.text)), file=fp)
def output_detailed(paragraphs, fp=sys.stdout):
"""
Same as output_default, but only <p> tags are used and the following
attributes are added: class, cfclass and heading.
"""
for paragraph in paragraphs:
output = '<p class="%s" cfclass="%s" heading="%i" xpath="%s"> %s' % (
paragraph.class_type,
paragraph.cf_class,
int(paragraph.heading),
paragraph.xpath,
cgi.escape(paragraph.text)
)
print(output, file=fp)
def output_krdwrd(paragraphs, fp=sys.stdout):
"""
Outputs the paragraphs in a KrdWrd compatible format:
class<TAB>first text node
class<TAB>second text node
...
where class is 1, 2 or 3 which means
boilerplate, undecided or good respectively. Headings are output as
undecided.
"""
for paragraph in paragraphs:
if paragraph.class_type in ('good', 'neargood'):
if paragraph.heading:
cls = 2
else:
cls = 3
else:
cls = 1
for text_node in paragraph.text_nodes:
print('%i\t%s' % (cls, text_node.strip()), file=fp)
def main():
import getopt
from justext import __version__ as VERSION
try:
opts, args = getopt.getopt(sys.argv[1:], "o:s:hV", ["encoding=",
"enc-force", "enc-errors=", "format=",
"no-headings", "help", "version", "length-low=", "length-high=",
"stopwords-low=", "stopwords-high=", "max-link-density=",
"max-heading-distance=", "list-stoplists"])
except getopt.GetoptError as e:
print(e, file=sys.stderr)
print(usage(), file=sys.stderr)
sys.exit(1)
stream_writer = codecs.lookup('utf8')[-1]
fp_in = sys.stdin
fp_out = stream_writer(sys.stdout)
stoplist = None
format = 'default'
no_headings = False
length_low = LENGTH_LOW_DEFAULT
length_high = LENGTH_HIGH_DEFAULT
stopwords_low = STOPWORDS_LOW_DEFAULT
stopwords_high = STOPWORDS_HIGH_DEFAULT
max_link_density = MAX_LINK_DENSITY_DEFAULT
max_heading_distance = MAX_HEADING_DISTANCE_DEFAULT
encoding = None
default_encoding = DEFAULT_ENCODING
force_default_encoding = False
enc_errors = DEFAULT_ENC_ERRORS
try:
for o, a in opts:
if o in ("-h", "--help"):
print(usage())
sys.exit(0)
if o in ("-V", "--version"):
print("%s: jusText v%s\n\nCopyright (c) 2011 Jan Pomikalek <jan.pomikalek@gmail.com>" % (
os.path.basename(sys.argv[0]), VERSION))
sys.exit(0)
elif o == "--list-stoplists":
print("\n".join(get_stoplists()))
sys.exit(0)
elif o == "-o":
try:
fp_out = codecs.open(a, 'w', 'utf8')
except IOError as e:
raise JustextInvalidOptions(
"Can't open %s for writing: %s" % (a, e))
elif o == "-s":
if a.lower() == 'none':
stoplist = set()
else:
if os.path.isfile(a):
try:
fp_stoplist = codecs.open(a, 'r', 'utf8')
stoplist = set([l.strip() for l in fp_stoplist])
fp_stoplist.close()
except IOError as e:
raise JustextInvalidOptions(
"Can't open %s for reading: %s" % (a, e))
except UnicodeDecodeError as e:
raise JustextInvalidOptions(
"Unicode decoding error when reading "
"the stoplist (probably not in UTF-8): %s" % e)
elif a in get_stoplists():
stoplist = get_stoplist(a)
else:
                    if re.match(r'^\w*$', a):
# only alphabetical chars, probably misspelled or
# unsupported language
raise JustextInvalidOptions(
"Unknown stoplist: %s\nAvailable stoplists:\n%s" % (
a, '\n'.join(get_stoplists())))
else:
# probably incorrectly specified path
raise JustextInvalidOptions("File not found: %s" % a)
elif o == "--encoding":
try:
default_encoding = a
''.encode(default_encoding)
except LookupError:
raise JustextInvalidOptions("Uknown character encoding: %s" % a)
elif o == "--enc-force":
force_default_encoding = True
elif o == "--enc-errors":
if a.lower() in ['strict', 'ignore', 'replace']:
enc_errors = a.lower()
else:
raise JustextInvalidOptions("Invalid --enc-errors value: %s" % a)
elif o == "--format":
if a in ['default', 'boilerplate', 'detailed', 'krdwrd']:
format = a
else:
raise JustextInvalidOptions("Uknown output format: %s" % a)
elif o == "--no-headings":
no_headings = True
elif o == "--length-low":
try:
length_low = int(a)
except ValueError:
raise JustextInvalidOptions(
"Invalid value for %s: '%s'. Integer expected." % (o, a))
elif o == "--length-high":
try:
length_high = int(a)
except ValueError:
raise JustextInvalidOptions(
"Invalid value for %s: '%s'. Integer expected." % (o, a))
elif o == "--stopwords-low":
try:
stopwords_low = float(a)
except ValueError:
raise JustextInvalidOptions(
"Invalid value for %s: '%s'. Float expected." % (o, a))
elif o == "--stopwords-high":
try:
stopwords_high = float(a)
except ValueError:
raise JustextInvalidOptions(
"Invalid value for %s: '%s'. Float expected." % (o, a))
elif o == "--max-link-density":
try:
max_link_density = float(a)
except ValueError:
raise JustextInvalidOptions(
"Invalid value for %s: '%s'. Float expected." % (o, a))
elif o == "--max-heading-distance":
try:
max_heading_distance = int(a)
except ValueError:
raise JustextInvalidOptions(
"Invalid value for %s: '%s'. Integer expected." % (o, a))
if force_default_encoding:
encoding = default_encoding
if stoplist is None:
raise JustextInvalidOptions("No stoplist specified.")
if not stoplist:
# empty stoplist, switch to language-independent mode
stopwords_high = 0
stopwords_low = 0
if args:
try:
if re.match(r"[^:/]+://", args[0]):
fp_in = urllib.urlopen(args[0])
else:
fp_in = open(args[0], 'r')
            except (IOError, URLError) as e:
                raise JustextInvalidOptions(
                    "Can't open %s for reading: %s" % (args[0], e))
html_text = fp_in.read()
if fp_in is not sys.stdin:
fp_in.close()
paragraphs = justext(html_text, stoplist, length_low, length_high,
stopwords_low, stopwords_high, max_link_density, max_heading_distance,
no_headings, encoding, default_encoding, enc_errors)
if format == "default":
output_default(paragraphs, fp_out)
elif format == "boilerplate":
output_default(paragraphs, fp_out, no_boilerplate=False)
elif format == "detailed":
output_detailed(paragraphs, fp_out)
elif format == "krdwrd":
output_krdwrd(paragraphs, fp_out)
else:
# this should not happen; format checked when parsing options
raise AssertionError("Unknown format: %s" % format)
except JustextError as e:
print("%s: %s" % (os.path.basename(sys.argv[0]), e), file=sys.stderr)
sys.exit(1)
if __name__ == "__main__":
main()
|
|
# Copyright 2013 OpenStack Foundation
# Copyright 2013 Rackspace Hosting
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import uuid
from mock import ANY, patch, MagicMock
import swiftclient.client
from testtools import ExpectedException, matchers
from trove.common import cfg
from trove.common.context import TroveContext
from trove.common import exception
from trove.common import glance_remote
from trove.common import remote
from trove.tests.fakes.swift import SwiftClientStub
from trove.tests.unittests import trove_testtools
class TestRemote(trove_testtools.TestCase):
def setUp(self):
super(TestRemote, self).setUp()
def tearDown(self):
super(TestRemote, self).tearDown()
@patch.object(swiftclient.client.Connection, 'get_auth')
def test_creation(self, get_auth_mock):
self.assertIsNotNone(swiftclient.client.Connection())
def test_create_swift_client(self):
mock_resp = MagicMock()
with patch.object(swiftclient.client.Connection, 'get_container',
MagicMock(return_value=["text", mock_resp])):
service_catalog = [{'endpoints': [{'region': 'RegionOne',
'publicURL': 'example.com'}],
'type': 'object-store'}]
client = remote.create_swift_client(TroveContext(
tenant=uuid.uuid4().hex,
service_catalog=service_catalog))
headers, container = client.get_container('bob')
self.assertIs(headers, "text")
self.assertIs(container, mock_resp)
def test_empty_account(self):
"""
this is an account with no containers and no objects
"""
# setup expectation
with SwiftClientStub() as swift_stub:
swift_stub.with_account('123223')
# interact
conn = swiftclient.client.Connection()
account_info = conn.get_account()
self.assertThat(account_info, matchers.Not(matchers.Is(None)))
self.assertThat(len(account_info), matchers.Is(2))
self.assertThat(account_info, matchers.IsInstance(tuple))
self.assertThat(account_info[0], matchers.IsInstance(dict))
self.assertThat(
account_info[0],
matchers.KeysEqual('content-length', 'accept-ranges',
'x-timestamp', 'x-trans-id', 'date',
'x-account-bytes-used',
'x-account-container-count',
'content-type',
'x-account-object-count'))
self.assertThat(account_info[1], matchers.IsInstance(list))
self.assertThat(len(account_info[1]), matchers.Is(0))
def test_one_container(self):
"""
tests to ensure behavior is normal with one container
"""
# setup expectation
with SwiftClientStub() as swift_stub:
swift_stub.with_account('123223')
cont_name = 'a-container-name'
swift_stub.with_container(cont_name)
# interact
conn = swiftclient.client.Connection()
conn.get_auth()
conn.put_container(cont_name)
# get headers plus container metadata
self.assertThat(len(conn.get_account()), matchers.Is(2))
# verify container details
account_containers = conn.get_account()[1]
self.assertThat(len(account_containers), matchers.Is(1))
self.assertThat(account_containers[0],
matchers.KeysEqual('count', 'bytes', 'name'))
self.assertThat(account_containers[0]['name'],
matchers.Is(cont_name))
# get container details
cont_info = conn.get_container(cont_name)
self.assertIsNotNone(cont_info)
self.assertThat(
cont_info[0],
matchers.KeysEqual('content-length',
'x-container-object-count', 'accept-ranges',
'x-container-bytes-used', 'x-timestamp',
'x-trans-id', 'date', 'content-type'))
self.assertThat(len(cont_info[1]), matchers.Equals(0))
# remove container
swift_stub.without_container(cont_name)
with ExpectedException(swiftclient.ClientException):
conn.get_container(cont_name)
# ensure there are no more containers in account
self.assertThat(len(conn.get_account()[1]), matchers.Is(0))
def test_one_object(self):
with SwiftClientStub() as swift_stub:
swift_stub.with_account('123223')
swift_stub.with_container('bob')
swift_stub.with_object('bob', 'test', 'test_contents')
# create connection
conn = swiftclient.client.Connection()
# test container lightly
cont_info = conn.get_container('bob')
self.assertIsNotNone(cont_info)
self.assertThat(cont_info[0],
matchers.KeysEqual('content-length',
'x-container-object-count',
'accept-ranges',
'x-container-bytes-used',
'x-timestamp',
'x-trans-id',
'date',
'content-type'))
cont_objects = cont_info[1]
self.assertThat(len(cont_objects), matchers.Equals(1))
obj_1 = cont_objects[0]
self.assertThat(obj_1, matchers.Equals(
{'bytes': 13, 'last_modified': '2013-03-15T22:10:49.361950',
'hash': 'ccc55aefbf92aa66f42b638802c5e7f6', 'name': 'test',
'content_type': 'application/octet-stream',
'contents': 'test_contents'}))
# test object api - not much to do here
self.assertThat(conn.get_object('bob', 'test')[1],
matchers.Is('test_contents'))
# test remove object
swift_stub.without_object('bob', 'test')
# interact
with ExpectedException(swiftclient.ClientException):
conn.delete_object('bob', 'test')
self.assertThat(len(conn.get_container('bob')[1]), matchers.Is(0))
def test_two_objects(self):
with SwiftClientStub() as swift_stub:
swift_stub.with_account('123223')
swift_stub.with_container('bob')
swift_stub.with_container('bob2')
swift_stub.with_object('bob', 'test', 'test_contents')
swift_stub.with_object('bob', 'test2', 'test_contents2')
conn = swiftclient.client.Connection()
            self.assertEqual(2, len(conn.get_account()))
cont_info = conn.get_container('bob')
self.assertIsNotNone(cont_info)
self.assertThat(cont_info[0],
matchers.KeysEqual('content-length',
'x-container-object-count',
'accept-ranges',
'x-container-bytes-used',
'x-timestamp',
'x-trans-id',
'date',
'content-type'))
self.assertThat(len(cont_info[1]), matchers.Equals(2))
self.assertThat(cont_info[1][0], matchers.Equals(
{'bytes': 13, 'last_modified': '2013-03-15T22:10:49.361950',
'hash': 'ccc55aefbf92aa66f42b638802c5e7f6', 'name': 'test',
'content_type': 'application/octet-stream',
'contents': 'test_contents'}))
self.assertThat(conn.get_object('bob', 'test')[1],
matchers.Is('test_contents'))
self.assertThat(conn.get_object('bob', 'test2')[1],
matchers.Is('test_contents2'))
swift_stub.without_object('bob', 'test')
with ExpectedException(swiftclient.ClientException):
conn.delete_object('bob', 'test')
self.assertThat(len(conn.get_container('bob')[1]), matchers.Is(1))
swift_stub.without_container('bob')
with ExpectedException(swiftclient.ClientException):
conn.get_container('bob')
self.assertThat(len(conn.get_account()), matchers.Is(2))
def test_nonexisting_container(self):
"""
when a container does not exist and is accessed then a 404 is returned
"""
with SwiftClientStub() as swift_stub:
swift_stub.with_account('123223')
swift_stub.with_container('existing')
conn = swiftclient.client.Connection()
with ExpectedException(swiftclient.ClientException):
conn.get_container('nonexisting')
def test_replace_object(self):
"""
Test to ensure that if an object is updated the container object
count is the same and the contents of the object are updated
"""
with SwiftClientStub() as swift_stub:
swift_stub.with_account('1223df2')
swift_stub.with_container('new-container')
swift_stub.with_object('new-container', 'new-object',
'new-object-contents')
conn = swiftclient.client.Connection()
conn.put_object('new-container', 'new-object',
'new-object-contents')
obj_resp = conn.get_object('new-container', 'new-object')
self.assertThat(obj_resp, matchers.Not(matchers.Is(None)))
self.assertThat(len(obj_resp), matchers.Is(2))
self.assertThat(obj_resp[1], matchers.Is('new-object-contents'))
# set expected behavior - trivial here since it is the intended
# behavior however keep in mind this is just to support testing of
# trove components
swift_stub.with_object('new-container', 'new-object',
'updated-object-contents')
conn.put_object('new-container', 'new-object',
'updated-object-contents')
obj_resp = conn.get_object('new-container', 'new-object')
self.assertThat(obj_resp, matchers.Not(matchers.Is(None)))
self.assertThat(len(obj_resp), matchers.Is(2))
self.assertThat(obj_resp[1], matchers.Is(
'updated-object-contents'))
# ensure object count has not increased
self.assertThat(len(conn.get_container('new-container')[1]),
matchers.Is(1))
class TestCreateCinderClient(trove_testtools.TestCase):
def setUp(self):
super(TestCreateCinderClient, self).setUp()
self.volumev2_public_url = 'http://publicURL/v2'
self.volume_public_url_region_two = 'http://publicURL-r2/v1'
self.service_catalog = [
{
'endpoints': [
{
'region': 'RegionOne',
'publicURL': self.volumev2_public_url,
}
],
'type': 'volumev2'
},
{
'endpoints': [
{
'region': 'RegionOne',
'publicURL': 'http://publicURL-r1/v1',
},
{
'region': 'RegionTwo',
'publicURL': self.volume_public_url_region_two,
}
],
'type': 'volume'
}
]
def tearDown(self):
super(TestCreateCinderClient, self).tearDown()
cfg.CONF.clear_override('cinder_url')
cfg.CONF.clear_override('cinder_service_type')
cfg.CONF.clear_override('os_region_name')
def test_create_with_no_conf_no_catalog(self):
self.assertRaises(exception.EmptyCatalog,
remote.create_cinder_client,
TroveContext())
def test_create_with_conf_override(self):
cinder_url_from_conf = 'http://example.com'
tenant_from_ctx = uuid.uuid4().hex
cfg.CONF.set_override('cinder_url', cinder_url_from_conf)
client = remote.create_cinder_client(
TroveContext(tenant=tenant_from_ctx))
self.assertEqual('%s/%s' % (cinder_url_from_conf, tenant_from_ctx),
client.client.management_url)
def test_create_with_conf_override_trailing_slash(self):
cinder_url_from_conf = 'http://example.com/'
tenant_from_ctx = uuid.uuid4().hex
cfg.CONF.set_override('cinder_url', cinder_url_from_conf)
client = remote.create_cinder_client(
TroveContext(tenant=tenant_from_ctx))
self.assertEqual('%s%s' % (cinder_url_from_conf, tenant_from_ctx),
client.client.management_url)
def test_create_with_catalog_and_default_service_type(self):
client = remote.create_cinder_client(
TroveContext(service_catalog=self.service_catalog))
self.assertEqual(self.volumev2_public_url,
client.client.management_url)
def test_create_with_catalog_all_opts(self):
cfg.CONF.set_override('cinder_service_type', 'volume')
cfg.CONF.set_override('os_region_name', 'RegionTwo')
client = remote.create_cinder_client(
TroveContext(service_catalog=self.service_catalog))
self.assertEqual(self.volume_public_url_region_two,
client.client.management_url)
class TestCreateNovaClient(trove_testtools.TestCase):
def setUp(self):
super(TestCreateNovaClient, self).setUp()
self.compute_public_url = 'http://publicURL/v2'
self.computev3_public_url_region_two = 'http://publicURL-r2/v3'
self.service_catalog = [
{
'endpoints': [
{
'region': 'RegionOne',
'publicURL': self.compute_public_url,
}
],
'type': 'compute'
},
{
'endpoints': [
{
'region': 'RegionOne',
'publicURL': 'http://publicURL-r1/v1',
},
{
'region': 'RegionTwo',
'publicURL': self.computev3_public_url_region_two,
}
],
'type': 'computev3'
}
]
def tearDown(self):
super(TestCreateNovaClient, self).tearDown()
cfg.CONF.clear_override('nova_compute_url')
cfg.CONF.clear_override('nova_compute_service_type')
cfg.CONF.clear_override('os_region_name')
cfg.CONF.clear_override('nova_proxy_admin_pass')
def test_create_with_no_conf_no_catalog(self):
self.assertRaises(exception.EmptyCatalog,
remote.create_nova_client,
TroveContext())
def test_create_with_conf_override(self):
nova_url_from_conf = 'http://example.com'
tenant_from_ctx = uuid.uuid4().hex
cfg.CONF.set_override('nova_compute_url', nova_url_from_conf)
client = remote.create_nova_client(
TroveContext(tenant=tenant_from_ctx))
self.assertEqual('%s/%s' % (nova_url_from_conf, tenant_from_ctx),
client.client.endpoint_override)
def test_create_with_conf_override_trailing_slash(self):
nova_url_from_conf = 'http://example.com/'
tenant_from_ctx = uuid.uuid4().hex
cfg.CONF.set_override('nova_compute_url', nova_url_from_conf)
client = remote.create_nova_client(
TroveContext(tenant=tenant_from_ctx))
self.assertEqual('%s%s' % (nova_url_from_conf, tenant_from_ctx),
client.client.endpoint_override)
def test_create_with_catalog_and_default_service_type(self):
client = remote.create_nova_client(
TroveContext(service_catalog=self.service_catalog))
self.assertEqual(self.compute_public_url,
client.client.endpoint_override)
def test_create_with_catalog_all_opts(self):
cfg.CONF.set_override('nova_compute_service_type', 'computev3')
cfg.CONF.set_override('os_region_name', 'RegionTwo')
client = remote.create_nova_client(
TroveContext(service_catalog=self.service_catalog))
self.assertEqual(self.computev3_public_url_region_two,
client.client.endpoint_override)
def test_create_admin_client(self):
nova_url_from_conf = 'http://adminexample.com/'
cfg.CONF.set_override('nova_compute_url', nova_url_from_conf)
admin_user = 'admin1'
admin_pass = 'adminpwd'
admin_tenant_id = uuid.uuid4().hex
admin_client = remote.create_admin_nova_client(
TroveContext(user=admin_user,
auth_token=admin_pass,
tenant=admin_tenant_id))
# self.assertEqual(admin_user, admin_client.client.user)
# self.assertEqual(admin_pass, admin_client.client.password)
self.assertEqual('%s%s' % (nova_url_from_conf, admin_tenant_id),
admin_client.client.endpoint_override)
@patch('trove.common.remote.Client', autospec=True)
def test_nova_client_password_passthrough(self, nova_mock):
test_domain = 'test_domain_name'
ctx = TroveContext(user='admin1',
project_id='project_id',
user_domain_name=test_domain,
service_catalog=self.service_catalog)
remote.nova_client(ctx, password='adminpass')
nova_mock.assert_called_with(ANY, username='admin1',
password='adminpass',
user_domain_name=test_domain,
project_id='project_id',
auth_token=None,
auth_url=ANY,
endpoint_override=ANY,
project_domain_name=ANY,
insecure=False)
@patch('trove.common.remote.create_nova_client', autospec=True)
def test_admin_client_password(self, nc_mock):
cfg.CONF.set_override('nova_proxy_admin_pass', 's3cr3t3')
remote.create_admin_nova_client('mycontext')
nc_mock.assert_called_with('mycontext', password='s3cr3t3')
class TestCreateSwiftClient(trove_testtools.TestCase):
def setUp(self):
super(TestCreateSwiftClient, self).setUp()
self.swift_public_url = 'http://publicURL/v2'
self.swiftv3_public_url_region_two = 'http://publicURL-r2/v3'
self.service_catalog = [
{
'endpoints': [
{
'region': 'RegionOne',
'publicURL': self.swift_public_url,
}
],
'type': 'object-store'
},
{
'endpoints': [
{
'region': 'RegionOne',
'publicURL': 'http://publicURL-r1/v1',
},
{
'region': 'RegionTwo',
'publicURL': self.swiftv3_public_url_region_two,
}
],
'type': 'object-storev3'
}
]
def tearDown(self):
super(TestCreateSwiftClient, self).tearDown()
cfg.CONF.clear_override('swift_url')
cfg.CONF.clear_override('swift_service_type')
cfg.CONF.clear_override('os_region_name')
def test_create_with_no_conf_no_catalog(self):
self.assertRaises(exception.EmptyCatalog,
remote.create_swift_client,
TroveContext())
def test_create_with_conf_override(self):
swift_url_from_conf = 'http://example.com/AUTH_'
tenant_from_ctx = uuid.uuid4().hex
cfg.CONF.set_override('swift_url', swift_url_from_conf)
client = remote.create_swift_client(
TroveContext(tenant=tenant_from_ctx))
self.assertEqual('%s%s' % (swift_url_from_conf, tenant_from_ctx),
client.url)
def test_create_with_catalog_and_default_service_type(self):
client = remote.create_swift_client(
TroveContext(service_catalog=self.service_catalog))
self.assertEqual(self.swift_public_url,
client.url)
def test_create_with_catalog_all_opts(self):
cfg.CONF.set_override('swift_service_type', 'object-storev3')
cfg.CONF.set_override('os_region_name', 'RegionTwo')
client = remote.create_swift_client(
TroveContext(service_catalog=self.service_catalog))
self.assertEqual(self.swiftv3_public_url_region_two,
client.url)
class TestCreateGlanceClient(trove_testtools.TestCase):
def setUp(self):
super(TestCreateGlanceClient, self).setUp()
self.glance_public_url = 'http://publicURL/v2'
self.glancev3_public_url_region_two = 'http://publicURL-r2/v3'
self.service_catalog = [
{
'endpoints': [
{
'region': 'RegionOne',
'publicURL': self.glance_public_url,
}
],
'type': 'image'
},
{
'endpoints': [
{
'region': 'RegionOne',
'publicURL': 'http://publicURL-r1/v1',
},
{
'region': 'RegionTwo',
'publicURL': self.glancev3_public_url_region_two,
}
],
'type': 'imagev3'
}
]
def test_create_with_no_conf_no_catalog(self):
self.assertRaises(exception.EmptyCatalog,
glance_remote.create_glance_client,
TroveContext())
def test_create(self):
client = glance_remote.create_glance_client(
TroveContext(service_catalog=self.service_catalog))
self.assertIsNotNone(client)
class TestEndpoints(trove_testtools.TestCase):
"""
Copied from glance/tests/unit/test_auth.py.
"""
def setUp(self):
super(TestEndpoints, self).setUp()
self.service_catalog = [
{
'endpoint_links': [],
'endpoints': [
{
'adminURL': 'http://localhost:8080/',
'region': 'RegionOne',
'internalURL': 'http://internalURL/',
'publicURL': 'http://publicURL/',
},
{
'adminURL': 'http://localhost:8081/',
'region': 'RegionTwo',
'internalURL': 'http://internalURL2/',
'publicURL': 'http://publicURL2/',
},
],
'type': 'object-store',
'name': 'Object Storage Service',
}
]
def test_get_endpoint_empty_catalog(self):
self.assertRaises(exception.EmptyCatalog,
remote.get_endpoint,
None)
def test_get_endpoint_with_custom_server_type(self):
endpoint = remote.get_endpoint(self.service_catalog,
service_type='object-store',
endpoint_region='RegionOne')
self.assertEqual('http://publicURL/', endpoint)
def test_get_endpoint_with_custom_endpoint_type(self):
endpoint = remote.get_endpoint(self.service_catalog,
service_type='object-store',
endpoint_type='internalURL',
endpoint_region='RegionOne')
self.assertEqual('http://internalURL/', endpoint)
def test_get_endpoint_raises_with_invalid_service_type(self):
self.assertRaises(exception.NoServiceEndpoint,
remote.get_endpoint,
self.service_catalog,
service_type='foo')
def test_get_endpoint_raises_with_invalid_endpoint_type(self):
self.assertRaises(exception.NoServiceEndpoint,
remote.get_endpoint,
self.service_catalog,
service_type='object-store',
endpoint_type='foo',
endpoint_region='RegionOne')
def test_get_endpoint_raises_with_invalid_endpoint_region(self):
self.assertRaises(exception.NoServiceEndpoint,
remote.get_endpoint,
self.service_catalog,
service_type='object-store',
endpoint_region='foo',
endpoint_type='internalURL')
def test_get_endpoint_ignores_missing_type(self):
service_catalog = [
{
'name': 'Other Service',
},
{
'endpoint_links': [],
'endpoints': [
{
'adminURL': 'http://localhost:8080/',
'region': 'RegionOne',
'internalURL': 'http://internalURL/',
'publicURL': 'http://publicURL/',
},
{
'adminURL': 'http://localhost:8081/',
'region': 'RegionTwo',
'internalURL': 'http://internalURL2/',
'publicURL': 'http://publicURL2/',
},
],
'type': 'object-store',
'name': 'Object Storage Service',
}
]
endpoint = remote.get_endpoint(service_catalog,
service_type='object-store',
endpoint_region='RegionOne')
self.assertEqual('http://publicURL/', endpoint)
|
|
#! /usr/bin/env python
import multiprocessing as mp
import numpy as np
import os
import shutil
import configparser
import subprocess
import collections
"""Module that contains functions to help run batches and parameter sweeps
"""
def _parse_config_fr_to_by(config_string):
"""Returns the value for the fr, to, and by, range in the config file.
Args:
config_string (string): a string of fr, to, by values
separated by \n character
Returns:
tuple: float of number to the right of the fr, to, and by values in the
config file
Examples: # TODO test this
        >>> print(_parse_config_fr_to_by("fr = 0\nto = 1\nby = .2"))
(0.0, 1.0, 0.2)
"""
config_string = config_string.strip()
fr_to_by_list = config_string.split('\n')
# for each fr, to, by value, take the value to the right of the equal sign
fr_to_by_str = tuple(e.split("=")[1].strip() for e in fr_to_by_list)
    fr_to_by_float = tuple(map(float, fr_to_by_str))
    return fr_to_by_float
def parse_config_list(config_string, sep=','):
"""Returns a python tuple of a delimited string from the config file
Args:
config_string (string): a delimited string separated by the sep param
        sep (string): delimiter for config_string, default is ','
Returns:
tuple: of string split by the sep
"""
    return tuple(config_string.split(sep))
def _get_sweep_values_range(fr, to, by):
"""Returns an ndarray values that will be used for the simulation run.
Will include the 'to' value if the 'by' step will does not exceed the
'to' value
Args:
fr (float): value of where the sweep will start
to (float): value of where the sweep will end
by (float): values between fr and to by specified step
Returns:
values (ndarray): values for parameter sweep
Examples:
        >>> _get_sweep_values_range(0., 10., 2.)
        array([ 0.,  2.,  4.,  6.,  8., 10.])
        >>> _get_sweep_values_range(1., 10., 2.)
        array([1., 3., 5., 7., 9.])
"""
assert(isinstance(fr, float))
assert(isinstance(to, float))
assert(isinstance(by, float))
values = np.arange(fr, to, by)
# print('values in _get_sweep_values_range(): ', str(values))
# make the range inclusive on the right, since this is what
    # the user will most likely mean in the parameter file
end_value = values[-1] + by
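    # Note: the exact float equality below can miss the endpoint due to
    # rounding; e.g. with fr=0., to=.3, by=.1 the computed end_value is
    # 0.30000000000000004 != 0.3, so math.isclose(end_value, to) would be
    # a more robust test.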
if end_value == to:
values = np.append(values, values[-1] + by)
# print('return values in _get_sweep_values_range(): ', str(values))
return values
def _get_sweep_values_list(config_string):
"""Returns an ndarray values that will be used for the simulation run
Args:
config_string (string): config string read form config file
Returns:
ndarray of values
"""
return np.asarray(
list(float(x.strip()) for x in (config_string.split(','))))
def get_sweep_values(config_string, sweep_type):
"""Returns an ndarray of values that will be used for the simulation run
    Takes a config string from the config file, and the sweep type. The sweep
    type can be a range (the fr, to, by syntax) or a list of values. This
    function will either call
    _ftb_string_to_values or
    _get_sweep_values_list
    depending on the sweep_type
Args:
config_string (string): string from the config file
sweep_type (string): type of config string, either range or list
Returns:
values (ndarray): values for parameter sweep
"""
if (sweep_type == "range"):
values_range = _ftb_string_to_values(config_string)
return(values_range)
elif (sweep_type == "list"):
values_list = _get_sweep_values_list(config_string)
return(values_list)
else:
        raise ValueError("Unknown sweep type, must be 'range' or 'list'; "
                         "got {}".format(sweep_type))
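# Example usage (a minimal sketch; the section and option names are
# assumptions, not taken from a real config file for this project):
#
#   import configparser
#   config = configparser.ConfigParser()
#   config.read('sweep.ini')
#   agent_values = get_sweep_values(config.get('Sweep', 'Agents'),
#                                   config.get('Sweep', 'AgentsSweepType'))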
def copy_directory(src, dest):
try:
shutil.copytree(src, dest)
# Directories are the same
except shutil.Error as e:
print('Directory not copied. Error: %s' % e)
# Any error saying that the directory doesn't exist
except OSError as e:
print('Directory not copied. Error: %s' % e)
def _ftb_string_to_values(ftb_string):
"""
"""
f, t, b = _parse_config_fr_to_by(ftb_string)
# print(f, t, b)
# sweep_values = np.arange(f, t, b)
# print(str(sweep_values))
# print(get_sweep_values(f, t, b))
sweep_values = _get_sweep_values_range(f, t, b)
# print("return sweep_values in _ftb_string_to_values(): ", sweep_values)
return sweep_values
def _format_values_lens(tuple_of_values):
# assert isinstance(tuple_of_values, collections.namedtuple)
# assert(len(tuple_of_values) == 5)
assert(len(tuple_of_values) == 7)
# agents, delta, epsilon, criterion, run = tuple_of_values
agents, run,\
clamp_strength,\
between_mean, between_sd,\
within_mean, within_sd = (tuple_of_values.agents_sweep_values,
tuple_of_values.num_sims_per,
tuple_of_values.clamp_sweep_values,
tuple_of_values.between_bank_mean_values,
tuple_of_values.between_bank_sd_values,
tuple_of_values.within_bank_mean_values,
tuple_of_values.within_bank_sd_values)
agents = int(agents)
# delta = float("{0:.2f}".format(delta))
# epsilon = float("{0:.2f}".format(epsilon))
# criterion = int(criterion)
run = int(run)
clamp_strength = float("{0:.2f}".format(clamp_strength))
between_mean = float("{0:.2f}".format(between_mean))
between_sd = float("{0:.2f}".format(between_sd))
within_mean = float("{0:.2f}".format(within_mean))
within_sd = float("{0:.2f}".format(within_sd))
# assert isinstance(agents, int)
# assert isinstance(delta, float)
# assert isinstance(epsilon, float)
# assert isinstance(criterion, int)
# assert isinstance(run, int)
Parameters = collections.namedtuple('Parameters',
['agents',
'between_mean', 'between_sd',
'within_mean', 'within_sd',
'clamp_strength', 'run'])
values = Parameters(agents=agents,
between_mean=between_mean,
between_sd=between_sd,
within_mean=within_mean,
within_sd=within_sd,
clamp_strength=clamp_strength,
run=run)
# print("MANN values: ", values)
return(values)
# return tuple((agents, delta, epsilon, criterion, run))
# return tuple((agents, between_mean, between_sd, within_mean, within_sd,
# clamp_strength, run))
def _format_values_watts(tuple_of_values):
assert(len(tuple_of_values) == 2)
agents, run = tuple_of_values
agents = int(agents)
run = int(run)
assert isinstance(agents, int)
assert isinstance(run, int)
return tuple((agents, run))
def format_values(base_directory, tuple_of_values):
if base_directory in ["02-lens"]:
return(_format_values_lens(tuple_of_values))
elif base_directory in ["01-watts"]:
return(_format_values_watts(tuple_of_values))
# def _create_folder_lens(base_directory,
# current_time,
# agents_str,
# delta_str,
# epsilon_str,
# criterion_str,
# run_str):
# new_sim_folder_name = '_'.join(['a'+agents_str,
# 'd'+delta_str,
# 'e'+epsilon_str,
# 'c'+criterion_str,
# 'r'+run_str])
# print(new_sim_folder_name, " created")
# batch_folder_name = '_'.join([base_directory, 'batch', current_time])
# # print('batch folder name: ', batch_folder_name)
# dir_to_copy_from = os.path.join(here, base_directory)
# # print('from: ', dir_to_copy_from)
# batch_folder_full_path = os.path.join(here, '..', 'results', 'simulations',
# batch_folder_name)
# if not os.path.exists(batch_folder_full_path):
# # print('created: ', batch_folder_full_path)
# os.makedirs(batch_folder_full_path)
# dir_to_copy_to = os.path.join(batch_folder_full_path,
# new_sim_folder_name)
# # print('to : ', dir_to_copy_to)
# copy_directory(dir_to_copy_from, dir_to_copy_to)
# return dir_to_copy_to
def _create_folder_lens(base_directory,
here,
current_time,
agents_str,
run_str):
new_sim_folder_name = '_'.join(['a'+agents_str,
'r'+run_str])
print(new_sim_folder_name, " created")
batch_folder_name = '_'.join([base_directory, 'batch', current_time])
print('batch folder name: ', batch_folder_name)
dir_to_copy_from = os.path.join(here, '..', base_directory)
print('from: ', dir_to_copy_from)
batch_folder_path = os.path.join(here, '..', '..',
'results', 'simulations',
batch_folder_name)
if not os.path.exists(batch_folder_path):
print('created: ', batch_folder_path)
os.makedirs(batch_folder_path)
dir_to_copy_to = os.path.join(batch_folder_path,
new_sim_folder_name)
print('to : ', dir_to_copy_to)
copy_directory(dir_to_copy_from, dir_to_copy_to)
return dir_to_copy_to
def _create_folder_watts(base_directory,
here,
current_time,
agents_str,
run_str):
new_sim_folder_name = '_'.join(['a'+agents_str,
'r'+run_str])
print(new_sim_folder_name, " created")
batch_folder_name = '_'.join([base_directory, 'batch', current_time])
print('batch folder name: ', batch_folder_name)
dir_to_copy_from = os.path.join(here, '..', base_directory)
print('from: ', dir_to_copy_from)
batch_folder_path = os.path.join(here, '..', '..',
'results', 'simulations',
batch_folder_name)
if not os.path.exists(batch_folder_path):
print('created: ', batch_folder_path)
os.makedirs(batch_folder_path)
dir_to_copy_to = os.path.join(batch_folder_path,
new_sim_folder_name)
print('to : ', dir_to_copy_to)
copy_directory(dir_to_copy_from, dir_to_copy_to)
return dir_to_copy_to
def create_folder(base_directory, here, **kwargs):
if base_directory in ["02-lens"]:
# dir_to_copy_to = _create_folder_lens(**kwargs)
dir_to_copy_to = _create_folder_lens(base_directory, here, **kwargs)
elif base_directory in ["01-watts"]:
dir_to_copy_to = _create_folder_watts(base_directory, here, **kwargs)
return dir_to_copy_to
def _update_init_file_lens(folder_name,
agents,
delta,
epsilon,
criterion,
run):
"""Updates the config file for a particular set of parameters for sweep
    Args:
        agents (int): number of agents parameter for sweep
        delta (float): weight/train-example mutation probability for sweep
        epsilon (float): epsilon value parameter for sweep
        criterion (int): criterion value parameter for sweep
        run (int): run number for a set of value parameters for sweep
    """
assert isinstance(agents, int)
assert isinstance(delta, float)
assert isinstance(epsilon, float)
assert isinstance(criterion, int)
assert isinstance(run, int)
#
# Read in config file
#
sim_config = configparser.SafeConfigParser()
sim_config_file_dir = os.path.join(folder_name, 'config.ini')
sim_config.read(sim_config_file_dir)
#
# Get new config file values
#
# set agents
sim_config.set('LENSParameters', 'NumberOfAgents',
str(agents))
# set delta
sim_config.set('LENSParameters', 'WeightTrainExampleMutationsProb',
str(delta))
# set epsilon
sim_config.set('LENSParameters', 'Epsilon', str(epsilon))
# set criterion
sim_config.set('LENSParameters', 'Criterion', str(criterion))
# set run
sim_config.set('General', 'RunNumber', str(run))
#
# Write new config file
#
with open(sim_config_file_dir, 'w') as update_config:
sim_config.write(update_config)
# print('config file updated: ', sim_config_file_dir)
def _update_init_file_watts(config,
                            agents,
                            run):
    """Updates the config file for a particular set of parameters for sweep
    Args:
        config (ConfigParser): parsed config object to update
        agents (int): number of agents parameter for sweep
        run (int): run number for a set of value parameters for sweep
    """
    assert isinstance(agents, int)
    assert isinstance(run, int)
    # set agents
    config.set('General', 'NumberOfAgents',
               str(agents))
    # set run
    config.set('General', 'RunNumber', str(run))
    return config
def update_init_file(folder_name, **kwargs):
"""Updates the config file for a particular set of parameters for sweep
"""
#
# Read in config file
#
sim_config = configparser.SafeConfigParser()
sim_config_file_dir = os.path.join(folder_name, 'config.ini')
sim_config.read(sim_config_file_dir)
if sim_config.get("General", "BaseDirectory") == "01-watts":
sim_config = _update_init_file_watts(sim_config,
agents=kwargs.get('agents'),
run=kwargs.get('run'))
else:
print('pass')
pass
#
# Write new config file
#
with open(sim_config_file_dir, 'w') as update_config:
sim_config.write(update_config)
# print('config file updated: ', sim_config_file_dir)
def num_cores(num_cores=None):
if num_cores is not None:
return int(num_cores)
else:
cores = mp.cpu_count()
print("Number of cores on this computer: ", cores)
if cores <= 12:
return cores
else:
return int(cores * (2/3.0))
def chmod_recursive(directory, dir_chmod=0o555, file_chmod=0o444):
os.chmod(directory, dir_chmod)
for root, dirs, files in os.walk(directory):
for sim_dir in dirs:
os.chmod(os.path.join(root, sim_dir), dir_chmod)
for sim_file in files:
os.chmod(os.path.join(root, sim_file), file_chmod)
def run_simulation(folder_name):
ex_file = os.path.join(folder_name, 'main.py')
subprocess.call(['python', ex_file])
chmod_recursive(folder_name)
|
|
from __future__ import print_function
import functools
from copy import copy
import numpy as np
from scipy.stats import norm as ndist
import regreg.api as rr
import regreg.affine as ra
from ..constraints.affine import constraints
from ..algorithms.sqrt_lasso import solve_sqrt_lasso, choose_lambda
from .query import (query,
multiple_queries,
langevin_sampler,
affine_gaussian_sampler)
from .reconstruction import reconstruct_opt
from .randomization import randomization
from .base import restricted_estimator
from .glm import (pairs_bootstrap_glm,
glm_nonparametric_bootstrap,
glm_parametric_covariance)
from ..algorithms.debiased_lasso import debiasing_matrix
class lasso_view(query):
def __init__(self,
loss,
epsilon,
penalty,
randomization,
perturb=None,
solve_args={'min_its': 50, 'tol': 1.e-10}):
"""
Fits the logistic regression to a candidate active set, without penalty.
Calls the method bootstrap_covariance() to bootstrap the covariance matrix.
Computes $\bar{\beta}_E$ which is the restricted
M-estimator (i.e. subject to the constraint $\beta_{-E}=0$).
Parameters:
-----------
active: np.bool
The active set from fitting the logistic lasso
solve_args: dict
Arguments to be passed to regreg solver.
Returns:
--------
None
Notes:
------
Sets self._beta_unpenalized which will be used in the covariance matrix calculation.
Also computes Hessian of loss at restricted M-estimator as well as the bootstrap covariance.
"""
query.__init__(self, randomization)
(self.loss,
self.epsilon,
self.penalty,
self.randomization) = (loss,
epsilon,
penalty,
randomization)
# Methods needed for subclassing a query
def solve(self, nboot=2000,
solve_args={'min_its': 20, 'tol': 1.e-10},
perturb=None):
self.randomize(perturb=perturb)
(loss,
randomized_loss,
epsilon,
penalty,
randomization) = (self.loss,
self.randomized_loss,
self.epsilon,
self.penalty,
self.randomization)
# initial solution
p = penalty.shape[0]
problem = rr.simple_problem(randomized_loss, penalty)
self.initial_soln = problem.solve(**solve_args)
# find the active groups and their direction vectors
# as well as unpenalized groups
active_signs = np.sign(self.initial_soln)
active = self._active = active_signs != 0
if isinstance(penalty, rr.l1norm):
self._lagrange = penalty.lagrange * np.ones(p)
unpenalized = np.zeros(p, np.bool)
elif isinstance(penalty, rr.weighted_l1norm):
self._lagrange = penalty.weights
unpenalized = self._lagrange == 0
else:
raise ValueError('penalty must be `l1norm` or `weighted_l1norm`')
active *= ~unpenalized
# solve the restricted problem
self._overall = (active + unpenalized) > 0
self._inactive = ~self._overall
self._unpenalized = unpenalized
_active_signs = active_signs.copy()
_active_signs[unpenalized] = np.nan # don't release sign of unpenalized variables
self.selection_variable = {'sign': _active_signs,
'variables': self._overall}
# initial state for opt variables
initial_subgrad = -(self.randomized_loss.smooth_objective(self.initial_soln, 'grad') +
self.randomized_loss.quadratic.objective(self.initial_soln, 'grad'))
# the quadratic of a smooth_atom is not included in computing the smooth_objective
self.initial_subgrad = initial_subgrad
initial_scalings = np.fabs(self.initial_soln[active])
initial_unpenalized = self.initial_soln[self._unpenalized]
self.observed_opt_state = np.concatenate([initial_scalings,
initial_unpenalized,
self.initial_subgrad[self._inactive]], axis=0)
# set the _solved bit
self._solved = True
# Now setup the pieces for linear decomposition
(loss,
epsilon,
penalty,
initial_soln,
overall,
inactive,
unpenalized) = (self.loss,
self.epsilon,
self.penalty,
self.initial_soln,
self._overall,
self._inactive,
self._unpenalized)
# we are implicitly assuming that
# loss is a pairs model
_beta_unpenalized = restricted_estimator(loss, overall, solve_args=solve_args)
beta_bar = np.zeros(p)
beta_bar[overall] = _beta_unpenalized
self._beta_full = beta_bar
# observed state for score in internal coordinates
self.observed_internal_state = np.hstack([_beta_unpenalized,
-loss.smooth_objective(beta_bar, 'grad')[inactive]])
# form linear part
self.num_opt_var = self.observed_opt_state.shape[0]
# (\bar{\beta}_{E \cup U}, N_{-E}, c_E, \beta_U, z_{-E})
# E for active
# U for unpenalized
# -E for inactive
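        # Together these encode the reconstruction of the randomization:
        # omega = score_linear . internal_state + opt_linear . opt_state
        #       + opt_affine, i.e. in effect the stationarity (KKT)
        # condition of the randomized problem solved for omega.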
_opt_linear_term = np.zeros((p, p))
_score_linear_term = np.zeros((p, p))
# \bar{\beta}_{E \cup U} piece -- the unpenalized M estimator
est_slice = slice(0, overall.sum())
X, y = loss.data
W = self.loss.saturated_loss.hessian(X.dot(beta_bar))
_hessian_active = np.dot(X.T, X[:, active] * W[:, None])
_hessian_unpen = np.dot(X.T, X[:, unpenalized] * W[:, None])
_score_linear_term[:, est_slice] = -np.hstack([_hessian_active, _hessian_unpen])
# N_{-(E \cup U)} piece -- inactive coordinates of score of M estimator at unpenalized solution
null_idx = np.arange(overall.sum(), p)
inactive_idx = np.nonzero(inactive)[0]
for _i, _n in zip(inactive_idx, null_idx):
_score_linear_term[_i, _n] = -1
# c_E piece
def signed_basis_vector(p, j, s):
v = np.zeros(p)
v[j] = s
return v
active_directions = np.array([signed_basis_vector(p, j, active_signs[j]) for j in np.nonzero(active)[0]]).T
scaling_slice = slice(0, active.sum())
if np.sum(active) == 0:
_opt_hessian = 0
else:
_opt_hessian = _hessian_active * active_signs[None, active] + epsilon * active_directions
_opt_linear_term[:, scaling_slice] = _opt_hessian
# beta_U piece
unpenalized_slice = slice(active.sum(), active.sum() + unpenalized.sum())
unpenalized_directions = np.array([signed_basis_vector(p, j, 1) for j in np.nonzero(unpenalized)[0]]).T
if unpenalized.sum():
_opt_linear_term[:, unpenalized_slice] = (_hessian_unpen
+ epsilon * unpenalized_directions)
# subgrad piece
subgrad_idx = range(active.sum() + unpenalized.sum(), active.sum() + inactive.sum() + unpenalized.sum())
subgrad_slice = slice(active.sum() + unpenalized.sum(), active.sum() + inactive.sum() + unpenalized.sum())
for _i, _s in zip(inactive_idx, subgrad_idx):
_opt_linear_term[_i, _s] = 1
# form affine part
_opt_affine_term = np.zeros(p)
idx = 0
_opt_affine_term[active] = active_signs[active] * self._lagrange[active]
# two transforms that encode score and optimization
# variable roles
self.opt_transform = (_opt_linear_term, _opt_affine_term)
self.score_transform = (_score_linear_term, np.zeros(_score_linear_term.shape[0]))
# everything now expressed in observed_score_state
self.observed_score_state = _score_linear_term.dot(self.observed_internal_state)
# now store everything needed for the projections
# the projection acts only on the optimization
# variables
# we form a dual group lasso object
# to do the projection
self._setup = True
self.subgrad_slice = subgrad_slice
self.scaling_slice = scaling_slice
self.unpenalized_slice = unpenalized_slice
self.ndim = loss.shape[0]
self.nboot = nboot
def get_sampler(self):
# setup the default optimization sampler
if not hasattr(self, "_sampler"):
penalty, inactive = self.penalty, self._inactive
inactive_lagrange = self.penalty.weights[inactive]
if not hasattr(self.randomization, "cov_prec"): # means randomization is not Gaussian
dual = rr.weighted_supnorm(1. / inactive_lagrange, bound=1.)
def projection(dual, subgrad_slice, scaling_slice, opt_state):
"""
Full projection for Langevin.
The state here will be only the state of the optimization variables.
"""
new_state = opt_state.copy() # not really necessary to copy
new_state[scaling_slice] = np.maximum(opt_state[scaling_slice], 0)
new_state[subgrad_slice] = dual.bound_prox(opt_state[subgrad_slice])
return new_state
projection = functools.partial(projection, dual, self.subgrad_slice, self.scaling_slice)
            def grad_log_density(query,
                                 opt_linear,
                                 rand_gradient,
                                 score_state,
                                 opt_state):
                full_state = score_state + reconstruct_opt(query.opt_transform, opt_state)
                return opt_linear.T.dot(rand_gradient(full_state).T)
            grad_log_density = functools.partial(grad_log_density, self, self.opt_transform[0], self.randomization.gradient)
            def log_density(query,
                            rand_log_density,
                            score_state,
                            opt_state):
                full_state = score_state + reconstruct_opt(query.opt_transform, opt_state)
                return rand_log_density(full_state)
            log_density = functools.partial(log_density, self, self.randomization.log_density)
self._sampler = langevin_sampler(self.observed_opt_state,
self.observed_score_state,
self.score_transform,
self.opt_transform,
projection,
grad_log_density,
log_density)
else:
# compute implied mean and covariance
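                # With Gaussian randomization of precision `prec` and opt
                # transform (A, u) (A = opt_linear, u = opt_offset), the opt
                # variables o given the observed score S are Gaussian with
                # precision A' prec A (cond_precision); cond_cov is its
                # inverse and cond_mean = -logdens_linear (S + u), where
                # logdens_linear = cond_cov A' prec.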
cov, prec = self.randomization.cov_prec
prec_array = len(np.asarray(prec).shape) == 2
opt_linear, opt_offset = self.opt_transform
if prec_array:
cond_precision = opt_linear.T.dot(prec.dot(opt_linear))
cond_cov = np.linalg.inv(cond_precision)
logdens_linear = cond_cov.dot(opt_linear.T.dot(prec))
else:
cond_precision = opt_linear.T.dot(opt_linear) * prec
cond_cov = np.linalg.inv(cond_precision)
logdens_linear = cond_cov.dot(opt_linear.T) * prec
cond_mean = -logdens_linear.dot(self.observed_score_state + opt_offset)
# need a log_density function
# the conditional density of opt variables
# given the score
def log_density(logdens_linear, offset, cond_prec, score, opt):
if score.ndim == 1:
mean_term = logdens_linear.dot(score.T + offset).T
else:
mean_term = logdens_linear.dot(score.T + offset[:, None]).T
arg = opt + mean_term
return - 0.5 * np.sum(arg * cond_prec.dot(arg.T).T, 1)
log_density = functools.partial(log_density, logdens_linear, opt_offset, cond_precision)
# now make the constraints
# scaling constraints
I = np.identity(cond_cov.shape[0])
A_scaling = -I[self.scaling_slice]
b_scaling = np.zeros(A_scaling.shape[0])
A_subgrad = np.vstack([I[self.subgrad_slice],
-I[self.subgrad_slice]])
b_subgrad = np.hstack([inactive_lagrange,
inactive_lagrange])
linear_term = np.vstack([A_scaling, A_subgrad])
offset = np.hstack([b_scaling, b_subgrad])
affine_con = constraints(linear_term,
offset,
mean=cond_mean,
covariance=cond_cov)
logdens_transform = (logdens_linear, opt_offset)
self._sampler = affine_gaussian_sampler(affine_con,
self.observed_opt_state,
self.observed_score_state,
log_density,
logdens_transform,
selection_info=self.selection_variable) # should be signs and the subgradients we've conditioned on
return self._sampler
sampler = property(get_sampler, query.set_sampler)
def decompose_subgradient(self, condition=None, marginalize=None):
"""
ADD DOCSTRING
condition and marginalize should be disjoint
"""
p = self.penalty.shape[0]
condition_inactive = np.zeros(p, dtype=np.bool)
if condition is None:
condition = np.zeros(p, dtype=np.bool)
if marginalize is None:
marginalize = np.zeros(p, dtype=np.bool)
marginalize[self._overall] = 0
if np.any(condition * marginalize):
raise ValueError("cannot simultaneously condition and marginalize over a group's subgradient")
if not self._setup:
raise ValueError('setup_sampler should be called before using this function')
_inactive = self._inactive
limits_marginal = np.zeros_like(_inactive, np.float)
condition_inactive = _inactive * condition
moving_inactive = _inactive * ~(marginalize + condition)
margin_inactive = _inactive * marginalize
        limits_marginal = self._lagrange
        if np.asarray(self._lagrange).shape in [(), (1,)]:
            limits_marginal = np.ones_like(_inactive, np.float) * self._lagrange
opt_linear, opt_offset = self.opt_transform
new_linear = np.zeros((opt_linear.shape[0], (self._active.sum() +
self._unpenalized.sum() +
moving_inactive.sum())))
new_linear[:, self.scaling_slice] = opt_linear[:, self.scaling_slice]
new_linear[:, self.unpenalized_slice] = opt_linear[:, self.unpenalized_slice]
inactive_moving_idx = np.nonzero(moving_inactive)[0]
subgrad_idx = range(self._active.sum() + self._unpenalized.sum(),
self._active.sum() + self._unpenalized.sum() +
moving_inactive.sum())
for _i, _s in zip(inactive_moving_idx, subgrad_idx):
new_linear[_i, _s] = 1.
observed_opt_state = self.observed_opt_state[:(self._active.sum() +
self._unpenalized.sum() +
moving_inactive.sum())]
observed_opt_state[subgrad_idx] = self.initial_subgrad[moving_inactive]
condition_linear = np.zeros((opt_linear.shape[0], (self._active.sum() +
self._unpenalized.sum() +
condition_inactive.sum())))
new_offset = opt_offset + 0.
new_offset[condition_inactive] += self.initial_subgrad[condition_inactive]
new_opt_transform = (new_linear, new_offset)
if not hasattr(self.randomization, "cov_prec") or marginalize.sum(): # use Langevin -- not gaussian
def _fraction(_cdf, _pdf, full_state_plus, full_state_minus, margin_inactive):
return (np.divide(_pdf(full_state_plus) - _pdf(full_state_minus),
_cdf(full_state_plus) - _cdf(full_state_minus)))[margin_inactive]
def new_grad_log_density(query,
limits_marginal,
margin_inactive,
_cdf,
_pdf,
new_opt_transform,
deriv_log_dens,
score_state,
opt_state):
full_state = score_state + reconstruct_opt(new_opt_transform, opt_state)
p = query.penalty.shape[0]
weights = np.zeros(p)
if margin_inactive.sum() > 0:
full_state_plus = full_state + limits_marginal * margin_inactive
full_state_minus = full_state - limits_marginal * margin_inactive
weights[margin_inactive] = _fraction(_cdf, _pdf, full_state_plus, full_state_minus, margin_inactive)
weights[~margin_inactive] = deriv_log_dens(full_state)[~margin_inactive]
                # the gradient is taken w.r.t. the reduced opt variables, so
                # use the linear part of new_opt_transform (the original
                # opt_linear's column count no longer matches opt_state)
                return -new_opt_transform[0].T.dot(weights)
new_grad_log_density = functools.partial(new_grad_log_density,
self,
limits_marginal,
margin_inactive,
self.randomization._cdf,
self.randomization._pdf,
new_opt_transform,
self.randomization._derivative_log_density)
def new_log_density(query,
limits_marginal,
margin_inactive,
_cdf,
_pdf,
new_opt_transform,
log_dens,
score_state,
opt_state):
full_state = score_state + reconstruct_opt(new_opt_transform, opt_state)
full_state = np.atleast_2d(full_state)
p = query.penalty.shape[0]
logdens = np.zeros(full_state.shape[0])
if margin_inactive.sum() > 0:
full_state_plus = full_state + limits_marginal * margin_inactive
full_state_minus = full_state - limits_marginal * margin_inactive
logdens += np.sum(np.log(_cdf(full_state_plus) - _cdf(full_state_minus))[:, margin_inactive],
axis=1)
logdens += log_dens(full_state[:, ~margin_inactive])
return np.squeeze(logdens) # should this be negative to match the gradient log density?
new_log_density = functools.partial(new_log_density,
self,
limits_marginal,
margin_inactive,
self.randomization._cdf,
self.randomization._pdf,
new_opt_transform,
self.randomization._log_density)
new_lagrange = self.penalty.weights[moving_inactive]
new_dual = rr.weighted_l1norm(new_lagrange, lagrange=1.).conjugate
def new_projection(dual,
noverall,
opt_state):
new_state = opt_state.copy()
new_state[self.scaling_slice] = np.maximum(opt_state[self.scaling_slice], 0)
new_state[noverall:] = dual.bound_prox(opt_state[noverall:])
return new_state
new_projection = functools.partial(new_projection,
new_dual,
self._overall.sum())
new_selection_variable = copy(self.selection_variable)
new_selection_variable['subgradient'] = self.observed_opt_state[condition_inactive]
self.sampler = langevin_sampler(observed_opt_state,
self.observed_score_state,
self.score_transform,
new_opt_transform,
new_projection,
new_grad_log_density,
new_log_density,
selection_info=(self, new_selection_variable))
else:
cov, prec = self.randomization.cov_prec
prec_array = len(np.asarray(prec).shape) == 2
if prec_array:
cond_precision = new_linear.T.dot(prec.dot(new_linear))
cond_cov = np.linalg.inv(cond_precision)
logdens_linear = cond_cov.dot(new_linear.T.dot(prec))
else:
cond_precision = new_linear.T.dot(new_linear) * prec
cond_cov = np.linalg.inv(cond_precision)
logdens_linear = cond_cov.dot(new_linear.T) * prec
cond_mean = -logdens_linear.dot(self.observed_score_state + new_offset)
def log_density(logdens_linear, offset, cond_prec, score, opt):
if score.ndim == 1:
mean_term = logdens_linear.dot(score.T + offset).T
else:
mean_term = logdens_linear.dot(score.T + offset[:, None]).T
arg = opt + mean_term
return - 0.5 * np.sum(arg * cond_prec.dot(arg.T).T, 1)
log_density = functools.partial(log_density, logdens_linear, new_offset, cond_precision)
# now make the constraints
# scaling constraints
# the scalings are first set of opt variables
# then unpenalized
# then the subgradients
I = np.identity(cond_cov.shape[0])
A_scaling = -I[self.scaling_slice]
b_scaling = np.zeros(A_scaling.shape[0])
A_subgrad = np.vstack([I[self._overall.sum():],
-I[self._overall.sum():]])
inactive_lagrange = self.penalty.weights[moving_inactive]
b_subgrad = np.hstack([inactive_lagrange,
inactive_lagrange])
linear_term = np.vstack([A_scaling, A_subgrad])
offset = np.hstack([b_scaling, b_subgrad])
affine_con = constraints(linear_term,
offset,
mean=cond_mean,
covariance=cond_cov)
logdens_transform = (logdens_linear, new_offset)
self._sampler = affine_gaussian_sampler(affine_con,
observed_opt_state,
self.observed_score_state,
log_density,
logdens_transform,
selection_info=self.selection_variable) # should be signs and the subgradients we've conditioned on
class glm_lasso(lasso_view):
def setup_sampler(self, scaling=1., solve_args={'min_its': 50, 'tol': 1.e-10}):
bootstrap_score = pairs_bootstrap_glm(self.loss,
self.selection_variable['variables'],
beta_full=self._beta_full,
inactive=~self.selection_variable['variables'])[0]
return bootstrap_score
class glm_lasso_parametric(lasso_view):
# this setup_sampler returns only the active set
def setup_sampler(self):
return self.selection_variable['variables']
class fixedX_lasso(lasso_view):
def __init__(self, X, Y, epsilon, penalty, randomization, solve_args={'min_its': 50, 'tol': 1.e-10}):
loss = glm.gaussian(X, Y)
lasso_view.__init__(self,
loss,
epsilon,
penalty,
randomization,
solve_args=solve_args)
def setup_sampler(self):
X, Y = self.loss.data
bootstrap_score = resid_bootstrap(self.loss,
self.selection_variable['variables'],
~self.selection_variable['variables'])[0]
return bootstrap_score
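# Hypothetical sketch (not part of the original module): the conditional
# Gaussian algebra used above in `get_sampler`. With Gaussian randomization
# omega ~ N(0, Sigma) reconstructed as omega = score + A o + b, the implied
# law of the opt variables o given the score has precision A^T Sigma^{-1} A
# and mean -cond_cov A^T Sigma^{-1} (score + b). All shapes below are made up.
def _example_conditional_gaussian():
    rng = np.random.RandomState(0)
    p, q = 5, 3
    A = rng.standard_normal((p, q))    # plays the role of opt_linear
    b = rng.standard_normal(p)         # plays the role of opt_offset
    score = rng.standard_normal(p)
    prec = np.identity(p)              # Sigma^{-1} for isotropic randomization
    cond_precision = A.T.dot(prec.dot(A))
    cond_cov = np.linalg.inv(cond_precision)
    logdens_linear = cond_cov.dot(A.T.dot(prec))
    cond_mean = -logdens_linear.dot(score + b)
    return cond_mean, cond_cov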
##### The class for users
class lasso(object):
r"""
A class for the LASSO for post-selection inference.
The problem solved is
.. math::
\text{minimize}_{\beta} \frac{1}{2n} \|y-X\beta\|^2_2 +
\lambda \|\beta\|_1 - \omega^T\beta + \frac{\epsilon}{2} \|\beta\|^2_2
where $\lambda$ is `lam`, $\omega$ is a randomization generated below
and the last term is a small ridge penalty.
"""
def __init__(self,
loglike,
feature_weights,
ridge_term,
randomizer_scale,
randomizer='gaussian',
parametric_cov_estimator=False,
perturb=None):
r"""
Create a new post-selection object for the LASSO problem
Parameters
----------
loglike : `regreg.smooth.glm.glm`
A (negative) log-likelihood as implemented in `regreg`.
feature_weights : np.ndarray
Feature weights for L-1 penalty. If a float,
            it is broadcast to all features.
ridge_term : float
How big a ridge term to add?
randomizer_scale : float
Scale for IID components of randomization.
randomizer : str (optional)
One of ['laplace', 'logistic', 'gaussian']
"""
self.loglike = loglike
self.nfeature = p = self.loglike.shape[0]
if np.asarray(feature_weights).shape == ():
feature_weights = np.ones(loglike.shape) * feature_weights
self.feature_weights = np.asarray(feature_weights)
self.parametric_cov_estimator = parametric_cov_estimator
if randomizer == 'laplace':
self.randomizer = randomization.laplace((p,), scale=randomizer_scale)
elif randomizer == 'gaussian':
self.randomizer = randomization.isotropic_gaussian((p,), randomizer_scale)
elif randomizer == 'logistic':
self.randomizer = randomization.logistic((p,), scale=randomizer_scale)
self.ridge_term = ridge_term
self.penalty = rr.weighted_l1norm(self.feature_weights, lagrange=1.)
self._initial_omega = perturb
def fit(self,
solve_args={'tol': 1.e-12, 'min_its': 50},
perturb=None,
nboot=1000):
"""
Fit the randomized lasso using `regreg`.
Parameters
----------
solve_args : keyword args
Passed to `regreg.problems.simple_problem.solve`.
Returns
-------
signs : np.float
Support and non-zero signs of randomized lasso solution.
"""
if perturb is not None:
self._initial_omega = perturb
p = self.nfeature
        if self.parametric_cov_estimator:
self._view = glm_lasso_parametric(self.loglike, self.ridge_term, self.penalty, self.randomizer)
else:
self._view = glm_lasso(self.loglike, self.ridge_term, self.penalty, self.randomizer)
self._view.solve(nboot=nboot, perturb=self._initial_omega, solve_args=solve_args)
self.signs = np.sign(self._view.initial_soln)
self.selection_variable = self._view.selection_variable
return self.signs
def decompose_subgradient(self,
condition=None,
marginalize=None):
"""
        Marginalize over, or condition on, some of the inactive part of
        the subgradient, if applicable.
Parameters
----------
condition : np.bool
Which groups' subgradients should we condition on.
marginalize : np.bool
Which groups' subgradients should we marginalize over.
Returns
-------
None
"""
if not hasattr(self, "_view"):
raise ValueError("fit method should be run first")
self._view.decompose_subgradient(condition=condition,
marginalize=marginalize)
def summary(self,
selected_features,
parameter=None,
level=0.9,
ndraw=10000,
burnin=2000,
compute_intervals=False,
bootstrap_sampler=False,
subset=None):
"""
Produce p-values and confidence intervals for targets
of model including selected features
Parameters
----------
selected_features : np.bool
Binary encoding of which features to use in final
model and targets.
parameter : np.array
Hypothesized value for parameter -- defaults to 0.
level : float
Confidence level.
        ndraw : int (optional)
            Defaults to 10000.
        burnin : int (optional)
            Defaults to 2000.
        bootstrap_sampler : bool
            Use wild bootstrap instead of Gaussian plugin.
"""
if not hasattr(self, "_view"):
raise ValueError('run `fit` method before producing summary.')
if parameter is None:
parameter = np.zeros(self.loglike.shape[0])
if np.asarray(selected_features).dtype != np.bool:
raise ValueError('selected_features should be a boolean array')
unpenalized_mle = restricted_estimator(self.loglike, selected_features)
        if not self.parametric_cov_estimator:
n = self.loglike.data[0].shape[0]
form_covariances = glm_nonparametric_bootstrap(n, n)
boot_target, boot_target_observed = pairs_bootstrap_glm(self.loglike, selected_features, inactive=None)
target_info = boot_target
else:
target_info = (selected_features, np.identity(unpenalized_mle.shape[0]))
form_covariances = glm_parametric_covariance(self.loglike)
opt_samplers = []
for q in [self._view]:
cov_info = q.setup_sampler()
            if not self.parametric_cov_estimator:
target_cov, score_cov = form_covariances(target_info,
cross_terms=[cov_info],
nsample=q.nboot)
else:
target_cov, score_cov = form_covariances(target_info,
cross_terms=[cov_info])
opt_samplers.append(q.sampler)
opt_samples = [opt_sampler.sample(ndraw,
burnin) for opt_sampler in opt_samplers]
if subset is not None:
target_cov = target_cov[subset][:, subset]
score_cov = score_cov[subset]
unpenalized_mle = unpenalized_mle[subset]
pivots = opt_samplers[0].coefficient_pvalues(unpenalized_mle, target_cov, score_cov, parameter=parameter,
sample=opt_samples[0])
if not np.all(parameter == 0):
pvalues = opt_samplers[0].coefficient_pvalues(unpenalized_mle, target_cov, score_cov,
parameter=np.zeros_like(parameter), sample=opt_samples[0])
else:
pvalues = pivots
intervals = None
if compute_intervals:
intervals = opt_samplers[0].confidence_intervals(unpenalized_mle, target_cov, score_cov,
sample=opt_samples[0])
return pivots, pvalues, intervals
@staticmethod
def gaussian(X,
Y,
feature_weights,
sigma=1.,
parametric_cov_estimator=False,
quadratic=None,
ridge_term=None,
randomizer_scale=None,
randomizer='gaussian',
perturb=None):
r"""
Squared-error LASSO with feature weights.
Objective function (before randomizer) is
$$
\beta \mapsto \frac{1}{2} \|Y-X\beta\|^2_2 + \sum_{i=1}^p \lambda_i |\beta_i|
$$
where $\lambda$ is `feature_weights`. The ridge term
is determined by the Hessian and `np.std(Y)` (scaled by $\sqrt{n/(n-1)}$) by default,
as is the randomizer scale.
Parameters
----------
X : ndarray
Shape (n,p) -- the design matrix.
Y : ndarray
Shape (n,) -- the response.
feature_weights: [float, sequence]
Penalty weights. An intercept, or other unpenalized
features are handled by setting those entries of
`feature_weights` to 0. If `feature_weights` is
a float, then all parameters are penalized equally.
sigma : float (optional)
Noise variance. Set to 1 if `covariance_estimator` is not None.
This scales the loglikelihood by `sigma**(-2)`.
quadratic : `regreg.identity_quadratic.identity_quadratic` (optional)
An optional quadratic term to be added to the objective.
Can also be a linear term by setting quadratic
coefficient to 0.
ridge_term : float
How big a ridge term to add?
randomizer_scale : float
Scale for IID components of randomizer.
randomizer : str
One of ['laplace', 'logistic', 'gaussian']
Returns
-------
L : `selection.randomized.convenience.lasso`
"""
loglike = rr.glm.gaussian(X, Y, coef=1. / sigma ** 2, quadratic=quadratic)
n, p = X.shape
mean_diag = np.mean((X ** 2).sum(0))
if ridge_term is None:
ridge_term = np.std(Y) * np.sqrt(mean_diag) / np.sqrt(n - 1)
if randomizer_scale is None:
randomizer_scale = np.sqrt(mean_diag) * 0.5 * np.std(Y) * np.sqrt(n / (n - 1.))
return lasso(loglike,
np.asarray(feature_weights) / sigma ** 2,
ridge_term,
randomizer_scale,
randomizer=randomizer,
parametric_cov_estimator=parametric_cov_estimator,
perturb=perturb)
@staticmethod
def logistic(X,
successes,
feature_weights,
trials=None,
parametric_cov_estimator=False,
quadratic=None,
ridge_term=None,
randomizer='gaussian',
randomizer_scale=None,
perturb=None):
r"""
Logistic LASSO with feature weights.
Objective function is
$$
\beta \mapsto \ell(X\beta) + \sum_{i=1}^p \lambda_i |\beta_i|
$$
where $\ell$ is the negative of the logistic
log-likelihood (half the logistic deviance)
and $\lambda$ is `feature_weights`.
Parameters
----------
X : ndarray
Shape (n,p) -- the design matrix.
successes : ndarray
Shape (n,) -- response vector. An integer number of successes.
For data that is proportions, multiply the proportions
by the number of trials first.
feature_weights: [float, sequence]
Penalty weights. An intercept, or other unpenalized
features are handled by setting those entries of
`feature_weights` to 0. If `feature_weights` is
a float, then all parameters are penalized equally.
trials : ndarray (optional)
Number of trials per response, defaults to
ones the same shape as Y.
quadratic : `regreg.identity_quadratic.identity_quadratic` (optional)
An optional quadratic term to be added to the objective.
Can also be a linear term by setting quadratic
coefficient to 0.
ridge_term : float
How big a ridge term to add?
randomizer_scale : float
Scale for IID components of randomizer.
randomizer : str
One of ['laplace', 'logistic', 'gaussian']
Returns
-------
L : `selection.randomized.convenience.lasso`
"""
n, p = X.shape
loglike = rr.glm.logistic(X, successes, trials=trials, quadratic=quadratic)
mean_diag = np.mean((X ** 2).sum(0))
if ridge_term is None:
            # NOTE: the original referenced an undefined ``Y`` here; use the
            # response ``successes`` for the scale instead
            ridge_term = np.std(successes) * np.sqrt(mean_diag) / np.sqrt(n - 1)
if randomizer_scale is None:
randomizer_scale = np.sqrt(mean_diag) * 0.5
return lasso(loglike, feature_weights,
ridge_term,
randomizer_scale,
parametric_cov_estimator=parametric_cov_estimator,
randomizer=randomizer,
perturb=perturb)
@staticmethod
def coxph(X,
times,
status,
feature_weights,
parametric_cov_estimator=False,
quadratic=None,
ridge_term=None,
randomizer='gaussian',
randomizer_scale=None,
perturb=None):
r"""
Cox proportional hazards LASSO with feature weights.
Objective function is
$$
\beta \mapsto \ell^{\text{Cox}}(\beta) + \sum_{i=1}^p \lambda_i |\beta_i|
$$
where $\ell^{\text{Cox}}$ is the
negative of the log of the Cox partial
likelihood and $\lambda$ is `feature_weights`.
Uses Efron's tie breaking method.
Parameters
----------
X : ndarray
Shape (n,p) -- the design matrix.
times : ndarray
Shape (n,) -- the survival times.
status : ndarray
Shape (n,) -- the censoring status.
feature_weights: [float, sequence]
Penalty weights. An intercept, or other unpenalized
features are handled by setting those entries of
`feature_weights` to 0. If `feature_weights` is
a float, then all parameters are penalized equally.
        parametric_cov_estimator : bool (optional)
            If True, use the parametric
            covariance estimate of the selected model.
quadratic : `regreg.identity_quadratic.identity_quadratic` (optional)
An optional quadratic term to be added to the objective.
Can also be a linear term by setting quadratic
coefficient to 0.
ridge_term : float
How big a ridge term to add?
randomizer_scale : float
Scale for IID components of randomizer.
randomizer : str
One of ['laplace', 'logistic', 'gaussian']
Returns
-------
L : `selection.randomized.convenience.lasso`
"""
        n, p = X.shape
        loglike = coxph_obj(X, times, status, quadratic=quadratic)
# scale for randomization seems kind of meaningless here...
mean_diag = np.mean((X ** 2).sum(0))
if ridge_term is None:
ridge_term = np.std(times) * np.sqrt(mean_diag) / np.sqrt(n - 1)
if randomizer_scale is None:
            # NOTE: the original referenced an undefined ``Y`` here; use
            # ``times`` for the scale instead
            randomizer_scale = np.sqrt(mean_diag) * 0.5 * np.std(times) * np.sqrt(n / (n - 1.))
return lasso(loglike,
feature_weights,
ridge_term,
randomizer_scale,
randomizer=randomizer,
parametric_cov_estimator=parametric_cov_estimator,
perturb=perturb)
@staticmethod
def poisson(X,
counts,
feature_weights,
parametric_cov_estimator=False,
quadratic=None,
ridge_term=None,
randomizer_scale=None,
randomizer='gaussian',
perturb=None):
r"""
Poisson log-linear LASSO with feature weights.
Objective function is
$$
\beta \mapsto \ell^{\text{Poisson}}(\beta) + \sum_{i=1}^p \lambda_i |\beta_i|
$$
where $\ell^{\text{Poisson}}$ is the negative
of the log of the Poisson likelihood (half the deviance)
and $\lambda$ is `feature_weights`.
Parameters
----------
X : ndarray
Shape (n,p) -- the design matrix.
counts : ndarray
Shape (n,) -- the response.
feature_weights: [float, sequence]
Penalty weights. An intercept, or other unpenalized
features are handled by setting those entries of
`feature_weights` to 0. If `feature_weights` is
a float, then all parameters are penalized equally.
quadratic : `regreg.identity_quadratic.identity_quadratic` (optional)
An optional quadratic term to be added to the objective.
Can also be a linear term by setting quadratic
coefficient to 0.
ridge_term : float
How big a ridge term to add?
randomizer_scale : float
Scale for IID components of randomizer.
randomizer : str
One of ['laplace', 'logistic', 'gaussian']
Returns
-------
L : `selection.randomized.convenience.lasso`
"""
n, p = X.shape
loglike = rr.glm.poisson(X, counts, quadratic=quadratic)
# scale for randomizer seems kind of meaningless here...
mean_diag = np.mean((X ** 2).sum(0))
if ridge_term is None:
ridge_term = np.std(counts) * np.sqrt(mean_diag) / np.sqrt(n - 1)
if randomizer_scale is None:
randomizer_scale = np.sqrt(mean_diag) * 0.5 * np.std(counts) * np.sqrt(n / (n - 1.))
return lasso(loglike,
feature_weights,
ridge_term,
randomizer_scale,
randomizer=randomizer,
parametric_cov_estimator=parametric_cov_estimator,
perturb=perturb)
@staticmethod
def sqrt_lasso(X,
Y,
feature_weights,
quadratic=None,
parametric_cov_estimator=False,
sigma_estimate='truncated',
solve_args={'min_its': 200},
                   ridge_term=None,
                   randomizer_scale=None,
perturb=None):
r"""
Use sqrt-LASSO to choose variables.
Objective function is
$$
\beta \mapsto \|Y-X\beta\|_2 + \sum_{i=1}^p \lambda_i |\beta_i|
$$
where $\lambda$ is `feature_weights`. After solving the problem
treat as if `gaussian` with implied variance and choice of
multiplier. See arxiv.org/abs/1504.08031 for details.
Parameters
----------
X : ndarray
Shape (n,p) -- the design matrix.
Y : ndarray
Shape (n,) -- the response.
feature_weights: [float, sequence]
Penalty weights. An intercept, or other unpenalized
features are handled by setting those entries of
`feature_weights` to 0. If `feature_weights` is
a float, then all parameters are penalized equally.
quadratic : `regreg.identity_quadratic.identity_quadratic` (optional)
An optional quadratic term to be added to the objective.
Can also be a linear term by setting quadratic
coefficient to 0.
covariance : str
One of 'parametric' or 'sandwich'. Method
used to estimate covariance for inference
in second stage.
sigma_estimate : str
One of 'truncated' or 'OLS'. Method
used to estimate $\sigma$ when using
parametric covariance.
solve_args : dict
Arguments passed to solver.
ridge_term : float
How big a ridge term to add?
randomizer_scale : float
Scale for IID components of randomizer.
Returns
-------
L : `selection.randomized.convenience.lasso`
Notes
-----
Unlike other variants of LASSO, this
solves the problem on construction as the active
set is needed to find equivalent gaussian LASSO.
Assumes parametric model is correct for inference,
i.e. does not accept a covariance estimator.
"""
n, p = X.shape
if np.asarray(feature_weights).shape == ():
            # NOTE: ``loglike`` is only defined further below; broadcast over p
            feature_weights = np.ones(p) * feature_weights
mean_diag = np.mean((X ** 2).sum(0))
if ridge_term is None:
ridge_term = np.sqrt(mean_diag) / np.sqrt(n - 1)
if randomizer_scale is None:
randomizer_scale = np.sqrt(mean_diag) * 0.5 * np.sqrt(n / (n - 1.))
if perturb is None:
perturb = np.random.standard_normal(p) * randomizer_scale
randomQ = rr.identity_quadratic(ridge_term, 0, -perturb, 0) # a ridge + linear term
if quadratic is not None:
totalQ = randomQ + quadratic
else:
totalQ = randomQ
soln, sqrt_loss = solve_sqrt_lasso(X,
Y,
weights=feature_weights,
quadratic=totalQ,
solve_args=solve_args,
force_fat=True)
denom = np.linalg.norm(Y - X.dot(soln))
loglike = rr.glm.gaussian(X, Y)
raise NotImplementedError(
'lasso_view needs to be modified so that the initial randomization can be set at construction time')
return lasso(loglike,
np.asarray(feature_weights) * denom,
ridge_term * denom,
randomizer_scale * denom,
randomizer='gaussian',
parametric_cov_estimator=parametric_cov_estimator,
perturb=perturb)
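# Hypothetical usage sketch (not part of the original module): how the
# convenience `lasso` class above is typically driven. The data below are
# made up, and `summary` runs an MCMC sampler, so keep ndraw/burnin small.
def _example_gaussian_lasso():
    np.random.seed(0)
    n, p = 100, 10
    X = np.random.standard_normal((n, p))
    Y = 2. * X[:, 0] + np.random.standard_normal(n)
    L = lasso.gaussian(X, Y, feature_weights=2. * np.ones(p))
    signs = L.fit()
    selected = signs != 0
    if not selected.sum():
        return None
    # pivots, p-values and (optionally) intervals for the selected model
    return L.summary(selected, ndraw=1000, burnin=200)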
|
|
"""Tweet text preprocessing module, processes Line Delimited JSON files.
Reads Line Delimited JSON file and and processes tweet's text. In text it
replaces entities specified in replacements.json.
"""
from __future__ import print_function
import json
import argparse
import sys
from functools import partial
from MultiprocessFiles import MultiprocessFiles
def update_indices(list_of_indices, delta, start_index):
"""Updates indices in list by delta"""
if list_of_indices is None:
return
for element in list_of_indices:
if element[0] < start_index:
continue
element[0] += delta
element[1] += delta
def replace_entity(entity, tweet, indices_list, list1, list2, list3, list4,
replacements):
"""Replaces entity in tweet based on indices_list
Other lists are just updated so they correspond with new processed text.
"""
for index_list in indices_list:
if index_list[1] > 140:
return None
entity_length = index_list[1] - index_list[0]
replacement_word_length = len(replacements[entity])
tweet['text'] = (tweet['text'][:index_list[0]] + replacements[entity] +
tweet['text'][index_list[1]:])
delta = replacement_word_length - entity_length
update_indices(list1, delta, index_list[0])
update_indices(list2, delta, index_list[0])
update_indices(list3, delta, index_list[0])
update_indices(list4, delta, index_list[0])
update_indices(indices_list, delta, index_list[0])
return True
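# Hypothetical illustration (tweet, indices and replacement tokens below are
# made up): replace_entity rewrites the mention in place and, had the
# replacement changed the text length, update_indices would shift every
# later entity's indices by the same delta.
def _example_replace_entity():
    tweet = {'text': '@alice check http://t.co/x'}
    replacements = {'user': '<user>', 'url': '<url>'}
    user_indices = [[0, 6]]    # '@alice'
    url_indices = [[13, 26]]   # the URL
    replace_entity('user', tweet, user_indices,
                   url_indices, None, None, None, replacements)
    # '@alice' (6 chars) became '<user>' (6 chars): delta is 0 here, so
    # url_indices are unchanged; a longer token would have shifted them.
    return tweet['text']  # '<user> check http://t.co/x'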
def replace_entities(tweet, replacements):
'''Replaces entities specified in replacements for one tweet's text'''
if 'entities' not in tweet:
return tweet
if 'user_mentions' in tweet['entities']:
list_of_users = tweet['entities']['user_mentions']
else:
list_of_users = None
if 'urls' in tweet['entities']:
list_of_urls = tweet['entities']['urls']
else:
list_of_urls = None
if 'hashtags' in tweet['entities']:
list_of_hashtags = tweet['entities']['hashtags']
else:
list_of_hashtags = None
if 'symbols' in tweet['entities']:
list_of_symbols = tweet['entities']['symbols']
else:
list_of_symbols = None
if 'media' in tweet['entities']:
list_of_media = tweet['entities']['media']
else:
list_of_media = None
    # update indices when replacing entities and check existence
if list_of_users is not None:
list_of_users_indices = [user['indices'] for user in list_of_users]
else:
list_of_users_indices = None
if list_of_urls is not None:
list_of_urls_indices = [url['indices'] for url in list_of_urls]
else:
list_of_urls_indices = None
if list_of_hashtags is not None:
list_of_hashtags_indices = [hashtag['indices'] for hashtag in
list_of_hashtags]
else:
list_of_hashtags_indices = None
if list_of_symbols is not None:
list_of_symbols_indices = [symbol['indices'] for symbol in
list_of_symbols]
else:
list_of_symbols_indices = None
if list_of_media is not None:
list_of_media_indices = [media['indices'] for media in list_of_media]
else:
list_of_media_indices = None
if list_of_users_indices is not None and replacements['user'] is not None:
ret = replace_entity('user', tweet, list_of_users_indices,
list_of_urls_indices, list_of_hashtags_indices,
list_of_symbols_indices, list_of_media_indices,
replacements)
if not ret:
return None
if list_of_urls_indices is not None and replacements['url'] is not None:
ret = replace_entity('url', tweet, list_of_urls_indices,
list_of_users_indices, list_of_hashtags_indices,
list_of_symbols_indices, list_of_media_indices,
replacements)
if not ret:
return None
if (list_of_hashtags_indices is not None and
replacements['hashtag'] is not None):
ret = replace_entity('hashtag', tweet, list_of_hashtags_indices,
list_of_users_indices, list_of_urls_indices,
list_of_symbols_indices, list_of_media_indices,
replacements)
if not ret:
return None
if (list_of_symbols_indices is not None and
replacements['symbol'] is not None):
ret = replace_entity('symbol', tweet, list_of_symbols_indices,
list_of_users_indices, list_of_urls_indices,
list_of_hashtags_indices, list_of_media_indices,
replacements)
if not ret:
return None
if list_of_media_indices is not None and replacements['url'] is not None:
ret = replace_entity('url', tweet, list_of_media_indices,
list_of_symbols_indices, list_of_users_indices,
list_of_urls_indices, list_of_hashtags_indices,
replacements)
if not ret:
return None
# remove field entities
ntweet = {u'text': tweet['text'], u'lang': tweet['lang'],
u'id': tweet['id']}
if 'created_at' in tweet:
ntweet['created_at'] = tweet['created_at']
if 'retweet_id' in tweet:
ntweet['retweet_id'] = tweet['retweet_id']
return ntweet
def preprocess_tweet(min_tokens, max_num_urls, max_num_users, replacements,
tweet_line):
""" Preprocess a single tweet """
try:
tweet = json.loads(tweet_line)
    except ValueError:
        # skip lines that are not valid JSON
return None
if 'entities' in tweet:
# filter based on num of urls
if len(tweet['entities']['urls']) > max_num_urls:
return None
# filter based on num of user mentions
if len(tweet['entities']['user_mentions']) > max_num_users:
return None
# replace entities
tweet = replace_entities(tweet, replacements)
if tweet is None:
        # entity replacement failed; drop this tweet
return None
# filter based on num of tokens
tokens = tweet['text'].split()
list_replacements = replacements.values()
tokens = [x for x in tokens if x not in list_replacements and
x.isalpha() and x.lower() != u'rt']
if len(tokens) < min_tokens:
return None
return tweet
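# Hypothetical demo (the sample tweet and replacement tokens are made up):
# a well-formed line passes the URL/user-mention filters, has its entities
# rewritten, and keeps enough plain tokens to survive the length filter.
def _example_preprocess_tweet():
    replacements = {'user': '<user>', 'url': '<url>',
                    'hashtag': '<hashtag>', 'symbol': '<symbol>'}
    line = json.dumps({
        'id': 1, 'lang': 'en',
        'text': 'some plain words here again @bob',
        'entities': {'urls': [], 'hashtags': [], 'symbols': [],
                     'user_mentions': [{'indices': [28, 32]}]},
    })
    return preprocess_tweet(5, 2, 3, replacements, line)
    # -> {'text': 'some plain words here again <user>', 'lang': 'en', 'id': 1}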
def main():
""" main """
# default parameters
min_tokens = 5
max_num_urls = 2
max_num_users = 3
replacements = json.load(open('replacements.json'))
parser = argparse.ArgumentParser()
    parser.add_argument('input_files', help='input file paths, comma-separated')
    parser.add_argument('output_files',
                        help='output file paths, comma-separated')
parser.add_argument('-t', '--min_tokens', type=int)
parser.add_argument('-r', '--max_urls', type=int)
parser.add_argument('-u', '--max_users', type=int)
parser.add_argument('-n', '--num_jobs', type=int, default=0,
help='number of worker processes to use. Default: \
number of cores')
parser.add_argument('-s', '--queue_size', type=int, default=2000)
args = parser.parse_args()
if args.min_tokens:
min_tokens = args.min_tokens
if args.max_urls:
max_num_urls = args.max_urls
if args.max_users:
max_num_users = args.max_users
infiles = args.input_files.split(',')
outfiles = args.output_files.split(',')
    if len(infiles) != len(outfiles):
        print('Input files and output files do not match in size')
        sys.exit(1)
func = partial(preprocess_tweet, min_tokens,
max_num_urls, max_num_users, replacements)
for infile, outfile in zip(infiles, outfiles):
multiprocess = MultiprocessFiles(infile, outfile, func,
num_procs=args.num_jobs,
queue_size=args.queue_size)
multiprocess.run()
if __name__ == '__main__':
main()
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Network-related utilities for supporting libvirt connection code."""
import os
import jinja2
import netaddr
from oslo_utils import strutils
import nova.conf
from nova.network import model
CONF = nova.conf.CONF
def get_net_and_mask(cidr):
net = netaddr.IPNetwork(cidr)
return str(net.ip), str(net.netmask)
def get_net_and_prefixlen(cidr):
net = netaddr.IPNetwork(cidr)
return str(net.ip), str(net._prefixlen)
def get_ip_version(cidr):
net = netaddr.IPNetwork(cidr)
return int(net.version)
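# Hypothetical sketch (the address is made up): what the three helpers above
# return for a typical IPv4 CIDR.
def _example_cidr_helpers():
    net, mask = get_net_and_mask('192.168.1.0/24')          # '192.168.1.0', '255.255.255.0'
    _, prefixlen = get_net_and_prefixlen('192.168.1.0/24')  # '24'
    version = get_ip_version('192.168.1.0/24')              # 4
    return net, mask, prefixlen, version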
def _get_first_network(network, version):
# Using a generator expression with a next() call for the first element
# of a list since we don't want to evaluate the whole list as we can
# have a lot of subnets
try:
return next(i for i in network['subnets']
if i['version'] == version)
except StopIteration:
pass
def get_injected_network_template(network_info, template=None,
libvirt_virt_type=None):
"""Returns a rendered network template for the given network_info.
:param network_info: `nova.network.models.NetworkInfo` object describing
the network metadata.
:param template: Path to the interfaces template file.
:param libvirt_virt_type: The Libvirt `virt_type`, will be `None` for
        other hypervisors.
"""
if not template:
template = CONF.injected_network_template
if not (network_info and template):
return
nets = []
ifc_num = -1
ipv6_is_available = False
for vif in network_info:
if not vif['network'] or not vif['network']['subnets']:
continue
network = vif['network']
# NOTE(bnemec): The template only supports a single subnet per
# interface and I'm not sure how/if that can be fixed, so this
# code only takes the first subnet of the appropriate type.
subnet_v4 = _get_first_network(network, 4)
subnet_v6 = _get_first_network(network, 6)
ifc_num += 1
if not network.get_meta('injected'):
continue
hwaddress = vif.get('address')
address = None
netmask = None
gateway = ''
broadcast = None
dns = None
routes = []
if subnet_v4:
if subnet_v4.get_meta('dhcp_server') is not None:
continue
if subnet_v4['ips']:
ip = subnet_v4['ips'][0]
address = ip['address']
netmask = model.get_netmask(ip, subnet_v4)
if subnet_v4['gateway']:
gateway = subnet_v4['gateway']['address']
broadcast = str(subnet_v4.as_netaddr().broadcast)
dns = ' '.join([i['address'] for i in subnet_v4['dns']])
for route_ref in subnet_v4['routes']:
(net, mask) = get_net_and_mask(route_ref['cidr'])
route = {'gateway': str(route_ref['gateway']['address']),
'cidr': str(route_ref['cidr']),
'network': net,
'netmask': mask}
routes.append(route)
address_v6 = None
gateway_v6 = ''
netmask_v6 = None
dns_v6 = None
if subnet_v6:
if subnet_v6.get_meta('dhcp_server') is not None:
continue
if subnet_v6['ips']:
ipv6_is_available = True
ip_v6 = subnet_v6['ips'][0]
address_v6 = ip_v6['address']
netmask_v6 = model.get_netmask(ip_v6, subnet_v6)
if subnet_v6['gateway']:
gateway_v6 = subnet_v6['gateway']['address']
dns_v6 = ' '.join([i['address'] for i in subnet_v6['dns']])
net_info = {'name': 'eth%d' % ifc_num,
'hwaddress': hwaddress,
'address': address,
'netmask': netmask,
'gateway': gateway,
'broadcast': broadcast,
'dns': dns,
'routes': routes,
'address_v6': address_v6,
'gateway_v6': gateway_v6,
'netmask_v6': netmask_v6,
'dns_v6': dns_v6,
}
nets.append(net_info)
if not nets:
return
tmpl_path, tmpl_file = os.path.split(template)
env = jinja2.Environment( # nosec
loader=jinja2.FileSystemLoader(tmpl_path), # nosec
trim_blocks=True)
template = env.get_template(tmpl_file)
return template.render({'interfaces': nets,
'use_ipv6': ipv6_is_available,
'libvirt_virt_type': libvirt_virt_type})
def get_network_metadata(network_info):
"""Gets a more complete representation of the instance network information.
This data is exposed as network_data.json in the metadata service and
the config drive.
:param network_info: `nova.network.models.NetworkInfo` object describing
the network metadata.
"""
if not network_info:
return
# IPv4 or IPv6 networks
nets = []
# VIFs, physical NICs, or VLANs. Physical NICs will have type 'phy'.
links = []
# Non-network bound services, such as DNS
services = []
ifc_num = -1
net_num = -1
for vif in network_info:
if not vif.get('network') or not vif['network'].get('subnets'):
continue
network = vif['network']
# NOTE(JoshNang) currently, only supports the first IPv4 and first
# IPv6 subnet on network, a limitation that also exists in the
# network template.
subnet_v4 = _get_first_network(network, 4)
subnet_v6 = _get_first_network(network, 6)
ifc_num += 1
link = None
# Get the VIF or physical NIC data
if subnet_v4 or subnet_v6:
link = _get_eth_link(vif, ifc_num)
links.append(link)
# Add IPv4 and IPv6 networks if they exist
if subnet_v4 and subnet_v4.get('ips'):
net_num += 1
nets.append(_get_nets(vif, subnet_v4, 4, net_num, link['id']))
services += [dns for dns in _get_dns_services(subnet_v4)
if dns not in services]
if subnet_v6 and subnet_v6.get('ips'):
net_num += 1
nets.append(_get_nets(vif, subnet_v6, 6, net_num, link['id']))
services += [dns for dns in _get_dns_services(subnet_v6)
if dns not in services]
return {
"links": links,
"networks": nets,
"services": services
}
def get_ec2_ip_info(network_info):
if not isinstance(network_info, model.NetworkInfo):
network_info = model.NetworkInfo.hydrate(network_info)
ip_info = {}
fixed_ips = network_info.fixed_ips()
ip_info['fixed_ips'] = [
ip['address'] for ip in fixed_ips if ip['version'] == 4]
ip_info['fixed_ip6s'] = [
ip['address'] for ip in fixed_ips if ip['version'] == 6]
ip_info['floating_ips'] = [
ip['address'] for ip in network_info.floating_ips()]
return ip_info
def _get_eth_link(vif, ifc_num):
"""Get a VIF or physical NIC representation.
:param vif: Neutron VIF
:param ifc_num: Interface index for generating name if the VIF's
'devname' isn't defined.
:return: A dict with 'id', 'vif_id', 'type', 'mtu' and
'ethernet_mac_address' as keys
"""
link_id = vif.get('devname')
if not link_id:
link_id = 'interface%d' % ifc_num
# Use 'phy' for physical links. Ethernet can be confusing
if vif.get('type') in model.LEGACY_EXPOSED_VIF_TYPES:
nic_type = vif.get('type')
else:
nic_type = 'phy'
link = {
'id': link_id,
'vif_id': vif['id'],
'type': nic_type,
'mtu': vif['network']['meta'].get('mtu'),
'ethernet_mac_address': vif.get('address'),
}
return link
def _get_nets(vif, subnet, version, net_num, link_id):
"""Get networks for the given VIF and subnet
:param vif: Neutron VIF
:param subnet: Neutron subnet
:param version: IP version as an int, either '4' or '6'
:param net_num: Network index for generating name of each network
:param link_id: Arbitrary identifier for the link the networks are
attached to
"""
net_type = ''
if subnet.get_meta('ipv6_address_mode') is not None:
net_type = '_%s' % subnet.get_meta('ipv6_address_mode')
elif subnet.get_meta('dhcp_server') is not None:
net_info = {
'id': 'network%d' % net_num,
'type': 'ipv%d_dhcp' % version,
'link': link_id,
'network_id': vif['network']['id']
}
return net_info
ip = subnet['ips'][0]
address = ip['address']
if version == 4:
netmask = model.get_netmask(ip, subnet)
elif version == 6:
netmask = str(subnet.as_netaddr().netmask)
net_info = {
'id': 'network%d' % net_num,
'type': 'ipv%d%s' % (version, net_type),
'link': link_id,
'ip_address': address,
'netmask': netmask,
'routes': _get_default_route(version, subnet),
'network_id': vif['network']['id']
}
# Add any additional routes beyond the default route
for route in subnet['routes']:
route_addr = netaddr.IPNetwork(route['cidr'])
new_route = {
'network': str(route_addr.network),
'netmask': str(route_addr.netmask),
'gateway': route['gateway']['address']
}
net_info['routes'].append(new_route)
net_info['services'] = _get_dns_services(subnet)
return net_info
def _get_default_route(version, subnet):
"""Get a default route for a network
:param version: IP version as an int, either '4' or '6'
:param subnet: Neutron subnet
"""
if subnet.get('gateway') and subnet['gateway'].get('address'):
gateway = subnet['gateway']['address']
else:
return []
if version == 4:
return [{
'network': '0.0.0.0',
'netmask': '0.0.0.0',
'gateway': gateway
}]
elif version == 6:
return [{
'network': '::',
'netmask': '::',
'gateway': gateway
}]
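# Hypothetical sketch (addresses made up): a plain dict can stand in for the
# Neutron subnet here, since _get_default_route only uses .get() and item
# access.
def _example_default_route():
    subnet = {'gateway': {'address': '10.0.0.1'}}
    return _get_default_route(4, subnet)
    # -> [{'network': '0.0.0.0', 'netmask': '0.0.0.0', 'gateway': '10.0.0.1'}]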
def _get_dns_services(subnet):
"""Get the DNS servers for the subnet."""
services = []
if not subnet.get('dns'):
return services
return [{'type': 'dns', 'address': ip.get('address')}
for ip in subnet['dns']]
def get_cached_vifs_with_vlan(network_info):
"""Generates a dict from a list of VIFs that has a vlan tag, with
MAC, VLAN as a key, value.
"""
if network_info is None:
return {}
return {vif['address']: vif['details']['vlan'] for vif in network_info
if vif.get('details', {}).get('vlan')}
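# Hypothetical sketch (MACs and VLAN IDs made up): plain dicts stand in for
# VIF model objects, since the function only uses .get() and item access.
def _example_cached_vifs_with_vlan():
    network_info = [
        {'address': 'fa:16:3e:00:00:01', 'details': {'vlan': 100}},
        {'address': 'fa:16:3e:00:00:02', 'details': {}},
    ]
    return get_cached_vifs_with_vlan(network_info)
    # -> {'fa:16:3e:00:00:01': 100}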
def get_cached_vifs_with_trusted(network_info):
"""Generates a dict from a list of VIFs that trusted, MAC as key"""
if network_info is None:
return {}
return {vif['address']: strutils.bool_from_string(
vif['profile'].get('trusted', 'False')) for vif in network_info
if vif.get('profile')}
|
|
#
# Copyright (c) 2017 Luis F. Simoes (github: @lfsimoes)
#
# Licensed under the MIT License. See the LICENSE file for details.
from math import sqrt, log
import os, inspect, pickle
import numpy as np
import PyKEP as pk
from .constants import *
from .lambert import lambert_eval, lambert_optimize_dt
from .multiobjective import rate_traj
# ==================================== ## ==================================== #
def seq(mission, incl_flyby=True):
"Sequence of asteroids visited in the mission."
return [mission[0][0]] + \
[l[0] for l in mission[1:]][::1 if incl_flyby else 2]
def final_mass(mission):
"Final mass, after completing the last leg."
return mission[-1][1]
def tof(mission):
"Time of flight. Duration, in days, of the complete mission."
return mission[-1][2] - mission[0][2]
def resource_rating(mission, **kwargs):
"Resource savings rating (softmin aggregation)."
return rate_traj(final_mass(mission), tof(mission), **kwargs)
def score(mission):
"Calculate the mission's score."
asts_rv = set()
asts_rvfb = set()
score = 0.0
for ast in seq(mission, incl_flyby=True)[1:]: # [1:] skips the Earth
sc = 0.0
if ast in asts_rv and ast not in asts_rvfb:
sc = 0.8 # flyby score
asts_rvfb.add(ast)
elif ast not in asts_rv:
sc = 0.2 # rendezvous score
asts_rv.add(ast)
if ast == 1:
sc *= 1.5 # bonus for the Beletskij asteroid
score += sc
return score
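# Hypothetical sketch (masses, epochs, dT and dV are made up): one complete
# visit of an asteroid -- a rendezvous leg followed by a self-flyby leg --
# is worth 0.2 + 0.8 = 1.0 under score(); asteroid 1 would get a 1.5x bonus.
def _example_score():
    mission = [
        (0,  4000.0, 59000.0,   0.0,   0.0),   # launch from Earth
        (42, 3900.0, 59200.0, 200.0, 900.0),   # rendezvous leg
        (42, 3850.0, 59260.0,  60.0, 400.0),   # self-flyby leg
    ]
    return score(mission)  # 1.0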
# ==================================== ## ==================================== #
def mission_to_1st_asteroid(ast1, legs1=None):
"""
Initialize a `mission` data structure containing a leg from Earth to the
first asteroid, extended with a self-flyby of the asteroid.
Rendezvous leg uses data obtained from a low-thrust global optimization,
and self-flyby leg determined via the linear acceleration model.
"""
# Summary data for the launch leg loaded from the results of
# mass/time-optimal low-thrust global optimizations described in:
# - http://dx.doi.org/10.2420/AF08.2014.45 (Sec. 2)
# - https://github.com/esa/pagmo/blob/master/src/problem/gtoc5_launch.cpp
if legs1 is None:
# get path to where the current module is located
path = os.path.abspath(os.path.dirname(inspect.getsourcefile(lambda:0)))
# legs1 = pickle.load(open(path + '/mass_optimal_1st_leg.pkl', 'rb'))
legs1 = pickle.load(open(path + '/time_optimal_1st_leg.pkl', 'rb'))
try:
# locate in `legs1` the tuple corresponding to the leg towards `ast1`
leg1 = next(ast_leg for ast_leg in legs1 if ast_leg[0] == ast1)
except StopIteration:
raise Exception('No known launch leg towards asteroid %s (id: %d)' % (
asteroids[ast1].name, ast1)) from None # (PEP 409)
# get the first leg's parameters
dep_m = MASS_MAX
(ast1, arr_m, dep_t, arr_t) = leg1
# add launch
earth_id = 0
mission = [(earth_id, dep_m, dep_t, 0.0, 0.0)]
# add rendezvous leg
mass_rv = arr_m - MASS_EQUIPMENT
dT_rv = arr_t - dep_t
dV_rv = I_sp * G0 * log(dep_m / arr_m)
mission.append(
( ast1, # asteroid UID
mass_rv, # mass at asteroid, after the payload delivery
arr_t, # Epoch at time of departure to self-flyby
dT_rv, # leg dT
dV_rv # leg dV
) )
# add self-flyby leg
mission.append(self_flyby_leg(mission))
return mission
# ==================================== ## ==================================== #
LEG_CACHE = {}
def reset_leg_cache():
global LEG_CACHE
LEG_CACHE = {}
def add_asteroid(mission, next_ast, use_cache=True, stats=None, **kwargs):
"""
Extend `mission` by visiting a new asteroid.
Adds rendezvous and self-flyby legs, thus fully scoring the asteroid.
"""
global LEG_CACHE
assert isinstance(next_ast, (int, np.integer)) and 0 < next_ast <= 7075, \
"Next asteroid should be given as an integer in {1, ..., 7075}."
next_ast = int(next_ast)
if stats is not None:
# increment total number of [rendezvous] legs defined, either from
# a new optimization, or from a cache hit. Ignores feasibility.
# (will be equal to stats.nr_legs_distinct if use_cache==False)
stats.nr_legs += 1
if use_cache:
dep_ast, dep_m, dep_t = mission[-1][:3]
leg_key = (dep_ast, next_ast, dep_t, dep_m)
if not use_cache or leg_key not in LEG_CACHE:
rv_leg = rendezvous_leg(mission, next_ast, stats=stats, **kwargs)
# add leg to cache. Will add a None value if leg is unfeasible
if use_cache:
LEG_CACHE[leg_key] = rv_leg
else:
# obtain the current leg's solution from the cache
rv_leg = LEG_CACHE[leg_key]
# if no feasible rendezvous leg could be found, `mission` is not extended
if rv_leg is None:
return False
# extend `mission` with the rendezvous and self-flyby legs
mission.append(rv_leg)
fb_leg = self_flyby_leg(mission)
mission.append(fb_leg)
# if the mission's available mass or time is exhausted, remove the newly
# added legs, and signal a failure to add and fully score the asteroid
if final_mass(mission) < MASS_MIN or tof(mission) > TIME_MAX:
mission[-2:] = []
return False
return True
# ==================================== ## ==================================== #
# ------------------------------------ # Define rendezvous and self-flyby legs
def rendezvous_leg(mission, next_ast, leg_dT=None, leg_dT_bounds=None,
obj_fun=None, stats=None, **kwargs):
"""
Define the leg that extends `mission` by performing a rendezvous with
asteroid `next_ast`.
"""
if leg_dT is None and leg_dT_bounds is None:
leg_dT_bounds = rvleg_dT_bounds
if obj_fun is None:
obj_fun = gtoc5_rendezvous
dep_ast, dep_m, dep_t = mission[-1][:3]
leg = lambert_optimize_dt(dep_ast, next_ast, dep_t, dep_m,
leg_dT=leg_dT, leg_dT_bounds=leg_dT_bounds,
obj_fun=obj_fun, mission=mission, stats=stats,
**kwargs)
if stats is not None:
stats.nr_legs_distinct += 1
stats.nr_legs_feasible += (1 if leg.feasible else 0)
if not leg.feasible:
return None
mass_rv = leg.arr_m - MASS_EQUIPMENT
return (
next_ast, # asteroid UID
mass_rv, # mass at asteroid, after the payload delivery
leg.arr_t, # Epoch at time of departure to self-flyby
leg.dT,
leg.dV
)
def self_flyby_leg(mission):
"""
Define a self-flyby leg from/to the most recently visited asteroid in
`mission`.
"""
ast, dep_m, dep_t = mission[-1][:3]
mass_fb = dep_m * mass_fb_mult - MASS_PENETRATOR
dT_fb = dep_m * dT_fb_mult
return (
ast, # asteroid UID
mass_fb, # mass at asteroid, after the penetrator's delivery
dep_t + dT_fb, # Epoch at the end of the flyby
dT_fb,
dV_fb
)
# ==================================== ## ==================================== #
# ------------------------------------ # Evaluation of rendezvous legs
class gtoc5_rendezvous(lambert_eval):
def __init__(self, leg_dT, dep_ast, arr_ast, *args, **kwargs):
# log asteroid ids; get their pk.planet instances
assert type(dep_ast) is int, "Expected departure asteroid's ID."
assert type(arr_ast) is int, "Expected arrival asteroid's ID."
self.dep_ast_id = dep_ast
self.arr_ast_id = arr_ast
dep_ast = asteroids[dep_ast]
arr_ast = asteroids[arr_ast]
go_up = super(gtoc5_rendezvous, self)
go_up.__init__(leg_dT, dep_ast, arr_ast, *args, **kwargs)
def select(self, lamb_sol, v_body1, v_body2, stats=None, *args, **kwargs):
"""
Selects one of the Lambert's problem solutions
(in case multiple revolution solutions were found).
Selection criterion: solution with the smallest dV.
"""
if stats is not None:
stats.nr_lambert += 1
# get, per solution, the spacecraft's velocity at each body
v1sc = lamb_sol.get_v1()
v2sc = lamb_sol.get_v2()
# determine each solution's dV
solutions = []
for v1, v2 in zip(v1sc, v2sc):
dV1 = sqrt(sum((a - b) * (a - b) for (a, b) in zip(v_body1, v1)))
dV2 = sqrt(sum((a - b) * (a - b) for (a, b) in zip(v_body2, v2)))
if self.dep_ast_id == 0:
# Earth departure
# on the first leg, we get up to 5 km/s for free
dV1 -= 5000.0
else:
# If we're not coming from Earth, we must take into account the
# dV given by the self-flyby leg just performed at the departure
# asteroid. That maneuver will deliver the projectile and leave
# the spacecraft with a dV of 400 m/s relative to the asteroid,
# in any direction we like.
dV1 -= dV_fb_min
solutions.append((dV1 + dV2, v1, v2))
# pick the solution with smallest dV, and log the spacecraft's
# velocities at each body
self.dV, *self.v_sc = min(solutions)
class gtoc5_rendezvous_agg(gtoc5_rendezvous):
def get_value(self):
"Key value determining the instance's solution quality."
# resource_rating should be maximized, so we return here instead its
# negative for minimization (by lambert_optimize_dt, for instance).
return - self.resource_rating
def inspect(self, mission=None, *args, **kwargs):
assert mission is not None, 'Unknown `mission`. ' \
'Resource rating cannot be calculated'
super(gtoc5_rendezvous_agg, self).inspect(*args, **kwargs)
if self.feasible:
# get aggregate rating, considering arrival mass after the current
# leg, and mission's time of flight including this leg's dT
self.resource_rating = rate_traj(self.arr_m, tof(mission) + self.dT)
def fail(self):
super(gtoc5_rendezvous_agg, self).fail()
# The resource rating ranges in [0, 1]. We signal failure to optimize
# leg with an out-of-bounds value below the worst possible rating (0.0).
self.resource_rating = -1.
|
|
from __future__ import division, unicode_literals
import base64
import io
import itertools
import os
import time
from .fragment import FragmentFD
from ..compat import (
compat_etree_fromstring,
compat_urlparse,
compat_urllib_error,
compat_urllib_parse_urlparse,
compat_struct_pack,
compat_struct_unpack,
)
from ..utils import (
encodeFilename,
fix_xml_ampersands,
sanitize_open,
xpath_text,
)
class DataTruncatedError(Exception):
pass
class FlvReader(io.BytesIO):
"""
Reader for Flv files
The file format is documented in https://www.adobe.com/devnet/f4v.html
"""
def read_bytes(self, n):
data = self.read(n)
if len(data) < n:
raise DataTruncatedError(
                'FlvReader error: need %d bytes but got only %d' % (
n, len(data)))
return data
# Utility functions for reading numbers and strings
def read_unsigned_long_long(self):
return compat_struct_unpack('!Q', self.read_bytes(8))[0]
def read_unsigned_int(self):
return compat_struct_unpack('!I', self.read_bytes(4))[0]
def read_unsigned_char(self):
return compat_struct_unpack('!B', self.read_bytes(1))[0]
def read_string(self):
res = b''
while True:
char = self.read_bytes(1)
if char == b'\x00':
break
res += char
return res
def read_box_info(self):
"""
Read a box and return the info as a tuple: (box_size, box_type, box_data)
"""
real_size = size = self.read_unsigned_int()
box_type = self.read_bytes(4)
header_end = 8
if size == 1:
real_size = self.read_unsigned_long_long()
header_end = 16
return real_size, box_type, self.read_bytes(real_size - header_end)
def read_asrt(self):
# version
self.read_unsigned_char()
# flags
self.read_bytes(3)
quality_entry_count = self.read_unsigned_char()
# QualityEntryCount
for i in range(quality_entry_count):
self.read_string()
segment_run_count = self.read_unsigned_int()
segments = []
for i in range(segment_run_count):
first_segment = self.read_unsigned_int()
fragments_per_segment = self.read_unsigned_int()
segments.append((first_segment, fragments_per_segment))
return {
'segment_run': segments,
}
def read_afrt(self):
# version
self.read_unsigned_char()
# flags
self.read_bytes(3)
# time scale
self.read_unsigned_int()
quality_entry_count = self.read_unsigned_char()
# QualitySegmentUrlModifiers
for i in range(quality_entry_count):
self.read_string()
fragments_count = self.read_unsigned_int()
fragments = []
for i in range(fragments_count):
first = self.read_unsigned_int()
first_ts = self.read_unsigned_long_long()
duration = self.read_unsigned_int()
if duration == 0:
discontinuity_indicator = self.read_unsigned_char()
else:
discontinuity_indicator = None
fragments.append({
'first': first,
'ts': first_ts,
'duration': duration,
'discontinuity_indicator': discontinuity_indicator,
})
return {
'fragments': fragments,
}
def read_abst(self):
# version
self.read_unsigned_char()
# flags
self.read_bytes(3)
self.read_unsigned_int() # BootstrapinfoVersion
# Profile,Live,Update,Reserved
flags = self.read_unsigned_char()
live = flags & 0x20 != 0
# time scale
self.read_unsigned_int()
# CurrentMediaTime
self.read_unsigned_long_long()
# SmpteTimeCodeOffset
self.read_unsigned_long_long()
self.read_string() # MovieIdentifier
server_count = self.read_unsigned_char()
# ServerEntryTable
for i in range(server_count):
self.read_string()
quality_count = self.read_unsigned_char()
# QualityEntryTable
for i in range(quality_count):
self.read_string()
# DrmData
self.read_string()
# MetaData
self.read_string()
segments_count = self.read_unsigned_char()
segments = []
for i in range(segments_count):
box_size, box_type, box_data = self.read_box_info()
assert box_type == b'asrt'
segment = FlvReader(box_data).read_asrt()
segments.append(segment)
fragments_run_count = self.read_unsigned_char()
fragments = []
for i in range(fragments_run_count):
box_size, box_type, box_data = self.read_box_info()
assert box_type == b'afrt'
fragments.append(FlvReader(box_data).read_afrt())
return {
'segments': segments,
'fragments': fragments,
'live': live,
}
def read_bootstrap_info(self):
total_size, box_type, box_data = self.read_box_info()
assert box_type == b'abst'
return FlvReader(box_data).read_abst()
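# Hypothetical sketch (payload bytes made up): a minimal F4V box is a 4-byte
# big-endian size (counting the 8 header bytes), a 4-byte type, then the
# payload, which is exactly what read_box_info() above parses.
def _example_read_box():
    payload = b'DATA'
    box = compat_struct_pack('!I', 8 + len(payload)) + b'mdat' + payload
    return FlvReader(box).read_box_info()
    # -> (12, b'mdat', b'DATA')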
def read_bootstrap_info(bootstrap_bytes):
return FlvReader(bootstrap_bytes).read_bootstrap_info()
def build_fragments_list(boot_info):
""" Return a list of (segment, fragment) for each fragment in the video """
res = []
segment_run_table = boot_info['segments'][0]
fragment_run_entry_table = boot_info['fragments'][0]['fragments']
first_frag_number = fragment_run_entry_table[0]['first']
fragments_counter = itertools.count(first_frag_number)
for segment, fragments_count in segment_run_table['segment_run']:
# In some live HDS streams (for example Rai), `fragments_count` is
# abnormal and causing out-of-memory errors. It's OK to change the
# number of fragments for live streams as they are updated periodically
if fragments_count == 4294967295 and boot_info['live']:
fragments_count = 2
for _ in range(fragments_count):
res.append((segment, next(fragments_counter)))
if boot_info['live']:
res = res[-2:]
return res
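# Hypothetical sketch (run counts made up): a single segment run saying
# "segment 1 carries 3 fragments", with fragment numbering starting at 10,
# expands to explicit (segment, fragment) pairs.
def _example_build_fragments_list():
    boot_info = {
        'segments': [{'segment_run': [(1, 3)]}],
        'fragments': [{'fragments': [{'first': 10}]}],
        'live': False,
    }
    return build_fragments_list(boot_info)
    # -> [(1, 10), (1, 11), (1, 12)]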
def write_unsigned_int(stream, val):
stream.write(compat_struct_pack('!I', val))
def write_unsigned_int_24(stream, val):
stream.write(compat_struct_pack('!I', val)[1:])
def write_flv_header(stream):
"""Writes the FLV header to stream"""
# FLV header
stream.write(b'FLV\x01')
stream.write(b'\x05')
stream.write(b'\x00\x00\x00\x09')
stream.write(b'\x00\x00\x00\x00')
def write_metadata_tag(stream, metadata):
"""Writes optional metadata tag to stream"""
SCRIPT_TAG = b'\x12'
FLV_TAG_HEADER_LEN = 11
if metadata:
stream.write(SCRIPT_TAG)
write_unsigned_int_24(stream, len(metadata))
stream.write(b'\x00\x00\x00\x00\x00\x00\x00')
stream.write(metadata)
write_unsigned_int(stream, FLV_TAG_HEADER_LEN + len(metadata))
def remove_encrypted_media(media):
return list(filter(lambda e: 'drmAdditionalHeaderId' not in e.attrib and
'drmAdditionalHeaderSetId' not in e.attrib,
media))
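# Hypothetical sketch (element attributes made up): media nodes carrying a
# DRM attribute are filtered out; stdlib Elements stand in for the parsed
# manifest nodes.
def _example_remove_encrypted_media():
    import xml.etree.ElementTree as etree
    clear = etree.Element('media', {'url': 'a.f4m'})
    drm = etree.Element('media', {'url': 'b.f4m',
                                  'drmAdditionalHeaderId': 'x'})
    return remove_encrypted_media([clear, drm])  # -> [clear]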
def _add_ns(prop):
return '{http://ns.adobe.com/f4m/1.0}%s' % prop
class F4mFD(FragmentFD):
"""
A downloader for f4m manifests or AdobeHDS.
"""
FD_NAME = 'f4m'
def _get_unencrypted_media(self, doc):
media = doc.findall(_add_ns('media'))
if not media:
self.report_error('No media found')
for e in (doc.findall(_add_ns('drmAdditionalHeader')) +
doc.findall(_add_ns('drmAdditionalHeaderSet'))):
# If id attribute is missing it's valid for all media nodes
# without drmAdditionalHeaderId or drmAdditionalHeaderSetId attribute
if 'id' not in e.attrib:
self.report_error('Missing ID in f4m DRM')
media = remove_encrypted_media(media)
if not media:
self.report_error('Unsupported DRM')
return media
def _get_bootstrap_from_url(self, bootstrap_url):
bootstrap = self.ydl.urlopen(bootstrap_url).read()
return read_bootstrap_info(bootstrap)
def _update_live_fragments(self, bootstrap_url, latest_fragment):
fragments_list = []
retries = 30
while (not fragments_list) and (retries > 0):
boot_info = self._get_bootstrap_from_url(bootstrap_url)
fragments_list = build_fragments_list(boot_info)
fragments_list = [f for f in fragments_list if f[1] > latest_fragment]
if not fragments_list:
# Retry after a while
time.sleep(5.0)
retries -= 1
if not fragments_list:
self.report_error('Failed to update fragments')
return fragments_list
def _parse_bootstrap_node(self, node, base_url):
        # Sometimes non-empty inline bootstrap info can be specified along
# with bootstrap url attribute (e.g. dummy inline bootstrap info
# contains whitespace characters in [1]). We will prefer bootstrap
# url over inline bootstrap info when present.
# 1. http://live-1-1.rutube.ru/stream/1024/HDS/SD/C2NKsS85HQNckgn5HdEmOQ/1454167650/S-s604419906/move/four/dirs/upper/1024-576p.f4m
bootstrap_url = node.get('url')
if bootstrap_url:
bootstrap_url = compat_urlparse.urljoin(
base_url, bootstrap_url)
boot_info = self._get_bootstrap_from_url(bootstrap_url)
else:
bootstrap_url = None
bootstrap = base64.b64decode(node.text.encode('ascii'))
boot_info = read_bootstrap_info(bootstrap)
return boot_info, bootstrap_url
def real_download(self, filename, info_dict):
man_url = info_dict['url']
requested_bitrate = info_dict.get('tbr')
self.to_screen('[%s] Downloading f4m manifest' % self.FD_NAME)
urlh = self.ydl.urlopen(man_url)
man_url = urlh.geturl()
# Some manifests may be malformed, e.g. prosiebensat1 generated manifests
# (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244
# and https://github.com/rg3/youtube-dl/issues/7823)
manifest = fix_xml_ampersands(urlh.read().decode('utf-8', 'ignore')).strip()
doc = compat_etree_fromstring(manifest)
formats = [(int(f.attrib.get('bitrate', -1)), f)
for f in self._get_unencrypted_media(doc)]
if requested_bitrate is None or len(formats) == 1:
# get the best format
formats = sorted(formats, key=lambda f: f[0])
rate, media = formats[-1]
else:
rate, media = list(filter(
lambda f: int(f[0]) == requested_bitrate, formats))[0]
base_url = compat_urlparse.urljoin(man_url, media.attrib['url'])
bootstrap_node = doc.find(_add_ns('bootstrapInfo'))
# From Adobe F4M 3.0 spec:
# The <baseURL> element SHALL be the base URL for all relative
# (HTTP-based) URLs in the manifest. If <baseURL> is not present, said
# URLs should be relative to the location of the containing document.
boot_info, bootstrap_url = self._parse_bootstrap_node(bootstrap_node, man_url)
live = boot_info['live']
metadata_node = media.find(_add_ns('metadata'))
if metadata_node is not None:
metadata = base64.b64decode(metadata_node.text.encode('ascii'))
else:
metadata = None
fragments_list = build_fragments_list(boot_info)
test = self.params.get('test', False)
if test:
# We only download the first fragment
fragments_list = fragments_list[:1]
total_frags = len(fragments_list)
# For some akamai manifests we'll need to add a query to the fragment url
akamai_pv = xpath_text(doc, _add_ns('pv-2.0'))
ctx = {
'filename': filename,
'total_frags': total_frags,
'live': live,
}
self._prepare_frag_download(ctx)
dest_stream = ctx['dest_stream']
write_flv_header(dest_stream)
if not live:
write_metadata_tag(dest_stream, metadata)
base_url_parsed = compat_urllib_parse_urlparse(base_url)
self._start_frag_download(ctx)
frags_filenames = []
while fragments_list:
seg_i, frag_i = fragments_list.pop(0)
name = 'Seg%d-Frag%d' % (seg_i, frag_i)
query = []
if base_url_parsed.query:
query.append(base_url_parsed.query)
if akamai_pv:
query.append(akamai_pv.strip(';'))
if info_dict.get('extra_param_to_segment_url'):
query.append(info_dict['extra_param_to_segment_url'])
url_parsed = base_url_parsed._replace(path=base_url_parsed.path + name, query='&'.join(query))
frag_filename = '%s-%s' % (ctx['tmpfilename'], name)
try:
success = ctx['dl'].download(frag_filename, {'url': url_parsed.geturl()})
if not success:
return False
(down, frag_sanitized) = sanitize_open(frag_filename, 'rb')
down_data = down.read()
down.close()
reader = FlvReader(down_data)
while True:
try:
_, box_type, box_data = reader.read_box_info()
except DataTruncatedError:
if test:
# In tests, segments may be truncated, and thus
# FlvReader may not be able to parse the whole
# chunk. If so, write the segment as is
# See https://github.com/rg3/youtube-dl/issues/9214
dest_stream.write(down_data)
break
raise
if box_type == b'mdat':
dest_stream.write(box_data)
break
if live:
os.remove(encodeFilename(frag_sanitized))
else:
frags_filenames.append(frag_sanitized)
except (compat_urllib_error.HTTPError, ) as err:
if live and (err.code == 404 or err.code == 410):
# We didn't keep up with the live window. Continue
# with the next available fragment.
msg = 'Fragment %d unavailable' % frag_i
self.report_warning(msg)
fragments_list = []
else:
raise
if not fragments_list and not test and live and bootstrap_url:
fragments_list = self._update_live_fragments(bootstrap_url, frag_i)
total_frags += len(fragments_list)
if fragments_list and (fragments_list[0][1] > frag_i + 1):
msg = 'Missed %d fragments' % (fragments_list[0][1] - (frag_i + 1))
self.report_warning(msg)
self._finish_frag_download(ctx)
for frag_file in frags_filenames:
os.remove(encodeFilename(frag_file))
return True
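# --- Illustrative sketch (not part of the downloader above) ---
# real_download() builds each fragment URL by appending the name
# 'Seg<seg>-Frag<frag>' directly to the manifest's base path and joining
# the collected query parts with '&'. A minimal standalone equivalent,
# with a purely hypothetical URL:
try:
    from urllib.parse import urlparse  # Python 3
except ImportError:
    from urlparse import urlparse  # Python 2

def _example_fragment_url(base_url, seg_i, frag_i, extra_query=None):
    parsed = urlparse(base_url)
    query = [q for q in (parsed.query, extra_query) if q]
    name = 'Seg%d-Frag%d' % (seg_i, frag_i)
    return parsed._replace(path=parsed.path + name,
                           query='&'.join(query)).geturl()

# _example_fragment_url('http://example.com/hds/720p?token=abc', 1, 5)
# -> 'http://example.com/hds/720pSeg1-Frag5?token=abc'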
|
|
from __future__ import absolute_import
import logging
from weblib.etree import get_node_text
from weblib.text import find_number
from weblib.const import NULL
from weblib.error import DataNotFound
from weblib.encoding import make_unicode
import six
from grab.util.misc import deprecated
from grab.error import GrabMisuseError
from grab import error
class DeprecatedThings(object):
    """
    This superclass contains all deprecated things that are
    still in the Grab class for backward compatibility.
    """
    __slots__ = ()
# Deprecated methods from grab.ext.text module
# ********************************************
@deprecated(use_instead='grab.doc.text_search')
def search(self, anchor, byte=False):
return self.doc.text_search(anchor, byte=byte)
@deprecated(use_instead='grab.doc.text_assert')
def assert_substring(self, anchor, byte=False):
return self.doc.text_assert(anchor, byte=byte)
@deprecated(use_instead='grab.doc.text_assert_any')
def assert_substrings(self, anchors, byte=False):
return self.doc.text_assert_any(anchors, byte=byte)
# Deprecated methods from grab.ext.rex module
# ********************************************
@deprecated(use_instead='grab.doc.rex_text')
def rex_text(self, regexp, flags=0, byte=False, default=NULL):
return self.doc.rex_text(regexp, flags=flags,
byte=byte, default=default)
@deprecated(use_instead='grab.doc.rex_search')
def rex(self, regexp, flags=0, byte=False, default=NULL):
return self.doc.rex_search(regexp, flags=flags,
byte=byte, default=default)
@deprecated(use_instead='grab.doc.rex_assert')
def assert_rex(self, regexp, byte=False):
return self.doc.rex_assert(regexp, byte=byte)
# Deprecated methods from grab.ext.lxml
# *************************************
@property
@deprecated(use_instead='grab.doc.tree')
def tree(self):
return self.doc.tree
@deprecated(use_instead='grab.doc.build_html_tree')
def build_html_tree(self):
return self.doc.build_html_tree()
@property
@deprecated(use_instead='grab.doc.xml_tree')
def xml_tree(self):
return self.doc.xml_tree
@deprecated(use_instead='grab.doc.build_xml_tree()')
def build_xml_tree(self):
return self.doc.build_xml_tree()
@deprecated()
def find_link(self, href_pattern, make_absolute=True):
"""
Find a link in the response body whose href value matches ``href_pattern``.
Returns the found url or None.
"""
if make_absolute:
self.tree.make_links_absolute(self.response.url)
if isinstance(href_pattern, six.text_type):
raise GrabMisuseError('Method `find_link` accepts only '
'byte-string argument')
href_pattern = make_unicode(href_pattern)
for elem, attr, link, pos in self.tree.iterlinks():
if elem.tag == 'a' and href_pattern in link:
return link
return None
@deprecated()
def find_link_rex(self, rex, make_absolute=True):
"""
Find a link matching the given regular expression in the response body.
Returns the found url or None.
"""
if make_absolute:
self.tree.make_links_absolute(self.response.url)
for elem, attr, link, pos in self.tree.iterlinks():
if elem.tag == 'a':
match = rex.search(link)
if match:
# That does not work for string object
# link.match = match
return link
return None
@deprecated(use_instead='grab.doc.select().node()')
def xpath(self, path, default=NULL, filter=None):
if filter is not None:
raise GrabMisuseError('Argument `filter` is not supported anymore')
return self.doc.select(path).node(default=default)
@deprecated(use_instead='grab.doc.select().one()')
def xpath_one(self, path, default=NULL, filter=None):
if filter is not None:
raise GrabMisuseError('Argument `filter` is not supported anymore')
return self.doc.select(path).node(default=default)
@deprecated(use_instead='grab.doc.select()')
def xpath_list(self, path, filter=None):
if filter is not None:
raise GrabMisuseError('Argument `filter` is not supported anymore')
return self.doc.select(path).node_list()
@deprecated(use_instead='grab.doc.select().text()')
def xpath_text(self, path, default=NULL, filter=None, smart=False,
normalize_space=True):
if filter is not None:
raise GrabMisuseError('Argument `filter` is not supported anymore')
return self.doc.select(path).text(default=default, smart=smart,
normalize_space=normalize_space)
@deprecated(use_instead='grab.doc.select().number()')
def xpath_number(self, path, default=NULL, filter=None,
ignore_spaces=False,
smart=False, make_int=True):
if filter is not None:
raise GrabMisuseError('Argument `filter` is not supported anymore')
return self.doc.select(path).number(default=default, smart=smart,
ignore_spaces=ignore_spaces,
make_int=make_int)
@deprecated(use_instead='grab.doc.select().exists()')
def xpath_exists(self, path):
return self.doc.select(path).exists()
@deprecated()
def css(self, *args, **kwargs):
return self.css_one(*args, **kwargs)
@deprecated()
def css_one(self, path, default=NULL):
"""
Get the first element which matches the given css path
or raise DataNotFound.
"""
try:
return self.css_list(path)[0]
except IndexError:
if default is NULL:
raise DataNotFound('CSS path not found: %s' % path)
else:
return default
@deprecated()
def css_list(self, path):
"""
Find all elements which match the given css path.
"""
return self.tree.cssselect(path)
@deprecated()
def css_text(self, path, default=NULL, smart=False, normalize_space=True):
"""
Get the normalized text of the node which matches the css path.
"""
try:
return get_node_text(self.css_one(path), smart=smart,
normalize_space=normalize_space)
except IndexError:
if default is NULL:
raise
else:
return default
@deprecated()
def css_number(self, path, default=NULL, ignore_spaces=False, smart=False,
make_int=True):
"""
Find a number in the normalized text of the node which
matches the given css path.
"""
try:
text = self.css_text(path, smart=smart)
return find_number(text, ignore_spaces=ignore_spaces,
make_int=make_int)
except IndexError:
if default is NULL:
raise
else:
return default
@deprecated()
def assert_css(self, path):
"""
If css path is not found then raise `DataNotFound` exception.
"""
self.css_one(path)
@deprecated()
def assert_xpath(self, path):
"""
If xpath path is not found then raise `DataNotFound` exception.
"""
self.xpath_one(path)
@deprecated()
def css_exists(self, path):
"""
Return True if at least one element with specified css path exists.
"""
return len(self.css_list(path)) > 0
@deprecated()
def strip_tags(self, content, smart=False):
"""
Strip tags from the HTML content.
"""
from lxml.html import fromstring
return get_node_text(fromstring(content), smart=smart)
# Methods from deprecated grab.ext.pquery module
# **********************************************
@deprecated(use_instead='grab.doc.pyquery()')
def pyquery(self, query):
return self.doc.pyquery(query)
# Response related things
# ***********************
# Backward compat.
def _get_response(self):
return self.doc
def _set_response(self, val):
self.doc = val
response = property(_get_response, _set_response)
@deprecated(use_instead='grab.setup_document')
def fake_response(self, *args, **kwargs):
return self.setup_document(*args, **kwargs)
# Cookies
# *******
@deprecated(use_instead='grab.cookies.load_from_file')
def load_cookies(self, path, file_required=True):
self.cookies.load_from_file(path)
@deprecated(use_instead='grab.cookies.save_to_file')
def dump_cookies(self, path):
self.cookies.save_to_file(path)
@deprecated(use_instead='grab.proxylist.load_file OR '
'grab.proxylist.load_url')
def load_proxylist(self, source, source_type, proxy_type='http',
auto_init=True, auto_change=True,
**kwargs):
# self.proxylist = ProxyList(source, source_type,
# proxy_type=proxy_type, **kwargs)
if source_type == 'text_file':
self.proxylist.load_file(source, proxy_type=proxy_type)
elif source_type == 'url':
self.proxylist.load_url(source, proxy_type=proxy_type)
else:
raise error.GrabMisuseError(
'Unknown proxy source type: %s' % source_type)
# self.proxylist.setup(auto_change=auto_change, auto_init=auto_init)
self.setup(proxy_auto_change=auto_change)
if not auto_change and auto_init:
self.change_proxy()
# Methods from deprecated grab.ext.form module
# **********************************************
@deprecated(use_instead='grab.doc.choose_form')
def choose_form(self, number=None, id=None, name=None, xpath=None):
return self.doc.choose_form(number=number, id=id,
name=name, xpath=xpath)
@property
def form(self):
logging.error('This attribute is deprecated. '
'Use grab.doc.form instead.')
return self.doc.form
@deprecated(use_instead='grab.doc.set_input')
def set_input(self, name, value):
return self.doc.set_input(name, value)
@deprecated(use_instead='grab.doc.set_input_by_id')
def set_input_by_id(self, _id, value):
return self.doc.set_input_by_id(_id, value)
@deprecated(use_instead='grab.doc.set_input_by_number')
def set_input_by_number(self, number, value):
return self.doc.set_input_by_number(number, value)
@deprecated(use_instead='grab.doc.set_input_by_xpath')
def set_input_by_xpath(self, xpath, value):
return self.doc.set_input_by_xpath(xpath, value)
@deprecated(use_instead='grab.doc.submit')
def submit(self, submit_name=None, make_request=True,
url=None, extra_post=None):
return self.doc.submit(submit_name=submit_name,
make_request=make_request,
url=url, extra_post=extra_post)
@deprecated(use_instead='grab.doc.form_fields')
def form_fields(self):
return self.doc.form_fields()
@deprecated(use_instead='grab.doc.choose_form_by_element')
def choose_form_by_element(self, xpath):
return self.doc.choose_form_by_element(xpath)
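# --- Illustrative sketch (not Grab's actual implementation) ---
# Every method above is wrapped with @deprecated from grab.util.misc.
# A minimal version of such a decorator, assuming it only needs to emit
# a warning and forward the call, might look like this:
import functools
import warnings

def deprecated_sketch(use_instead=None):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            msg = 'Method `%s` is deprecated.' % func.__name__
            if use_instead:
                msg += ' Use `%s` instead.' % use_instead
            warnings.warn(msg, DeprecationWarning, stacklevel=2)
            return func(*args, **kwargs)
        return wrapper
    return decorator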
|
|
"Materialized Path Trees"
import operator
from functools import reduce  # reduce is a builtin on Python 2
from numconv import NumConv
from django.core import serializers
from django.db import models, transaction, connection
from django.db.models import Q
from treebeard.models import Node
from treebeard.exceptions import InvalidMoveToDescendant, PathOverflow
class MP_NodeQuerySet(models.query.QuerySet):
"""
Custom queryset for the tree node manager.
Needed only for the customized delete method.
"""
def delete(self, known_children=False):
"""
Custom delete method, will remove all descendant nodes to ensure a
consistent tree (no orphans)
:returns: ``None``
"""
if known_children:
# we already know the children, let's call the default django
# delete method and let it handle the removal of the user's
# foreign keys...
super(MP_NodeQuerySet, self).delete()
else:
# we'll have to manually run through all the nodes that are going
# to be deleted and remove nodes from the list if an ancestor is
# already getting removed, since that would be redundant
removed = {}
for node in self.order_by('depth', 'path'):
found = False
for depth in range(1, len(node.path) // node.steplen):
path = node._get_basepath(node.path, depth)
if path in removed:
# we are already removing a parent of this node
# skip
found = True
break
if not found:
removed[node.path] = node
# ok, got the minimal list of nodes to remove...
# we must also remove their children
# and update every parent node's numchild attribute
# LOTS OF FUN HERE!
parents = {}
toremove = []
for path, node in removed.items():
parentpath = node._get_basepath(node.path, node.depth - 1)
if parentpath:
if parentpath not in parents:
parents[parentpath] = node.get_parent(True)
parent = parents[parentpath]
if parent and parent.numchild > 0:
parent.numchild -= 1
parent.save()
if not node.is_leaf():
toremove.append(Q(path__startswith=node.path))
else:
toremove.append(Q(path=node.path))
# Note: django will handle this as a SELECT followed by a DELETE of
# ids; custom sql here could avoid the extra query.
if toremove:
self.model.objects.filter(
reduce(operator.or_, toremove)).delete(known_children=True)
transaction.commit_unless_managed()
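# --- Illustrative sketch (standalone, mirrors the dedup step above) ---
# delete() skips a node when any ancestor prefix of its materialized
# path is already marked for removal. The same check with plain strings
# (steplen=4 as in MP_Node below):
def covered_by_removed(path, removed_paths, steplen=4):
    # check every proper ancestor prefix (depth 1 .. depth-1)
    for depth in range(1, len(path) // steplen):
        if path[:depth * steplen] in removed_paths:
            return True
    return False

# covered_by_removed('000100020003', {'0001'})      -> True
# covered_by_removed('000100020003', {'00010003'})  -> False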
class MP_NodeManager(models.Manager):
"Custom manager for nodes."
def get_query_set(self):
"Sets the custom queryset as the default."
return MP_NodeQuerySet(self.model).order_by('path')
class MP_Node(Node):
"Abstract model to create your own Materialized Path Trees."
steplen = 4
alphabet = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
node_order_by = []
path = models.CharField(max_length=255, unique=True)
depth = models.PositiveIntegerField()
numchild = models.PositiveIntegerField(default=0)
objects = MP_NodeManager()
numconv_obj_ = None
@classmethod
def _int2str(cls, num):
return cls.numconv_obj().int2str(num)
@classmethod
def _str2int(cls, num):
return cls.numconv_obj().str2int(num)
@classmethod
def numconv_obj(cls):
if cls.numconv_obj_ is None:
cls.numconv_obj_ = NumConv(len(cls.alphabet), cls.alphabet)
return cls.numconv_obj_
@classmethod
def add_root(cls, **kwargs):
"""
Adds a root node to the tree.
:raise PathOverflow: when no more root objects can be added
"""
# do we have a root node already?
last_root = cls.get_last_root_node()
if last_root and last_root.node_order_by:
# there are root nodes and node_order_by has been set
# delegate sorted insertion to add_sibling
return last_root.add_sibling('sorted-sibling', **kwargs)
if last_root:
# adding the new root node as the last one
newpath = cls._inc_path(last_root.path)
else:
# adding the first root node
newpath = cls._get_path(None, 1, 1)
# creating the new object
newobj = cls(**kwargs)
newobj.depth = 1
newobj.path = newpath
# saving the instance before returning it
newobj.save()
transaction.commit_unless_managed()
return newobj
@classmethod
def dump_bulk(cls, parent=None, keep_ids=True):
"Dumps a tree branch to a python data structure."
# Because of fix_tree, this method assumes that the depth
# and numchild properties in the nodes can be incorrect,
# so no helper methods are used
qset = cls._get_serializable_model().objects.all()
if parent:
qset = qset.filter(path__startswith=parent.path)
ret, lnk = [], {}
for pyobj in serializers.serialize('python', qset):
# django's serializer stores the attributes in 'fields'
fields = pyobj['fields']
path = fields['path']
depth = len(path) // cls.steplen
# this will be useless in load_bulk
del fields['depth']
del fields['path']
del fields['numchild']
if 'id' in fields:
# this happens immediately after a load_bulk
del fields['id']
newobj = {'data': fields}
if keep_ids:
newobj['id'] = pyobj['pk']
if (not parent and depth == 1) or \
(parent and len(path) == len(parent.path)):
ret.append(newobj)
else:
parentpath = cls._get_basepath(path, depth - 1)
parentobj = lnk[parentpath]
if 'children' not in parentobj:
parentobj['children'] = []
parentobj['children'].append(newobj)
lnk[path] = newobj
return ret
@classmethod
def find_problems(cls):
"""
Checks for problems in the tree structure, problems can occur when:
1. your code breaks and you get incomplete transactions (always
use transactions!)
2. changing the ``steplen`` value in a model (you must
:meth:`dump_bulk` first, change ``steplen`` and then
:meth:`load_bulk`)
:returns: A tuple of five lists:
1. a list of ids of nodes with characters not found in the
``alphabet``
2. a list of ids of nodes with a wrong ``path`` length
according to ``steplen``
3. a list of ids of orphaned nodes
4. a list of ids of nodes with the wrong depth value for
their path
5. a list of ids of nodes that report a wrong number of children
"""
evil_chars, bad_steplen, orphans = [], [], []
wrong_depth, wrong_numchild = [], []
for node in cls.objects.all():
found_error = False
for char in node.path:
if char not in cls.alphabet:
evil_chars.append(node.id)
found_error = True
break
if found_error:
continue
if len(node.path) % cls.steplen:
bad_steplen.append(node.id)
continue
try:
node.get_parent(True)
except cls.DoesNotExist:
orphans.append(node.id)
continue
if node.depth != len(node.path) // cls.steplen:
wrong_depth.append(node.id)
continue
real_numchild = cls.objects.filter(
path__range=cls._get_children_path_interval(node.path)).extra(
where=['LENGTH(path)/%d=%d' % (cls.steplen,
node.depth + 1)]).count()
if real_numchild != node.numchild:
wrong_numchild.append(node.id)
continue
return evil_chars, bad_steplen, orphans, wrong_depth, wrong_numchild
@classmethod
def fix_tree(cls, destructive=False):
"""
Solves some problems that can appear when transactions are not used and
a piece of code breaks, leaving the tree in an inconsistent state.
The problems this method solves are:
1. Nodes with incorrect ``depth`` or ``numchild`` values due to
incorrect code and lack of database transactions.
2. "Holes" in the tree. This is normal if you move/delete nodes a
lot. Holes in a tree don't affect performance.
3. Incorrect ordering of nodes when ``node_order_by`` is enabled.
Ordering is enforced on *node insertion*, so if an attribute in
``node_order_by`` is modified after the node is inserted, the
tree ordering will be inconsistent.
:param destructive:
A boolean value. If True, a more aggressive fix_tree method will be
attempted. If False (the default), it will use a safe (and fast!)
fix approach, but it will only fix incorrect ``depth`` and
``numchild`` values; it won't fix tree holes or broken path
ordering.
.. warning::
Currently what the ``destructive`` method does is:
1. Back up the tree with :meth:`dump_bulk`
2. Remove all nodes in the tree.
3. Restore the tree with :meth:`load_bulk`
So, even though the primary keys of your nodes will be preserved,
this method isn't foreign-key friendly. That needs complex
in-place tree reordering, not available at the moment (hint:
patches are welcome).
"""
if destructive:
dump = cls.dump_bulk(None, True)
cls.objects.all().delete()
cls.load_bulk(dump, None, True)
else:
cursor = connection.cursor()
# fix the depth field
# we need the WHERE to speed up postgres
sql = "UPDATE %s " \
"SET depth=LENGTH(path)/%%s " \
"WHERE depth!=LENGTH(path)/%%s" % (
connection.ops.quote_name(cls._meta.db_table), )
vals = [cls.steplen, cls.steplen]
cursor.execute(sql, vals)
# fix the numchild field
vals = ['_' * cls.steplen]
# the cake and sql portability are a lie
if cls.get_database_engine() == 'mysql':
sql = "SELECT tbn1.path, tbn1.numchild, (" \
"SELECT COUNT(1) " \
"FROM %(table)s AS tbn2 " \
"WHERE tbn2.path LIKE " \
"CONCAT(tbn1.path, %%s)) AS real_numchild " \
"FROM %(table)s AS tbn1 " \
"HAVING tbn1.numchild != real_numchild" % {
'table': connection.ops.quote_name(cls._meta.db_table)}
else:
subquery = "(SELECT COUNT(1) FROM %(table)s AS tbn2" \
" WHERE tbn2.path LIKE tbn1.path||%%s)"
sql = "SELECT tbn1.path, tbn1.numchild, " + subquery + " " \
"FROM %(table)s AS tbn1 " \
"WHERE tbn1.numchild != " + subquery
sql = sql % {
'table': connection.ops.quote_name(cls._meta.db_table)}
# we include the subquery twice
vals *= 2
cursor.execute(sql, vals)
sql = "UPDATE %(table)s " \
"SET numchild=%%s " \
"WHERE path=%%s" % {
'table': connection.ops.quote_name(cls._meta.db_table)}
for node_data in cursor.fetchall():
vals = [node_data[2], node_data[0]]
cursor.execute(sql, vals)
transaction.commit_unless_managed()
@classmethod
def get_tree(cls, parent=None):
"""
:returns: A *queryset* of nodes ordered as DFS, including the parent.
If no parent is given, the entire tree is returned.
"""
if parent is None:
# return the entire tree
return cls.objects.all()
if not parent.is_leaf():
return cls.objects.filter(path__startswith=parent.path,
depth__gte=parent.depth)
return cls.objects.filter(pk=parent.id)
@classmethod
def get_root_nodes(cls):
":returns: A queryset containing the root nodes in the tree."
return cls.objects.filter(depth=1)
@classmethod
def get_descendants_group_count(cls, parent=None):
"""
Helper for a very common case: get a group of siblings and the number
of *descendants* in every sibling.
"""
#~
# disclaimer: this is the FOURTH implementation I wrote for this
# function. I really tried to make it return a queryset, but doing so
# with a *single* query isn't trivial with Django's ORM.
# ok, I DID manage to make Django's ORM return a queryset here,
# defining two querysets, passing one subquery in the tables parameters
# of .extra() of the second queryset, using the undocumented order_by
# feature, and using a HORRIBLE hack to avoid django quoting the
# subquery as a table, BUT (and there is always a but) the hack didn't
# survive turning the QuerySet into a ValuesQuerySet, so I just used
# good old SQL.
# NOTE: in case there is interest, the hack to avoid django quoting the
# subquery as a table, was adding the subquery to the alias cache of
# the queryset's query object:
#
# qset.query.quote_cache[subquery] = subquery
#
# If there is a better way to do this in an UNMODIFIED django 1.0, let
# me know.
#~
if parent:
depth = parent.depth + 1
params = cls._get_children_path_interval(parent.path)
extrand = 'AND path BETWEEN %s AND %s'
else:
depth = 1
params = []
extrand = ''
sql = 'SELECT * FROM %(table)s AS t1 INNER JOIN ' \
' (SELECT ' \
' SUBSTR(path, 1, %(subpathlen)s) AS subpath, ' \
' COUNT(1)-1 AS count ' \
' FROM %(table)s ' \
' WHERE depth >= %(depth)s %(extrand)s' \
' GROUP BY subpath) AS t2 ' \
' ON t1.path=t2.subpath ' \
' ORDER BY t1.path' % {
'table': connection.ops.quote_name(cls._meta.db_table),
'subpathlen': depth * cls.steplen,
'depth': depth,
'extrand': extrand}
cursor = connection.cursor()
cursor.execute(sql, params)
ret = []
field_names = [field[0] for field in cursor.description]
for node_data in cursor.fetchall():
node = cls(**dict(zip(field_names, node_data[:-2])))
node.descendants_count = node_data[-1]
ret.append(node)
transaction.commit_unless_managed()
return ret
def get_depth(self):
":returns: the depth (level) of the node"
return self.depth
def get_siblings(self):
"""
:returns: A queryset of all the node's siblings, including the node
itself.
"""
qset = self.__class__.objects.filter(depth=self.depth)
if self.depth > 1:
# making sure the non-root nodes share a parent
parentpath = self._get_basepath(self.path, self.depth - 1)
qset = qset.filter(
path__range=self._get_children_path_interval(parentpath))
return qset
def get_children(self):
":returns: A queryset of all the node's children"
if self.is_leaf():
return self.__class__.objects.none()
return self.__class__.objects.filter(depth=self.depth + 1,
path__range=self._get_children_path_interval(self.path))
def get_next_sibling(self):
"""
:returns: The node's next sibling, or None if it is the rightmost
sibling.
"""
try:
return self.get_siblings().filter(path__gt=self.path)[0]
except IndexError:
return None
def get_descendants(self):
"""
:returns: A queryset of all the node's descendants as DFS, doesn't
include the node itself
"""
return self.__class__.get_tree(self).exclude(pk=self.id)
def get_prev_sibling(self):
"""
:returns: The node's previous sibling, or None if it is the leftmost
sibling.
"""
try:
return self.get_siblings().filter(path__lt=self.path).reverse()[0]
except IndexError:
return None
def get_children_count(self):
"""
:returns: The number of the node's children, calculated in the most
efficient possible way.
"""
return self.numchild
def is_sibling_of(self, node):
"""
:returns: ``True`` if the node is a sibling of the node given as an
argument, else ``False``
"""
aux = self.depth == node.depth
if self.depth > 1:
# making sure the non-root nodes share a parent
parentpath = self._get_basepath(self.path, self.depth - 1)
return aux and node.path.startswith(parentpath)
return aux
def is_child_of(self, node):
"""
:returns: ``True`` if the node is a child of the node given as an
argument, else ``False``
"""
return (self.path.startswith(node.path) and
self.depth == node.depth + 1)
def is_descendant_of(self, node):
"""
:returns: ``True`` if the node is a descendant of the node given
as an argument, else ``False``
"""
return self.path.startswith(node.path) and self.depth > node.depth
def add_child(self, **kwargs):
"""
Adds a child to the node.
:raise PathOverflow: when no more child nodes can be added
"""
if not self.is_leaf() and self.node_order_by:
# there are child nodes and node_order_by has been set
# delegate sorted insertion to add_sibling
return self.get_last_child().add_sibling('sorted-sibling',
**kwargs)
# creating a new object
newobj = self.__class__(**kwargs)
newobj.depth = self.depth + 1
if not self.is_leaf():
# adding the new child as the last one
newobj.path = self._inc_path(self.get_last_child().path)
else:
# the node had no children, adding the first child
newobj.path = self._get_path(self.path, newobj.depth, 1)
if len(newobj.path) > \
newobj.__class__._meta.get_field('path').max_length:
raise PathOverflow('The new node is too deep in the tree, try'
' increasing the path.max_length property'
' and UPDATE your database')
# saving the instance before returning it
newobj.save()
newobj._cached_parent_obj = self
# we increase the numchild value of the object in memory, but can't
# save because that makes this django 1.0 compatible code explode
self.numchild += 1
# we need to use a raw query
sql = "UPDATE %(table)s " \
"SET numchild=numchild+1 " \
"WHERE path=%%s" % {
'table': connection.ops.quote_name(
self.__class__._meta.db_table)}
cursor = connection.cursor()
cursor.execute(sql, [self.path])
transaction.commit_unless_managed()
return newobj
def add_sibling(self, pos=None, **kwargs):
"""
Adds a new node as a sibling to the current node object.
:raise PathOverflow: when the library can't make room for the
node's new position
"""
pos = self._fix_add_sibling_opts(pos)
# creating a new object
newobj = self.__class__(**kwargs)
newobj.depth = self.depth
if pos == 'sorted-sibling':
siblings = self.get_sorted_pos_queryset(
self.get_siblings(), newobj)
try:
newpos = self._get_lastpos_in_path(siblings.all()[0].path)
except IndexError:
newpos = None
if newpos is None:
pos = 'last-sibling'
else:
newpos, siblings = None, []
stmts = []
_, newpath = self._move_add_sibling_aux(pos, newpos,
self.depth, self, siblings, stmts, None, False)
parentpath = self._get_basepath(newpath, self.depth - 1)
if parentpath:
stmts.append(self._get_sql_update_numchild(parentpath, 'inc'))
cursor = connection.cursor()
for sql, vals in stmts:
cursor.execute(sql, vals)
# saving the instance before returning it
newobj.path = newpath
newobj.save()
transaction.commit_unless_managed()
return newobj
def get_root(self):
":returns: the root node for the current node object."
return self.__class__.objects.get(path=self.path[0:self.steplen])
def get_ancestors(self):
"""
:returns: A queryset containing the current node object's ancestors,
starting by the root node and descending to the parent.
"""
paths = [self.path[0:pos]
for pos in range(0, len(self.path), self.steplen)[1:]]
return self.__class__.objects.filter(path__in=paths).order_by('depth')
def get_parent(self, update=False):
"""
:returns: the parent node of the current node object.
Caches the result in the object itself to help in loops.
"""
depth = len(self.path) // self.steplen
if depth <= 1:
return
try:
if update:
del self._cached_parent_obj
else:
return self._cached_parent_obj
except AttributeError:
pass
parentpath = self._get_basepath(self.path, depth - 1)
self._cached_parent_obj = self.__class__.objects.get(path=parentpath)
return self._cached_parent_obj
def move(self, target, pos=None):
"""
Moves the current node and all its descendants to a new position
relative to another node.
:raise PathOverflow: when the library can't make room for the
node's new position
"""
pos = self._fix_move_opts(pos)
oldpath = self.path
# initialize variables and if moving to a child, updates "move to
# child" to become a "move to sibling" if possible (if it can't
# be done, it means that we are adding the first child)
pos, target, newdepth, siblings, newpos = self._fix_move_to_child(pos,
target, target.depth)
if target.is_descendant_of(self):
raise InvalidMoveToDescendant("Can't move node to a descendant.")
if oldpath == target.path and (
(pos == 'left') or \
(pos in ('right', 'last-sibling') and \
target.path == target.get_last_sibling().path) or \
(pos == 'first-sibling' and \
target.path == target.get_first_sibling().path)):
# special cases, not actually moving the node so no need to UPDATE
return
if pos == 'sorted-sibling':
siblings = self.get_sorted_pos_queryset(
target.get_siblings(), self)
try:
newpos = self._get_lastpos_in_path(siblings.all()[0].path)
except IndexError:
newpos = None
if newpos is None:
pos = 'last-sibling'
stmts = []
# generate the sql that will do the actual moving of nodes
oldpath, newpath = self._move_add_sibling_aux(pos, newpos, newdepth,
target, siblings, stmts, oldpath, True)
# updates needed for mysql and children count in parents
self._updates_after_move(oldpath, newpath, stmts)
cursor = connection.cursor()
for sql, vals in stmts:
cursor.execute(sql, vals)
transaction.commit_unless_managed()
@classmethod
def _get_basepath(cls, path, depth):
":returns: The base path of another path up to a given depth"
if path:
return path[0:(depth) * cls.steplen]
return ''
@classmethod
def _get_path(cls, path, depth, newstep):
"""
Builds a path given some values
:param path: the base path
:param depth: the depth of the node
:param newstep: the value (integer) of the new step
"""
parentpath = cls._get_basepath(path, depth - 1)
key = cls._int2str(newstep)
return '%s%s%s' % (parentpath,
'0' * (cls.steplen - len(key)),
key)
@classmethod
def _inc_path(cls, path):
":returns: The path of the next sibling of a given node path."
newpos = cls._str2int(path[-cls.steplen:]) + 1
key = cls._int2str(newpos)
if len(key) > cls.steplen:
raise PathOverflow("Path Overflow from: '%s'" % (path, ))
return '%s%s%s' % (path[:-cls.steplen],
'0' * (cls.steplen - len(key)),
key)
@classmethod
def _get_lastpos_in_path(cls, path):
":returns: The integer value of the last step in a path."
return cls._str2int(path[-cls.steplen:])
@classmethod
def _get_parent_path_from_path(cls, path):
":returns: The parent path for a given path"
if path:
return path[0:len(path) - cls.steplen]
return ''
@classmethod
def _get_children_path_interval(cls, path):
":returns: An interval of all possible children paths for a node."
return (path + cls.alphabet[0] * cls.steplen,
path + cls.alphabet[-1] * cls.steplen)
@classmethod
def _move_add_sibling_aux(cls, pos, newpos, newdepth, target, siblings,
stmts, oldpath=None, movebranch=False):
"""
Handles the reordering of nodes and branches when adding/moving
nodes.
:returns: A tuple containing the old path and the new path.
"""
if pos == 'last-sibling' \
or (pos == 'right' and target == target.get_last_sibling()):
# easy, the last node
last = target.get_last_sibling()
newpath = cls._inc_path(last.path)
if movebranch:
stmts.append(cls._get_sql_newpath_in_branches(oldpath,
newpath))
else:
# do the UPDATE dance
if newpos is None:
siblings = target.get_siblings()
siblings = {'left': siblings.filter(path__gte=target.path),
'right': siblings.filter(path__gt=target.path),
'first-sibling': siblings}[pos]
basenum = cls._get_lastpos_in_path(target.path)
newpos = {'first-sibling': 1,
'left': basenum,
'right': basenum + 1}[pos]
newpath = cls._get_path(target.path, newdepth, newpos)
for node in siblings.reverse():
# moving the siblings (and their branches) at the right of the
# related position one step to the right
sql, vals = cls._get_sql_newpath_in_branches(node.path,
cls._inc_path(node.path))
stmts.append((sql, vals))
if movebranch:
if oldpath.startswith(node.path):
# if moving to a parent, update oldpath since we just
# increased the path of the entire branch
oldpath = vals[0] + oldpath[len(vals[0]):]
if target.path.startswith(node.path):
# and if we moved the target, update the object
# django made for us, since the UPDATE won't do it;
# maybe useful in loops
target.path = vals[0] + target.path[len(vals[0]):]
if movebranch:
# node to move
stmts.append(cls._get_sql_newpath_in_branches(oldpath,
newpath))
return oldpath, newpath
def _fix_move_to_child(self, pos, target, newdepth):
"Update preliminar vars in :meth:`move` when moving to a child"
newdepth = target.depth
parent = None
newpos = None
siblings = []
if pos in ('first-child', 'last-child', 'sorted-child'):
# moving to a child
parent = target
newdepth += 1
if target.is_leaf():
# moving as a target's first child
newpos = 1
pos = 'first-sibling'
siblings = self.__class__.objects.none()
else:
target = target.get_last_child()
pos = {'first-child': 'first-sibling',
'last-child': 'last-sibling',
'sorted-child': 'sorted-sibling'}[pos]
# this is not for save(): if needed, it will be handled with a
# custom UPDATE; this is only here to update django's in-memory
# object, which can be useful in loops
parent.numchild += 1
parent = None
return pos, target, newdepth, siblings, newpos
@classmethod
def _updates_after_move(cls, oldpath, newpath, stmts):
"""
Updates the list of sql statements needed after moving nodes.
1. :attr:`depth` updates *ONLY* needed by mysql databases (*sigh*)
2. update the number of children of parent nodes
"""
if (cls.get_database_engine() == 'mysql' and
len(oldpath) != len(newpath)):
# no words can describe how dumb mysql is
# we must update the depth of the branch in a different query
stmts.append(cls._get_sql_update_depth_in_branch(newpath))
oldparentpath = cls._get_parent_path_from_path(oldpath)
newparentpath = cls._get_parent_path_from_path(newpath)
if (not oldparentpath and newparentpath) or \
(oldparentpath and not newparentpath) or \
(oldparentpath != newparentpath):
# node changed parent, updating count
if oldparentpath:
stmts.append(cls._get_sql_update_numchild(oldparentpath,
'dec'))
if newparentpath:
stmts.append(cls._get_sql_update_numchild(newparentpath,
'inc'))
@classmethod
def _get_sql_newpath_in_branches(cls, oldpath, newpath):
"""
:returns" The sql needed to move a branch to another position.
.. note::
The generated sql will only update the depth values if needed.
"""
sql1 = "UPDATE %s SET" % (
connection.ops.quote_name(cls._meta.db_table), )
# <3 "standard" sql
if cls.get_database_engine() == 'sqlite3':
# I know that the third argument in SUBSTR (LENGTH(path)) is
# awful, but sqlite fails without it:
# OperationalError: wrong number of arguments to function substr()
# even when the documentation says that 2 arguments are valid:
# http://www.sqlite.org/lang_corefunc.html
sqlpath = "%s||SUBSTR(path, %s, LENGTH(path))"
elif cls.get_database_engine() == 'mysql':
# hooray for mysql ignoring standards in their default
# configuration!
# to make || work as it should, enable ansi mode
# http://dev.mysql.com/doc/refman/5.0/en/ansi-mode.html
sqlpath = "CONCAT(%s, SUBSTR(path, %s))"
else:
sqlpath = "%s||SUBSTR(path, %s)"
sql2 = ["path=%s" % (sqlpath, )]
vals = [newpath, len(oldpath) + 1]
if (len(oldpath) != len(newpath) and
cls.get_database_engine() != 'mysql'):
# when using mysql, this won't update the depth and it has to be
# done in another query; it doesn't even work with
# sql_mode='ANSI,TRADITIONAL'
# TODO: FIND OUT WHY?!?? right now I'm just blaming mysql
sql2.append("depth=LENGTH(%s)/%%s" % (sqlpath, ))
vals.extend([newpath, len(oldpath) + 1, cls.steplen])
sql3 = "WHERE path LIKE %s"
vals.extend([oldpath + '%'])
sql = '%s %s %s' % (sql1, ', '.join(sql2), sql3)
return sql, vals
@classmethod
def _get_sql_update_depth_in_branch(cls, path):
"""
:returns: The sql needed to update the depth of all the nodes in a
branch.
"""
# Right now this is only used by *sigh* mysql.
sql = "UPDATE %s SET depth=LENGTH(path)/%%s" \
" WHERE path LIKE %%s" % (
connection.ops.quote_name(cls._meta.db_table), )
vals = [cls.steplen, path + '%']
return sql, vals
@classmethod
def _get_sql_update_numchild(cls, path, incdec='inc'):
":returns: The sql needed the numchild value of a node"
sql = "UPDATE %s SET numchild=numchild%s1" \
" WHERE path=%%s" % (
connection.ops.quote_name(cls._meta.db_table),
{'inc': '+', 'dec': '-'}[incdec])
vals = [path]
return sql, vals
class Meta:
"Abstract model."
abstract = True
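# --- Illustrative sketch (standalone, mirrors the path encoding above) ---
# With steplen=4 and the 36-character alphabet, every node position is a
# fixed-width base-36 step, and `path` is the concatenation of the steps
# from the root down. The helper below reimplements the padding done by
# _get_path/_int2str for illustration only.
ALPHABET = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
STEPLEN = 4

def int2step(num, alphabet=ALPHABET, steplen=STEPLEN):
    # convert an integer to one fixed-width, zero-padded path step
    base = len(alphabet)
    digits = ''
    while num:
        num, rem = divmod(num, base)
        digits = alphabet[rem] + digits
    return digits.rjust(steplen, alphabet[0])

# int2step(1)  -> '0001'  (the first root node's path)
# int2step(37) -> '0011'  (36 + 1 in base 36)
# A grandchild path: int2step(1) + int2step(2) + int2step(3)
# -> '000100020003' (depth == len(path) // steplen == 3)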
|
|
# (C) Datadog, Inc. 2010-2017
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
'''
Redis checks
'''
# stdlib
from collections import defaultdict
import re
import time
# 3rd party
import redis
# project
from checks import AgentCheck
DEFAULT_MAX_SLOW_ENTRIES = 128
MAX_SLOW_ENTRIES_KEY = "slowlog-max-len"
REPL_KEY = 'master_link_status'
LINK_DOWN_KEY = 'master_link_down_since_seconds'
class Redis(AgentCheck):
db_key_pattern = re.compile(r'^db\d+')
slave_key_pattern = re.compile(r'^slave\d+')
subkeys = ['keys', 'expires']
SOURCE_TYPE_NAME = 'redis'
GAUGE_KEYS = {
# Append-only metrics
'aof_last_rewrite_time_sec': 'redis.aof.last_rewrite_time',
'aof_rewrite_in_progress': 'redis.aof.rewrite',
'aof_current_size': 'redis.aof.size',
'aof_buffer_length': 'redis.aof.buffer_length',
# Network
'connected_clients': 'redis.net.clients',
'connected_slaves': 'redis.net.slaves',
'rejected_connections': 'redis.net.rejected',
# clients
'blocked_clients': 'redis.clients.blocked',
'client_biggest_input_buf': 'redis.clients.biggest_input_buf',
'client_longest_output_list': 'redis.clients.longest_output_list',
# Keys
'evicted_keys': 'redis.keys.evicted',
'expired_keys': 'redis.keys.expired',
# stats
'latest_fork_usec': 'redis.perf.latest_fork_usec',
'bytes_received_per_sec': 'redis.bytes_received_per_sec',
'bytes_sent_per_sec': 'redis.bytes_sent_per_sec',
# Note: 'bytes_received_per_sec' and 'bytes_sent_per_sec' are only
# available on Azure Redis
# pubsub
'pubsub_channels': 'redis.pubsub.channels',
'pubsub_patterns': 'redis.pubsub.patterns',
# rdb
'rdb_bgsave_in_progress': 'redis.rdb.bgsave',
'rdb_changes_since_last_save': 'redis.rdb.changes_since_last',
'rdb_last_bgsave_time_sec': 'redis.rdb.last_bgsave_time',
# memory
'mem_fragmentation_ratio': 'redis.mem.fragmentation_ratio',
'used_memory': 'redis.mem.used',
'used_memory_lua': 'redis.mem.lua',
'used_memory_peak': 'redis.mem.peak',
'used_memory_rss': 'redis.mem.rss',
'maxmemory': 'redis.mem.maxmemory',
# replication
'master_last_io_seconds_ago': 'redis.replication.last_io_seconds_ago',
'master_sync_in_progress': 'redis.replication.sync',
'master_sync_left_bytes': 'redis.replication.sync_left_bytes',
'repl_backlog_histlen': 'redis.replication.backlog_histlen',
'master_repl_offset': 'redis.replication.master_repl_offset',
'slave_repl_offset': 'redis.replication.slave_repl_offset',
}
RATE_KEYS = {
# cpu
'used_cpu_sys': 'redis.cpu.sys',
'used_cpu_sys_children': 'redis.cpu.sys_children',
'used_cpu_user': 'redis.cpu.user',
'used_cpu_user_children': 'redis.cpu.user_children',
# stats
'keyspace_hits': 'redis.stats.keyspace_hits',
'keyspace_misses': 'redis.stats.keyspace_misses',
}
def __init__(self, name, init_config, agentConfig, instances=None):
AgentCheck.__init__(self, name, init_config, agentConfig, instances)
self.connections = {}
self.last_timestamp_seen = defaultdict(int)
def get_library_versions(self):
return {"redis": redis.__version__}
def _parse_dict_string(self, string, key, default):
"""Take from a more recent redis.py, parse_info"""
try:
for item in string.split(','):
k, v = item.rsplit('=', 1)
if k == key:
try:
return int(v)
except ValueError:
return v
return default
except Exception:
self.log.exception("Cannot parse dictionary string: %s" % string)
return default
def _generate_instance_key(self, instance):
if 'unix_socket_path' in instance:
return (instance.get('unix_socket_path'), instance.get('db'))
else:
return (instance.get('host'), instance.get('port'), instance.get('db'))
def _get_conn(self, instance):
key = self._generate_instance_key(instance)
if key not in self.connections:
try:
# Only send useful parameters to the redis client constructor
list_params = ['host', 'port', 'db', 'password', 'socket_timeout',
'connection_pool', 'charset', 'errors', 'unix_socket_path', 'ssl',
'ssl_certfile', 'ssl_keyfile', 'ssl_ca_certs', 'ssl_cert_reqs']
# Set a default timeout (in seconds) if no timeout is specified in the instance config
instance['socket_timeout'] = instance.get('socket_timeout', 5)
connection_params = dict((k, instance[k]) for k in list_params if k in instance)
self.connections[key] = redis.Redis(**connection_params)
except TypeError:
raise Exception("You need a redis library that supports authenticated connections. Try sudo easy_install redis.")
return self.connections[key]
def _get_tags(self, custom_tags, instance):
tags = set(custom_tags or [])
if 'unix_socket_path' in instance:
tags_to_add = [
"redis_host:%s" % instance.get("unix_socket_path"),
"redis_port:unix_socket",
]
else:
tags_to_add = [
"redis_host:%s" % instance.get('host'),
"redis_port:%s" % instance.get('port')
]
tags = sorted(tags.union(tags_to_add))
return tags
def _check_db(self, instance, custom_tags=None):
conn = self._get_conn(instance)
tags = self._get_tags(custom_tags, instance)
# Ping the database for info, and track the latency.
# Process the service check: the check passes if we can connect to Redis
start = time.time()
info = None
try:
info = conn.info()
tags = sorted(tags + ["redis_role:%s" % info["role"]])
status = AgentCheck.OK
self.service_check('redis.can_connect', status, tags=tags)
self._collect_metadata(info)
except ValueError:
status = AgentCheck.CRITICAL
self.service_check('redis.can_connect', status, tags=tags)
raise
except Exception:
status = AgentCheck.CRITICAL
self.service_check('redis.can_connect', status, tags=tags)
raise
latency_ms = round((time.time() - start) * 1000, 2)
self.gauge('redis.info.latency_ms', latency_ms, tags=tags)
# Save the database statistics.
for key in info.keys():
if self.db_key_pattern.match(key):
db_tags = list(tags) + ["redis_db:" + key]
# allows tracking percentage of expired keys as DD does not
# currently allow arithmetic on metrics for monitoring
expires_keys = info[key]["expires"]
total_keys = info[key]["keys"]
persist_keys = total_keys - expires_keys
self.gauge("redis.persist", persist_keys, tags=db_tags)
self.gauge("redis.persist.percent", 100.0 * persist_keys / total_keys, tags=db_tags)
self.gauge("redis.expires.percent", 100.0 * expires_keys / total_keys, tags=db_tags)
for subkey in self.subkeys:
# The old redis module on ubuntu 10.04 (python-redis 0.6.1) does not
# return a dict for those keys but a string: keys=3,expires=0
# Try to parse it (see lighthouse #46)
val = -1
try:
val = info[key].get(subkey, -1)
except AttributeError:
val = self._parse_dict_string(info[key], subkey, -1)
metric = '.'.join(['redis', subkey])
self.gauge(metric, val, tags=db_tags)
# Save a subset of db-wide statistics
for info_name, value in info.iteritems():
if info_name in self.GAUGE_KEYS:
self.gauge(self.GAUGE_KEYS[info_name], info[info_name], tags=tags)
elif info_name in self.RATE_KEYS:
self.rate(self.RATE_KEYS[info_name], info[info_name], tags=tags)
# Save the number of commands.
self.rate('redis.net.commands', info['total_commands_processed'],
tags=tags)
if 'instantaneous_ops_per_sec' in info:
self.gauge('redis.net.instantaneous_ops_per_sec', info['instantaneous_ops_per_sec'],
tags=tags)
# Check some key lengths if asked
key_list = instance.get('keys')
if key_list is not None:
if not isinstance(key_list, list) or len(key_list) == 0:
self.warning("keys in redis configuration is either not a list or empty")
else:
l_tags = list(tags)
for key_pattern in key_list:
if re.search(r"(?<!\\)[*?[]", key_pattern):
keys = conn.scan_iter(match=key_pattern)
else:
keys = [key_pattern, ]
for key in keys:
try:
key_type = conn.type(key)
except redis.ResponseError:
self.log.info("key {} on remote server; skipping".format(key))
continue
key_tags = l_tags + ['key:' + key]
if key_type == 'list':
self.gauge('redis.key.length', conn.llen(key), tags=key_tags)
elif key_type == 'set':
self.gauge('redis.key.length', conn.scard(key), tags=key_tags)
elif key_type == 'zset':
self.gauge('redis.key.length', conn.zcard(key), tags=key_tags)
elif key_type == 'hash':
self.gauge('redis.key.length', conn.hlen(key), tags=key_tags)
else:
# If the type is unknown, it might be because the key doesn't exist,
# which can be because the list is empty. So always send 0 in that case.
if instance.get("warn_on_missing_keys", True):
self.warning("{0} key not found in redis".format(key))
self.gauge('redis.key.length', 0, tags=key_tags)
self._check_replication(info, tags)
if instance.get("command_stats", False):
self._check_command_stats(conn, tags)
def _check_replication(self, info, tags):
# Save the replication delay for each slave
for key in info:
if self.slave_key_pattern.match(key) and isinstance(info[key], dict):
slave_offset = info[key].get('offset')
master_offset = info.get('master_repl_offset')
if slave_offset and master_offset and master_offset - slave_offset >= 0:
delay = master_offset - slave_offset
# Add id, ip, and port tags for the slave
slave_tags = tags[:]
for slave_tag in ('ip', 'port'):
if slave_tag in info[key]:
slave_tags.append('slave_{0}:{1}'.format(slave_tag, info[key][slave_tag]))
slave_tags.append('slave_id:%s' % key.lstrip('slave'))
self.gauge('redis.replication.delay', delay, tags=slave_tags)
if REPL_KEY in info:
if info[REPL_KEY] == 'up':
status = AgentCheck.OK
down_seconds = 0
else:
status = AgentCheck.CRITICAL
down_seconds = info[LINK_DOWN_KEY]
self.service_check('redis.replication.master_link_status', status, tags=tags)
self.gauge('redis.replication.master_link_down_since_seconds', down_seconds, tags=tags)
def _check_slowlog(self, instance, custom_tags):
"""Retrieve length and entries from Redis' SLOWLOG
This will parse through all entries of the SLOWLOG and select ones
within the time range between the last seen entries and now
"""
conn = self._get_conn(instance)
tags = self._get_tags(custom_tags, instance)
if not instance.get(MAX_SLOW_ENTRIES_KEY):
try:
max_slow_entries = int(conn.config_get(MAX_SLOW_ENTRIES_KEY)[MAX_SLOW_ENTRIES_KEY])
if max_slow_entries > DEFAULT_MAX_SLOW_ENTRIES:
self.warning("Redis {0} is higher than {1}. Defaulting to {1}."
"If you need a higher value, please set {0} in your check config"
.format(MAX_SLOW_ENTRIES_KEY, DEFAULT_MAX_SLOW_ENTRIES))
max_slow_entries = DEFAULT_MAX_SLOW_ENTRIES
# No config on AWS Elasticache
except redis.ResponseError:
max_slow_entries = DEFAULT_MAX_SLOW_ENTRIES
else:
max_slow_entries = int(instance.get(MAX_SLOW_ENTRIES_KEY))
# Generate a unique id for this instance to be persisted across runs
ts_key = self._generate_instance_key(instance)
# Get all slowlog entries
slowlogs = conn.slowlog_get(max_slow_entries)
# Find slowlog entries between last timestamp and now using start_time
slowlogs = [s for s in slowlogs if s['start_time'] >
self.last_timestamp_seen[ts_key]]
max_ts = 0
# Slowlog entry looks like:
# {'command': 'LPOP somekey',
# 'duration': 11238,
# 'id': 496L,
# 'start_time': 1422529869}
for slowlog in slowlogs:
if slowlog['start_time'] > max_ts:
max_ts = slowlog['start_time']
slowlog_tags = list(tags)
command = slowlog['command'].split()
# When the "Garantia Data" custom Redis is used, redis-py returns
# an empty `command` field
# FIXME when https://github.com/andymccurdy/redis-py/pull/622 is released in redis-py
if command:
slowlog_tags.append('command:{0}'.format(command[0]))
value = slowlog['duration']
self.histogram('redis.slowlog.micros', value, tags=slowlog_tags)
self.last_timestamp_seen[ts_key] = max_ts
def _check_command_stats(self, conn, tags):
"""Get command-specific statistics from redis' INFO COMMANDSTATS command
"""
try:
command_stats = conn.info("commandstats")
except Exception:
self.warning("Could not retrieve command stats from Redis."
"INFO COMMANDSTATS only works with Redis >= 2.6.")
return
for key, stats in command_stats.iteritems():
command = key.split('_', 1)[1]
command_tags = tags + ['command:%s' % command]
self.gauge('redis.command.calls', stats['calls'], tags=command_tags)
self.gauge('redis.command.usec_per_call', stats['usec_per_call'], tags=command_tags)
def check(self, instance):
if ("host" not in instance or "port" not in instance) and "unix_socket_path" not in instance:
raise Exception("You must specify a host/port couple or a unix_socket_path")
custom_tags = instance.get('tags', [])
self._check_db(instance, custom_tags)
self._check_slowlog(instance, custom_tags)
def _collect_metadata(self, info):
if info and 'redis_version' in info:
self.service_metadata('version', info['redis_version'])
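# --- Illustrative sketch (hypothetical instance configuration) ---
# check() above requires either a host/port pair or a unix_socket_path.
# A minimal instance dict exercising the optional knobs read by this
# check might look like this; all values are examples only:
EXAMPLE_INSTANCE = {
    'host': 'localhost',
    'port': 6379,
    'db': 0,
    'socket_timeout': 5,            # the check defaults to 5 seconds
    'tags': ['env:example'],
    'keys': ['queue:*', 'jobs'],    # key lengths to report (glob or literal)
    'warn_on_missing_keys': True,
    'command_stats': False,         # enable INFO COMMANDSTATS metrics
    'slowlog-max-len': 128,         # cap on SLOWLOG entries fetched
}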
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The plugin for serving data from a TensorFlow debugger."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import glob
import json
import os
import re
from werkzeug import wrappers
from mxconsole.framework import tensor_util
from mxconsole.platform import tf_logging as logging
from mxconsole.backend import http_util
from mxconsole.backend.event_processing import event_accumulator
from mxconsole.backend.event_processing import event_file_loader
from mxconsole.plugins import base_plugin
# The prefix of routes provided by this plugin.
PLUGIN_PREFIX_ROUTE = 'debugger'
# HTTP routes.
_HEALTH_PILLS_ROUTE = '/health_pills'
# The POST key of HEALTH_PILLS_ROUTE for a JSON list of node names.
_NODE_NAMES_POST_KEY = 'node_names'
# The POST key of HEALTH_PILLS_ROUTE for the run to retrieve health pills for.
_RUN_POST_KEY = 'run'
# The default run to retrieve health pills for.
_DEFAULT_RUN = '.'
# The POST key of HEALTH_PILLS_ROUTE for the specific step to retrieve health
# pills for.
_STEP_POST_KEY = 'step'
# A glob pattern for files containing debugger-related events.
_DEBUGGER_EVENTS_GLOB_PATTERN = 'events.debugger*'
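# --- Illustrative sketch (hypothetical client request) ---
# The handler below accepts POSTs with the keys defined above. Assuming
# the backend mounts plugin routes under /data/plugin/<name>/ (the exact
# mount point depends on the serving setup), a client call might look
# like this; host, port and node names are examples only:
#
#   import json, requests  # requests is an assumed third-party dependency
#   requests.post(
#       'http://localhost:6006/data/plugin/debugger/health_pills',
#       data={'node_names': json.dumps(['layer1/weights']),
#             'run': '.', 'step': 7})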
class DebuggerPlugin(base_plugin.TBPlugin):
"""TensorFlow Debugger plugin. Receives requests for debugger-related data.
That data could include health pills, which unveil the status of tensor
values.
"""
def get_plugin_apps(self, multiplexer, logdir):
"""Obtains a mapping between routes and handlers. Stores the logdir.
Args:
multiplexer: The EventMultiplexer that provides TB data.
logdir: The logdir string - the directory of events files.
Returns:
A mapping between routes and handlers (functions that respond to
requests).
"""
self._event_multiplexer = multiplexer
self._logdir = logdir
return {
_HEALTH_PILLS_ROUTE: self._serve_health_pills_handler,
}
@wrappers.Request.application
def _serve_health_pills_handler(self, request):
"""A (wrapped) werkzeug handler for serving health pills.
Accepts POST requests and responds with health pills. The request accepts
several POST parameters:
node_names: (required string) A JSON-ified list of node names for which
the client would like to request health pills.
run: (optional string) The run to retrieve health pills for. Defaults to
'.'. This data is sent via POST (not GET) since URL length is limited.
step: (optional integer) The session run step for which to
retrieve health pills. If provided, the handler reads the health pills
of that step from disk (which is slow) and produces a response with
only health pills at that step. If not provided, the handler returns a
response with health pills at all steps sampled by the event
multiplexer (the fast path). The motivation here is that, sometimes,
one desires to examine health pills at a specific step (to say find
the first step that causes a model to blow up with NaNs).
get_plugin_apps must be called before this slower feature is used
because that method passes the logdir (directory path) to this plugin.
This handler responds with a JSON-ified object mapping from node names to a
list (of size 1) of health pill event objects, each of which has these
properties.
{
'wall_time': float,
'step': int,
'node_name': string,
'output_slot': int,
# A list of 12 floats that summarizes the elements of the tensor.
'value': float[],
}
Node names for which there are no health pills to be found are excluded from
the mapping.
Args:
request: The request issued by the client for health pills.
Returns:
A werkzeug BaseResponse object.
"""
if request.method != 'POST':
logging.error(
'%s requests are forbidden by the debugger plugin.', request.method)
return wrappers.Response(status=405)
if _NODE_NAMES_POST_KEY not in request.form:
logging.error(
'The %r POST key was not found in the request for health pills.',
_NODE_NAMES_POST_KEY)
return wrappers.Response(status=400)
jsonified_node_names = request.form[_NODE_NAMES_POST_KEY]
try:
node_names = json.loads(jsonified_node_names)
except Exception as e: # pylint: disable=broad-except
# Different JSON libs raise different exceptions, so we just do a
# catch-all here. This problem is complicated by how Tensorboard might be
# run in many different environments, as it is open-source.
logging.error('Could not decode node name JSON string %r: %s',
jsonified_node_names, e)
return wrappers.Response(status=400)
if not isinstance(node_names, list):
logging.error('%r is not a JSON list of node names.',
jsonified_node_names)
return wrappers.Response(status=400)
run = request.form.get(_RUN_POST_KEY, _DEFAULT_RUN)
step_string = request.form.get(_STEP_POST_KEY, None)
if step_string is None:
# Use all steps sampled by the event multiplexer (Relatively fast).
mapping = self._obtain_sampled_health_pills(run, node_names)
else:
# Read disk to obtain the health pills for that step (Relatively slow).
# Make sure that the directory for the run exists.
# Determine the directory of events file to read.
events_directory = self._logdir
if run != _DEFAULT_RUN:
# Use the directory for the specific run.
events_directory = os.path.join(events_directory, run)
step = int(step_string)
try:
mapping = self._obtain_health_pills_at_step(
events_directory, node_names, step)
except IOError as error:
logging.error(
'Error retrieving health pills for step %d: %s', step, error)
return wrappers.Response(status=404)
# Convert event_accumulator.HealthPillEvents to JSON-able dicts.
jsonable_mapping = {}
for node_name, events in mapping.items():
jsonable_mapping[node_name] = [e._asdict() for e in events]
return http_util.Respond(request, jsonable_mapping, 'application/json')
def _obtain_sampled_health_pills(self, run, node_names):
"""Obtains the health pills for a run sampled by the event multiplexer.
This is much faster than the alternative path of reading health pills from
disk.
Args:
run: The run to fetch health pills for.
node_names: A list of node names for which to retrieve health pills.
Returns:
A dictionary mapping from node name to a list of
event_accumulator.HealthPillEvents.
"""
mapping = {}
for node_name in node_names:
try:
mapping[node_name] = self._event_multiplexer.HealthPills(run, node_name)
except KeyError:
logging.info('No health pills found for node %r.', node_name)
continue
return mapping
def _obtain_health_pills_at_step(self, events_directory, node_names, step):
"""Reads disk to obtain the health pills for a run at a specific step.
    This could be much slower than the alternative path of just returning all
    health pills sampled by the event multiplexer. For large graphs and big
    step values (in the thousands), this call could take tens of minutes.
Args:
events_directory: The directory containing events for the desired run.
node_names: A list of node names for which to retrieve health pills.
step: The step to obtain health pills for.
Returns:
A dictionary mapping from node name to a list of health pill objects (see
docs for _serve_health_pills_handler for properties of those objects).
Raises:
IOError: If no files with health pill events could be found.
"""
# Obtain all files with debugger-related events.
pattern = os.path.join(events_directory, _DEBUGGER_EVENTS_GLOB_PATTERN)
file_paths = glob.glob(pattern)
if not file_paths:
      raise IOError(
          'No events files found that match the pattern %r.' % pattern)
# Sort by name (and thus by timestamp).
file_paths.sort()
mapping = collections.defaultdict(list)
node_name_set = frozenset(node_names)
for file_path in file_paths:
should_stop = self._process_health_pill_event(
node_name_set, mapping, step, file_path)
if should_stop:
break
return mapping
def _process_health_pill_event(self, node_name_set, mapping, target_step,
file_path):
"""Creates health pills out of data in an event.
Creates health pills out of the event and adds them to the mapping.
Args:
node_name_set: A set of node names that are relevant.
mapping: The mapping from node name to event_accumulator.HealthPillEvents.
This object may be destructively modified.
target_step: The target step at which to obtain health pills.
file_path: The path to the file with health pill events.
Returns:
Whether we should stop reading events because future events are no longer
relevant.
"""
events_loader = event_file_loader.EventFileLoader(file_path)
for event in events_loader.Load():
if not event.HasField('summary'):
logging.warning('An event in a debugger events file lacks a summary.')
continue
if event.step < target_step:
# This event is not of the relevant step. We perform this check
# first because the majority of events will be eliminated from
# consideration by this check.
continue
if event.step > target_step:
# We have passed the relevant step. No need to read more events.
return True
for value in event.summary.value:
# Since we seek health pills for a specific step, this function
# returns 1 health pill per node per step. The wall time is the
# seconds since the epoch.
health_pill = self._process_health_pill_value(
node_name_set, event.wall_time, event.step, value)
if not health_pill:
continue
mapping[health_pill.node_name].append(health_pill)
# Keep reading events.
return False
def _process_health_pill_value(self, node_name_set, wall_time, step, value):
"""Creates a dict containing various properties of a health pill.
Args:
node_name_set: A set of node names that are relevant.
wall_time: The wall time in seconds.
step: The session run step of the event.
value: The health pill value.
Returns:
An event_accumulator.HealthPillEvent. Or None if one could not be created.
"""
if not value.HasField('tensor'):
logging.warning(
'An event in a debugger events file lacks a tensor value.')
return None
if value.tag != event_accumulator.HEALTH_PILL_EVENT_TAG:
logging.warning(
('A debugger-related event lacks the %r tag. It instead has '
'the %r tag.'), event_accumulator.HEALTH_PILL_EVENT_TAG, value.tag)
return None
match = re.match(r'^(.*):(\d+):DebugNumericSummary$', value.node_name)
if not match:
      logging.warning(
          ('An event with a health pill has an invalid watch (i.e., an '
           'unexpected debug op): %r'), value.node_name)
return None
node_name = match.group(1)
if node_name not in node_name_set:
# This event is not relevant.
return None
# Since we seek health pills for a specific step, this function
# returns 1 health pill per node per step. The wall time is the
# seconds since the epoch.
return event_accumulator.HealthPillEvent(
wall_time=wall_time,
step=step,
node_name=node_name,
output_slot=int(match.group(2)),
value=list(tensor_util.MakeNdarray(value.tensor)))
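# A minimal client-side sketch (not part of the plugin) of how the health
# pills handler above might be queried. The exact route URL and the use of
# the `requests` library are illustrative assumptions:
#
#   import json
#   import requests
#
#   resp = requests.post(
#       'http://localhost:6006/data/plugin/debugger/health_pills',
#       data={'node_names': json.dumps(['layer1/weights'])})
#   pills = resp.json()  # {node_name: [health pill dict, ...]}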
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Book'
db.create_table('cabinet_book', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('isbn', self.gf('django.db.models.fields.CharField')(max_length=100)),
('title', self.gf('django.db.models.fields.CharField')(max_length=100)),
('author', self.gf('django.db.models.fields.CharField')(max_length=100)),
('douban_id', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)),
))
db.send_create_signal('cabinet', ['Book'])
# Adding model 'BookComment'
db.create_table('cabinet_bookcomment', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('datetime', self.gf('django.db.models.fields.DateTimeField')()),
('book', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cabinet.Book'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('title', self.gf('django.db.models.fields.CharField')(max_length=256)),
('content', self.gf('django.db.models.fields.CharField')(max_length=2048)),
('status', self.gf('django.db.models.fields.IntegerField')()),
))
db.send_create_signal('cabinet', ['BookComment'])
# Adding model 'BookOwnership'
db.create_table('cabinet_bookownership', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('owner', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('book', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cabinet.Book'])),
('status', self.gf('django.db.models.fields.CharField')(max_length=16)),
('has_ebook', self.gf('django.db.models.fields.BooleanField')(default=False)),
('remark', self.gf('django.db.models.fields.CharField')(max_length=256, blank=True)),
))
db.send_create_signal('cabinet', ['BookOwnership'])
# Adding model 'BookBorrowRecord'
db.create_table('cabinet_bookborrowrecord', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('ownership', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cabinet.BookOwnership'])),
('borrower', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('borrow_date', self.gf('django.db.models.fields.DateTimeField')()),
('planed_return_date', self.gf('django.db.models.fields.DateField')(blank=True)),
('returned_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
))
db.send_create_signal('cabinet', ['BookBorrowRecord'])
# Adding model 'BookCabinet'
db.create_table('cabinet_bookcabinet', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('owner', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('create_datetime', self.gf('django.db.models.fields.DateTimeField')()),
('remark', self.gf('django.db.models.fields.CharField')(max_length=256, blank=True)),
))
db.send_create_signal('cabinet', ['BookCabinet'])
# Adding M2M table for field books on 'BookCabinet'
db.create_table('cabinet_bookcabinet_books', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('bookcabinet', models.ForeignKey(orm['cabinet.bookcabinet'], null=False)),
('bookownership', models.ForeignKey(orm['cabinet.bookownership'], null=False))
))
db.create_unique('cabinet_bookcabinet_books', ['bookcabinet_id', 'bookownership_id'])
# Adding model 'CabinetNews'
db.create_table('cabinet_cabinetnews', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('datetime', self.gf('django.db.models.fields.DateTimeField')()),
('lead', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('news', self.gf('django.db.models.fields.CharField')(max_length=256)),
))
db.send_create_signal('cabinet', ['CabinetNews'])
# Adding model 'EBookRequest'
db.create_table('cabinet_ebookrequest', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('datetime', self.gf('django.db.models.fields.DateTimeField')()),
('requester', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('bo_ship', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cabinet.BookOwnership'])),
))
db.send_create_signal('cabinet', ['EBookRequest'])
# Adding model 'BookBorrowRequest'
db.create_table('cabinet_bookborrowrequest', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('datetime', self.gf('django.db.models.fields.DateTimeField')()),
('requester', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('bo_ship', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cabinet.BookOwnership'])),
('planed_return_date', self.gf('django.db.models.fields.DateField')()),
('remark', self.gf('django.db.models.fields.CharField')(max_length=256, blank=True)),
))
db.send_create_signal('cabinet', ['BookBorrowRequest'])
# Adding model 'Repository'
db.create_table('cabinet_repository', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('create_time', self.gf('django.db.models.fields.DateTimeField')()),
('name', self.gf('django.db.models.fields.CharField')(max_length=256)),
('description', self.gf('django.db.models.fields.CharField')(max_length=1024, blank=True)),
))
db.send_create_signal('cabinet', ['Repository'])
# Adding M2M table for field admin on 'Repository'
db.create_table('cabinet_repository_admin', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('repository', models.ForeignKey(orm['cabinet.repository'], null=False)),
('user', models.ForeignKey(orm['auth.user'], null=False))
))
db.create_unique('cabinet_repository_admin', ['repository_id', 'user_id'])
# Adding M2M table for field members on 'Repository'
db.create_table('cabinet_repository_members', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('repository', models.ForeignKey(orm['cabinet.repository'], null=False)),
('user', models.ForeignKey(orm['auth.user'], null=False))
))
db.create_unique('cabinet_repository_members', ['repository_id', 'user_id'])
# Adding model 'JoinRepositoryRequest'
db.create_table('cabinet_joinrepositoryrequest', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('datetime', self.gf('django.db.models.fields.DateTimeField')()),
('requester', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('repo', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cabinet.Repository'])),
('remark', self.gf('django.db.models.fields.CharField')(max_length=256, blank=True)),
))
db.send_create_signal('cabinet', ['JoinRepositoryRequest'])
# Adding model 'Feedback'
db.create_table('cabinet_feedback', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('datetime', self.gf('django.db.models.fields.DateTimeField')()),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('title', self.gf('django.db.models.fields.CharField')(max_length=256)),
('content', self.gf('django.db.models.fields.CharField')(max_length=1024)),
('status', self.gf('django.db.models.fields.IntegerField')()),
))
db.send_create_signal('cabinet', ['Feedback'])
# Adding model 'Tag'
db.create_table('cabinet_tag', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=64)),
))
db.send_create_signal('cabinet', ['Tag'])
# Adding model 'BookTagUse'
db.create_table('cabinet_booktaguse', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('tag', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cabinet.Tag'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('book', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cabinet.Book'])),
))
db.send_create_signal('cabinet', ['BookTagUse'])
# Adding unique constraint on 'BookTagUse', fields ['tag', 'user', 'book']
db.create_unique('cabinet_booktaguse', ['tag_id', 'user_id', 'book_id'])
# Adding model 'BookOwnershipTagUse'
db.create_table('cabinet_bookownershiptaguse', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('tag', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cabinet.Tag'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('bookown', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cabinet.BookOwnership'])),
))
db.send_create_signal('cabinet', ['BookOwnershipTagUse'])
# Adding model 'SysBookTagUse'
db.create_table('cabinet_sysbooktaguse', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('tag', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cabinet.Tag'])),
('book', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cabinet.Book'])),
))
db.send_create_signal('cabinet', ['SysBookTagUse'])
def backwards(self, orm):
# Removing unique constraint on 'BookTagUse', fields ['tag', 'user', 'book']
db.delete_unique('cabinet_booktaguse', ['tag_id', 'user_id', 'book_id'])
# Deleting model 'Book'
db.delete_table('cabinet_book')
# Deleting model 'BookComment'
db.delete_table('cabinet_bookcomment')
# Deleting model 'BookOwnership'
db.delete_table('cabinet_bookownership')
# Deleting model 'BookBorrowRecord'
db.delete_table('cabinet_bookborrowrecord')
# Deleting model 'BookCabinet'
db.delete_table('cabinet_bookcabinet')
# Removing M2M table for field books on 'BookCabinet'
db.delete_table('cabinet_bookcabinet_books')
# Deleting model 'CabinetNews'
db.delete_table('cabinet_cabinetnews')
# Deleting model 'EBookRequest'
db.delete_table('cabinet_ebookrequest')
# Deleting model 'BookBorrowRequest'
db.delete_table('cabinet_bookborrowrequest')
# Deleting model 'Repository'
db.delete_table('cabinet_repository')
# Removing M2M table for field admin on 'Repository'
db.delete_table('cabinet_repository_admin')
# Removing M2M table for field members on 'Repository'
db.delete_table('cabinet_repository_members')
# Deleting model 'JoinRepositoryRequest'
db.delete_table('cabinet_joinrepositoryrequest')
# Deleting model 'Feedback'
db.delete_table('cabinet_feedback')
# Deleting model 'Tag'
db.delete_table('cabinet_tag')
# Deleting model 'BookTagUse'
db.delete_table('cabinet_booktaguse')
# Deleting model 'BookOwnershipTagUse'
db.delete_table('cabinet_bookownershiptaguse')
# Deleting model 'SysBookTagUse'
db.delete_table('cabinet_sysbooktaguse')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cabinet.book': {
'Meta': {'object_name': 'Book'},
'author': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'douban_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isbn': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'cabinet.bookborrowrecord': {
'Meta': {'object_name': 'BookBorrowRecord'},
'borrow_date': ('django.db.models.fields.DateTimeField', [], {}),
'borrower': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ownership': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cabinet.BookOwnership']"}),
'planed_return_date': ('django.db.models.fields.DateField', [], {'blank': 'True'}),
'returned_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'cabinet.bookborrowrequest': {
'Meta': {'object_name': 'BookBorrowRequest'},
'bo_ship': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cabinet.BookOwnership']"}),
'datetime': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'planed_return_date': ('django.db.models.fields.DateField', [], {}),
'remark': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'requester': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'cabinet.bookcabinet': {
'Meta': {'object_name': 'BookCabinet'},
'books': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['cabinet.BookOwnership']", 'null': 'True', 'blank': 'True'}),
'create_datetime': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'remark': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'})
},
'cabinet.bookcomment': {
'Meta': {'object_name': 'BookComment'},
'book': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cabinet.Book']"}),
'content': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'cabinet.bookownership': {
'Meta': {'object_name': 'BookOwnership'},
'book': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cabinet.Book']"}),
'has_ebook': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'remark': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '16'})
},
'cabinet.bookownershiptaguse': {
'Meta': {'object_name': 'BookOwnershipTagUse'},
'bookown': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cabinet.BookOwnership']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cabinet.Tag']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'cabinet.booktaguse': {
'Meta': {'unique_together': "(('tag', 'user', 'book'),)", 'object_name': 'BookTagUse'},
'book': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cabinet.Book']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cabinet.Tag']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'cabinet.cabinetnews': {
'Meta': {'object_name': 'CabinetNews'},
'datetime': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lead': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'news': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'cabinet.ebookrequest': {
'Meta': {'object_name': 'EBookRequest'},
'bo_ship': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cabinet.BookOwnership']"}),
'datetime': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'requester': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'cabinet.feedback': {
'Meta': {'object_name': 'Feedback'},
'content': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'cabinet.joinrepositoryrequest': {
'Meta': {'object_name': 'JoinRepositoryRequest'},
'datetime': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'remark': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'repo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cabinet.Repository']"}),
'requester': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'cabinet.repository': {
'Meta': {'object_name': 'Repository'},
'admin': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'managed_repos'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'create_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'joined_repos'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'cabinet.sysbooktaguse': {
'Meta': {'object_name': 'SysBookTagUse'},
'book': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cabinet.Book']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cabinet.Tag']"})
},
'cabinet.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['cabinet']
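# Illustrative South workflow for this migration (assuming a standard
# manage.py setup):
#   ./manage.py migrate cabinet          # apply (forwards)
#   ./manage.py migrate cabinet zero     # unapply everything (backwards)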
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# enable debugging
import sys, os, time, datetime, random
from glob import glob
sys.path.append( os.environ['CONFPATH'] )
import config
config.set_incpath()
import session
templates = {'main':''}
rendered = False
coloured = True
cookies = ''
colors = {
'red' : [255,50, 50],
'green' : [100,180,100],
'blue' : [40, 110, 170],
'purple': [140, 30, 200],
'orange': [220, 150, 40],
'brown' : [150, 100, 50],
'pink' : [255, 100, 180]
}
def shorten(figure):
letter = ''
if figure >= 1000:
letter = 'k'
figure = figure / 1000
if figure >= 1000:
letter = 'm'
figure = figure / 1000
if figure > 100:
figure = int(figure)
elif figure > 10:
figure = round(figure, 1)
else:
figure = round(figure, 2)
return str(figure)+letter
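# Examples (values follow directly from the logic above):
#   shorten(532)     -> '532'    (int() applied above 100)
#   shorten(56.78)   -> '56.8'   (one decimal between 10 and 100)
#   shorten(1234)    -> '1.23k'
#   shorten(1234567) -> '1.23m'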
def get_colorcodes(num=6, rgb=True):
conversion = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F']
_colors_ = []
_c_ = list(colors.values())
for a in range(0, num):
color = random.choice( range(len(_c_)) )
color_code = ','.join([str(i) for i in _c_[color]])
del _c_[color]
if not len(_c_):
_c_.append([30, 30, 30])
if not rgb:
_code_ = color_code.split(',')
for i in [0,1,2]:
_code_[i] = conversion[int(int(_code_[i])/16)] + conversion[int(_code_[i])%16]
_colors_.append("#"+''.join(_code_))
else:
_colors_.append("rgb("+color_code+")")
return _colors_
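# Example: get_colorcodes(2) might return ['rgb(40,110,170)', 'rgb(255,50,50)']
# (palette entries are picked at random). With rgb=False the same picks are
# rendered as hex strings, e.g. [40, 110, 170] -> '#286EAA'.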
def echo(string, hook='main', overwrite=False, fill={}):
if overwrite or (hook not in templates):
templates[hook] = ''
to_insert = str(string)
if len(fill):
for i in fill:
_hook_ = "{{"+i+"}}"
if _hook_ in to_insert:
to_insert = to_insert.replace(_hook_, str(fill[i]))
templates[hook] += to_insert+"\n"
def include(template='null', hook='main', overwrite=False, fill={}):
found = glob(os.environ['TEMPLATES']+'/'+template+'.html') + glob(os.environ['TEMPLATES']+'/*/'+template+'.html')
    t = open(found[0], encoding="utf-8").read()
echo(t, hook=hook, overwrite=overwrite, fill=fill)
def get(template='null'):
found = glob(os.environ['TEMPLATES']+'/'+template+'.html') + glob(os.environ['TEMPLATES']+'/*/'+template+'.html')
    return open(found[0], encoding="utf-8").read()
def render_headers():
if 'http-headers' in templates.keys():
print(templates['http-headers'], end='')
print("Generated-with: Custom headers")
else:
templates['http-headers'] = ''
print("Generated-with: Default headers")
if 'Content-Type' not in templates['http-headers']:# and 'Location:' not in templates['http-headers']:
print("Content-Type: text/html;charset=utf-8")
print(str(session.SESSION.output()))
print()
def render_response():
    global rendered
    done = []
for t in templates.keys():
for _t_ in templates.keys():
if t == _t_ or _t_ in done:
continue
needle = templates[t]
hook = "{{"+t+"}}"
haystack = templates[_t_]
if hook in haystack:
templates[_t_] = haystack.replace("{{"+t+"}}", needle)
done.append(t)
break
for t in done:
del templates[t]
rendered = True
print(templates['root'])
def send_json(_json_):
import json
echo('Content-Type: text/json', 'http-headers', overwrite=True)
echo(json.dumps(_json_), hook='root', overwrite=True)
def paginate(style='numbers', base='', current=1, params=None, criterion='page', last=1, options=None):
    # Avoid mutable default arguments (a classic Python pitfall).
    params = params if params is not None else {}
    options = options if options is not None else {}
    if style not in ['numbers', 'languages', 'types']:
        style = 'numbers'
    # NOTE: an alphabetical 'letters' pagination style existed in an older
    # (PHP) version of this code; its unported template residue has been
    # removed. Only 'numbers', 'languages' and 'types' are implemented.
    if style == 'types':
_ps_ = '<div id="selection" class="btn-group" data-toggle="buttons">'
_ps_ += ''.join(['<label class="btn btn-default'+('', ' active')[x==current]+'" data-toggle-class="btn-primary" data-toggle-passive-class="btn-default"><input type="radio" name="selection" value="'+x+'" data-url="'+y['url']+'"><span style="margin-right:5px;text-shadow:1px 1px 1px #222,1px 1px 1px #222;"><i class="fa fa-user" style="color:#BBC;"></i><sup style="margin-left:-4px;"><i class="fa fa-'+y['icon']+'" style="color:'+y['color']+';"></i></sup></span>'+y['title']+'</label>' for (x, y) in options.items()])
_ps_ += '</div>'
_ps_ += '<script type="text/javascript">$(document).ready(function(){$("#selection label").click(function(){window.location.href = $(this).find("input").attr("data-url");});});</script>'
elif style == 'languages':
_ps_ = '<div style="text-align:center;clear:both;">'
        if current:
_ps_ +='<ul class="pagination pagination-sm langues" style="margin:5px;"><li><a href="?">Reset</a></li></ul>'
_ps_ += '<ul class="pagination pagination-sm chiffres" style="margin:5px;">'
for i in sorted(options):
r = params
r[criterion] = i
chain = base + '?' + '&'.join([x+'='+y for x,y in r.items()])
_ps_ += '<li'+('',' class="active"')[i == current]+'><a href="'+chain+'"><img src="/static/img/flags/lang/'+str(i)+'.png"/></a></li>'
_ps_ += '</ul></div>'
elif style == 'numbers':
_ps_ = '<div style="text-align:center;clear:both;"><ul class="pagination pagination-sm chiffres" style="margin:5px;">'
_last_ellipse_ = 0
r = params
r[criterion] = str(1)
chain = base + '?' + '&'.join([x+'='+y for x,y in r.items()])
_ps_ += ('<li'+('',' class="disabled"')[1 == current]+'><a id="go-left" href="'+chain+'"><span class="glyphicon glyphicon-chevron-left"></span></a></li>')
r[criterion] = str(current-1)
chain = base + '?' + '&'.join([x+'='+y for x,y in r.items()])
_ps_ += ('<li'+('',' class="disabled"')[1 == current]+'><a id="go-left" href="'+chain+'"><span class="glyphicon glyphicon-arrow-left"></span></a></li>')
for i in range(1, last+1):
if i not in list(set([1, last]) | set(range(max(1,current-4), min(last, current+4)))):
if _last_ellipse_ != i-1:
_ps_ += '<li style="cursor:not-allowed;"><a>...</a></li>'
_last_ellipse_ = i
else:
r = params
r[criterion] = str(i)
chain = base + '?' + '&'.join([x+'='+y for x,y in r.items()])
_ps_ += '<li'+('',' class="active"')[i == current]+'><a href="'+chain+'">'+str(i)+'</a></li>'
r = params
r[criterion] = str(current+1)
chain = base + '?' + '&'.join([x+'='+y for x,y in r.items()])
_ps_ += ('<li'+('',' class="disabled"')[last == current]+'><a id="go-right" href="'+chain+'"><span class="glyphicon glyphicon-arrow-right"></span></a></li>')
r[criterion] = str(last)
chain = base + '?' + '&'.join([x+'='+y for x,y in r.items()])
_ps_ += ('<li'+('',' class="disabled"')[last == current]+'><a id="go-right" href="'+chain+'"><span class="glyphicon glyphicon-chevron-right"></span></a></li>')
_ps_ += '</ul></div>'
return _ps_
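# Illustrative call (route and params are hypothetical): build a numeric
# pager for page 3 of 10 result pages and splice it into a template hook:
#   echo(paginate(style='numbers', base='/search', current=3,
#                 params={'q': 'python'}, criterion='page', last=10),
#        hook='pagination')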
include('html.default', 'root')
|
|
# Copyright 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mock import DEFAULT
from mock import MagicMock
from mock import patch
from testtools.matchers import Is, Equals, Not
from trove.common.instance import ServiceStatuses
from trove.guestagent import backup
from trove.guestagent.common import configuration
from trove.guestagent.common.configuration import ImportOverrideStrategy
from trove.guestagent.common import operating_system
from trove.guestagent.datastore.experimental.db2 import (
manager as db2_manager)
from trove.guestagent.datastore.experimental.db2 import (
service as db2_service)
from trove.guestagent import pkg
from trove.guestagent import volume
from trove.tests.unittests.guestagent.test_datastore_manager import \
DatastoreManagerTest
class GuestAgentDB2ManagerTest(DatastoreManagerTest):
@patch.object(ImportOverrideStrategy, '_initialize_import_directory')
@patch.multiple(operating_system, exists=DEFAULT, write_file=DEFAULT,
chown=DEFAULT, chmod=DEFAULT)
@patch.object(db2_service.DB2App, 'process_default_dbm_config')
def setUp(self, *arg, **kwargs):
super(GuestAgentDB2ManagerTest, self).setUp('db2')
self.real_status = db2_service.DB2AppStatus.set_status
class FakeInstanceServiceStatus(object):
status = ServiceStatuses.NEW
def save(self):
pass
db2_service.DB2AppStatus.set_status = MagicMock(
return_value=FakeInstanceServiceStatus())
self.manager = db2_manager.Manager()
self.real_db_app_status = db2_service.DB2AppStatus
self.origin_format = volume.VolumeDevice.format
self.origin_mount = volume.VolumeDevice.mount
self.origin_mount_points = volume.VolumeDevice.mount_points
self.origin_stop_db = db2_service.DB2App.stop_db
self.origin_start_db = db2_service.DB2App.start_db
self.orig_change_ownership = (db2_service.DB2App.change_ownership)
self.orig_create_databases = db2_service.DB2Admin.create_database
self.orig_list_databases = db2_service.DB2Admin.list_databases
self.orig_delete_database = db2_service.DB2Admin.delete_database
self.orig_create_users = db2_service.DB2Admin.create_user
self.orig_list_users = db2_service.DB2Admin.list_users
self.orig_delete_user = db2_service.DB2Admin.delete_user
self.orig_update_hostname = db2_service.DB2App.update_hostname
self.orig_backup_restore = backup.restore
self.orig_init_config = db2_service.DB2App.init_config
self.orig_update_overrides = db2_service.DB2App.update_overrides
self.orig_remove_overrides = db2_service.DB2App.remove_overrides
def tearDown(self):
super(GuestAgentDB2ManagerTest, self).tearDown()
db2_service.DB2AppStatus.set_status = self.real_db_app_status
volume.VolumeDevice.format = self.origin_format
volume.VolumeDevice.mount = self.origin_mount
volume.VolumeDevice.mount_points = self.origin_mount_points
db2_service.DB2App.stop_db = self.origin_stop_db
db2_service.DB2App.start_db = self.origin_start_db
db2_service.DB2App.change_ownership = self.orig_change_ownership
        db2_service.DB2Admin.create_database = self.orig_create_databases
        db2_service.DB2Admin.list_databases = self.orig_list_databases
        db2_service.DB2Admin.delete_database = self.orig_delete_database
        db2_service.DB2Admin.create_user = self.orig_create_users
        db2_service.DB2Admin.list_users = self.orig_list_users
        db2_service.DB2Admin.delete_user = self.orig_delete_user
db2_service.DB2App.update_hostname = self.orig_update_hostname
backup.restore = self.orig_backup_restore
db2_service.DB2App.init_config = self.orig_init_config
db2_service.DB2App.update_overrides = self.orig_update_overrides
db2_service.DB2App.remove_overrides = self.orig_remove_overrides
def test_update_status(self):
mock_status = MagicMock()
self.manager.appStatus = mock_status
self.manager.update_status(self.context)
mock_status.update.assert_any_call()
def test_prepare_device_path_true(self):
self._prepare_dynamic()
def test_prepare_device_path_false(self):
self._prepare_dynamic(device_path=None)
def test_prepare_database(self):
self._prepare_dynamic(databases=['db1'])
def test_prepare_from_backup(self):
self._prepare_dynamic(['db2'], backup_id='123backup')
@patch.object(configuration.ConfigurationManager, 'save_configuration')
def _prepare_dynamic(self, packages=None, databases=None, users=None,
config_content='MockContent', device_path='/dev/vdb',
is_db_installed=True, backup_id=None, overrides=None):
backup_info = {'id': backup_id,
'location': 'fake-location',
'type': 'DB2Backup',
'checksum': 'fake-checksum'} if backup_id else None
mock_status = MagicMock()
mock_app = MagicMock()
self.manager.appStatus = mock_status
self.manager.app = mock_app
mock_status.begin_install = MagicMock(return_value=None)
mock_app.change_ownership = MagicMock(return_value=None)
mock_app.restart = MagicMock(return_value=None)
mock_app.start_db = MagicMock(return_value=None)
mock_app.stop_db = MagicMock(return_value=None)
volume.VolumeDevice.format = MagicMock(return_value=None)
volume.VolumeDevice.mount = MagicMock(return_value=None)
volume.VolumeDevice.mount_points = MagicMock(return_value=[])
db2_service.DB2Admin.create_user = MagicMock(return_value=None)
db2_service.DB2Admin.create_database = MagicMock(return_value=None)
backup.restore = MagicMock(return_value=None)
with patch.object(pkg.Package, 'pkg_is_installed',
return_value=MagicMock(
return_value=is_db_installed)):
self.manager.prepare(context=self.context, packages=packages,
config_contents=config_content,
databases=databases,
memory_mb='2048', users=users,
device_path=device_path,
mount_point="/home/db2inst1/db2inst1",
backup_info=backup_info,
overrides=None,
cluster_config=None)
mock_status.begin_install.assert_any_call()
self.assertEqual(1, mock_app.change_ownership.call_count)
if databases:
self.assertTrue(db2_service.DB2Admin.create_database.called)
else:
self.assertFalse(db2_service.DB2Admin.create_database.called)
if users:
self.assertTrue(db2_service.DB2Admin.create_user.called)
else:
self.assertFalse(db2_service.DB2Admin.create_user.called)
if backup_id:
backup.restore.assert_any_call(self.context,
backup_info,
'/home/db2inst1/db2inst1')
self.assertTrue(
self.manager.configuration_manager.save_configuration.called
)
def test_restart(self):
mock_status = MagicMock()
self.manager.appStatus = mock_status
with patch.object(db2_service.DB2App, 'restart',
return_value=None) as restart_mock:
# invocation
self.manager.restart(self.context)
# verification/assertion
restart_mock.assert_any_call()
def test_stop_db(self):
mock_status = MagicMock()
self.manager.appStatus = mock_status
db2_service.DB2App.stop_db = MagicMock(return_value=None)
self.manager.stop_db(self.context)
db2_service.DB2App.stop_db.assert_any_call(
do_not_start_on_reboot=False)
def test_start_db_with_conf_changes(self):
with patch.object(db2_service.DB2App, 'start_db_with_conf_changes'):
self.manager.start_db_with_conf_changes(self.context, 'something')
db2_service.DB2App.start_db_with_conf_changes.assert_any_call(
'something')
def test_create_database(self):
mock_status = MagicMock()
self.manager.appStatus = mock_status
db2_service.DB2Admin.create_database = MagicMock(return_value=None)
self.manager.create_database(self.context, ['db1'])
db2_service.DB2Admin.create_database.assert_any_call(['db1'])
def test_create_user(self):
mock_status = MagicMock()
self.manager.appStatus = mock_status
db2_service.DB2Admin.create_user = MagicMock(return_value=None)
self.manager.create_user(self.context, ['user1'])
db2_service.DB2Admin.create_user.assert_any_call(['user1'])
def test_delete_database(self):
databases = ['db1']
mock_status = MagicMock()
self.manager.appStatus = mock_status
db2_service.DB2Admin.delete_database = MagicMock(return_value=None)
self.manager.delete_database(self.context, databases)
db2_service.DB2Admin.delete_database.assert_any_call(databases)
def test_delete_user(self):
user = ['user1']
mock_status = MagicMock()
self.manager.appStatus = mock_status
db2_service.DB2Admin.delete_user = MagicMock(return_value=None)
self.manager.delete_user(self.context, user)
db2_service.DB2Admin.delete_user.assert_any_call(user)
def test_list_databases(self):
mock_status = MagicMock()
self.manager.appStatus = mock_status
db2_service.DB2Admin.list_databases = MagicMock(
return_value=['database1'])
databases = self.manager.list_databases(self.context)
self.assertThat(databases, Not(Is(None)))
self.assertThat(databases, Equals(['database1']))
db2_service.DB2Admin.list_databases.assert_any_call(None, None, False)
def test_list_users(self):
db2_service.DB2Admin.list_users = MagicMock(return_value=['user1'])
users = self.manager.list_users(self.context)
self.assertThat(users, Equals(['user1']))
db2_service.DB2Admin.list_users.assert_any_call(None, None, False)
@patch.object(db2_service.DB2Admin, 'get_user',
return_value=MagicMock(return_value=['user1']))
def test_get_users(self, get_user_mock):
username = ['user1']
hostname = ['host']
mock_status = MagicMock()
self.manager.appStatus = mock_status
users = self.manager.get_user(self.context, username, hostname)
self.assertThat(users, Equals(get_user_mock.return_value))
get_user_mock.assert_any_call(username, hostname)
def test_rpc_ping(self):
output = self.manager.rpc_ping(self.context)
self.assertTrue(output)
    def test_update_update_overrides(self):
        # Use a name that does not shadow the imported `configuration` module.
        overrides = {"DIAGSIZE": 50}
        db2_service.DB2App.update_overrides = MagicMock()
        self.manager.update_overrides(self.context, overrides, False)
        db2_service.DB2App.update_overrides.assert_any_call(self.context,
                                                            overrides)
    def test_reset_update_overrides(self):
        overrides = {}
        db2_service.DB2App.remove_overrides = MagicMock()
        self.manager.update_overrides(self.context, overrides, True)
        db2_service.DB2App.remove_overrides.assert_any_call()
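# Illustrative invocation (the module path is hypothetical; adjust it to the
# actual test file name in the tree):
#   tox -e py27 -- trove.tests.unittests.guestagent.test_db2_manager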
|
|
"""Kernel Principal Components Analysis"""
# Author: Mathieu Blondel <mathieu@mblondel.org>
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from scipy.sparse.linalg import eigsh
from ..utils import check_random_state
from ..utils.validation import check_is_fitted, check_array
from ..exceptions import NotFittedError
from ..base import BaseEstimator, TransformerMixin
from ..preprocessing import KernelCenterer
from ..metrics.pairwise import pairwise_kernels
class KernelPCA(BaseEstimator, TransformerMixin):
"""Kernel Principal component analysis (KPCA)
Non-linear dimensionality reduction through the use of kernels (see
:ref:`metrics`).
Read more in the :ref:`User Guide <kernel_PCA>`.
Parameters
----------
n_components : int, default=None
Number of components. If None, all non-zero components are kept.
kernel : "linear" | "poly" | "rbf" | "sigmoid" | "cosine" | "precomputed"
Kernel. Default="linear".
gamma : float, default=1/n_features
Kernel coefficient for rbf, poly and sigmoid kernels. Ignored by other
kernels.
degree : int, default=3
Degree for poly kernels. Ignored by other kernels.
coef0 : float, default=1
Independent term in poly and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, default=None
Parameters (keyword arguments) and values for kernel passed as
callable object. Ignored by other kernels.
    alpha : float, default=1.0
Hyperparameter of the ridge regression that learns the
inverse transform (when fit_inverse_transform=True).
fit_inverse_transform : bool, default=False
Learn the inverse transform for non-precomputed kernels.
(i.e. learn to find the pre-image of a point)
eigen_solver : string ['auto'|'dense'|'arpack'], default='auto'
Select eigensolver to use. If n_components is much less than
the number of training samples, arpack may be more efficient
than the dense eigensolver.
tol : float, default=0
Convergence tolerance for arpack.
If 0, optimal value will be chosen by arpack.
max_iter : int, default=None
Maximum number of iterations for arpack.
If None, optimal value will be chosen by arpack.
remove_zero_eig : boolean, default=False
If True, then all components with zero eigenvalues are removed, so
that the number of components in the output may be < n_components
(and sometimes even zero due to numerical instability).
When n_components is None, this parameter is ignored and components
with zero eigenvalues are removed regardless.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`. Used when ``eigen_solver`` == 'arpack'.
.. versionadded:: 0.18
copy_X : boolean, default=True
If True, input X is copied and stored by the model in the `X_fit_`
attribute. If no further changes will be done to X, setting
`copy_X=False` saves memory by storing a reference.
.. versionadded:: 0.18
n_jobs : int or None, optional (default=None)
The number of parallel jobs to run.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionadded:: 0.18
Attributes
----------
lambdas_ : array, (n_components,)
Eigenvalues of the centered kernel matrix in decreasing order.
If `n_components` and `remove_zero_eig` are not set,
then all values are stored.
alphas_ : array, (n_samples, n_components)
Eigenvectors of the centered kernel matrix. If `n_components` and
`remove_zero_eig` are not set, then all components are stored.
dual_coef_ : array, (n_samples, n_features)
Inverse transform matrix. Only available when
``fit_inverse_transform`` is True.
X_transformed_fit_ : array, (n_samples, n_components)
Projection of the fitted data on the kernel principal components.
Only available when ``fit_inverse_transform`` is True.
X_fit_ : (n_samples, n_features)
The data used to fit the model. If `copy_X=False`, then `X_fit_` is
a reference. This attribute is used for the calls to transform.
Examples
--------
>>> from sklearn.datasets import load_digits
>>> from sklearn.decomposition import KernelPCA
>>> X, _ = load_digits(return_X_y=True)
>>> transformer = KernelPCA(n_components=7, kernel='linear')
>>> X_transformed = transformer.fit_transform(X)
>>> X_transformed.shape
(1797, 7)
References
----------
Kernel PCA was introduced in:
Bernhard Schoelkopf, Alexander J. Smola,
and Klaus-Robert Mueller. 1999. Kernel principal
component analysis. In Advances in kernel methods,
MIT Press, Cambridge, MA, USA 327-352.
"""
def __init__(self, n_components=None, kernel="linear",
gamma=None, degree=3, coef0=1, kernel_params=None,
alpha=1.0, fit_inverse_transform=False, eigen_solver='auto',
tol=0, max_iter=None, remove_zero_eig=False,
random_state=None, copy_X=True, n_jobs=None):
if fit_inverse_transform and kernel == 'precomputed':
raise ValueError(
"Cannot fit_inverse_transform with a precomputed kernel.")
self.n_components = n_components
self.kernel = kernel
self.kernel_params = kernel_params
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.alpha = alpha
self.fit_inverse_transform = fit_inverse_transform
self.eigen_solver = eigen_solver
self.remove_zero_eig = remove_zero_eig
self.tol = tol
self.max_iter = max_iter
self.random_state = random_state
self.n_jobs = n_jobs
self.copy_X = copy_X
@property
def _pairwise(self):
return self.kernel == "precomputed"
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, n_jobs=self.n_jobs,
**params)
def _fit_transform(self, K):
""" Fit's using kernel K"""
# center kernel
K = self._centerer.fit_transform(K)
if self.n_components is None:
n_components = K.shape[0]
else:
n_components = min(K.shape[0], self.n_components)
# compute eigenvectors
if self.eigen_solver == 'auto':
if K.shape[0] > 200 and n_components < 10:
eigen_solver = 'arpack'
else:
eigen_solver = 'dense'
else:
eigen_solver = self.eigen_solver
if eigen_solver == 'dense':
self.lambdas_, self.alphas_ = linalg.eigh(
K, eigvals=(K.shape[0] - n_components, K.shape[0] - 1))
elif eigen_solver == 'arpack':
random_state = check_random_state(self.random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, K.shape[0])
self.lambdas_, self.alphas_ = eigsh(K, n_components,
which="LA",
tol=self.tol,
maxiter=self.max_iter,
v0=v0)
# sort eigenvectors in descending order
indices = self.lambdas_.argsort()[::-1]
self.lambdas_ = self.lambdas_[indices]
self.alphas_ = self.alphas_[:, indices]
# remove eigenvectors with a zero eigenvalue
if self.remove_zero_eig or self.n_components is None:
self.alphas_ = self.alphas_[:, self.lambdas_ > 0]
self.lambdas_ = self.lambdas_[self.lambdas_ > 0]
return K
def _fit_inverse_transform(self, X_transformed, X):
if hasattr(X, "tocsr"):
raise NotImplementedError("Inverse transform not implemented for "
"sparse matrices!")
n_samples = X_transformed.shape[0]
K = self._get_kernel(X_transformed)
K.flat[::n_samples + 1] += self.alpha
self.dual_coef_ = linalg.solve(K, X, sym_pos=True, overwrite_a=True)
self.X_transformed_fit_ = X_transformed
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
X = check_array(X, accept_sparse='csr', copy=self.copy_X)
self._centerer = KernelCenterer()
K = self._get_kernel(X)
self._fit_transform(K)
if self.fit_inverse_transform:
sqrt_lambdas = np.diag(np.sqrt(self.lambdas_))
X_transformed = np.dot(self.alphas_, sqrt_lambdas)
self._fit_inverse_transform(X_transformed, X)
self.X_fit_ = X
return self
def fit_transform(self, X, y=None, **params):
"""Fit the model from data in X and transform X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
self.fit(X, **params)
X_transformed = self.alphas_ * np.sqrt(self.lambdas_)
if self.fit_inverse_transform:
self._fit_inverse_transform(X_transformed, X)
return X_transformed
def transform(self, X):
"""Transform X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'X_fit_')
K = self._centerer.transform(self._get_kernel(X, self.X_fit_))
return np.dot(K, self.alphas_ / np.sqrt(self.lambdas_))
def inverse_transform(self, X):
"""Transform X back to original space.
Parameters
----------
X : array-like, shape (n_samples, n_components)
Returns
-------
X_new : array-like, shape (n_samples, n_features)
References
----------
"Learning to Find Pre-Images", G BakIr et al, 2004.
"""
if not self.fit_inverse_transform:
raise NotFittedError("The fit_inverse_transform parameter was not"
" set to True when instantiating and hence "
"the inverse transform is not available.")
K = self._get_kernel(X, self.X_transformed_fit_)
return np.dot(K, self.dual_coef_)
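# A minimal usage sketch for the inverse transform (illustrative only; it
# mirrors the docstring example but with fit_inverse_transform enabled):
#
#   from sklearn.datasets import load_digits
#   from sklearn.decomposition import KernelPCA
#   X, _ = load_digits(return_X_y=True)
#   kpca = KernelPCA(n_components=7, kernel='rbf',
#                    fit_inverse_transform=True)
#   X_low = kpca.fit_transform(X)
#   X_approx = kpca.inverse_transform(X_low)  # approximate pre-images in
#                                             # the original feature space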
|
|
# Copyright 2014 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import json
from neutron.openstack.common import log
from neutron.plugins.vmware.api_client import exception as api_exc
from neutron.plugins.vmware.common import exceptions as nsx_exc
from neutron.plugins.vmware.common import utils
from neutron.plugins.vmware.nsxlib import _build_uri_path
from neutron.plugins.vmware.nsxlib import do_request
from neutron.plugins.vmware.nsxlib import get_all_query_pages
from neutron.plugins.vmware.nsxlib import switch
HTTP_GET = "GET"
HTTP_POST = "POST"
HTTP_DELETE = "DELETE"
HTTP_PUT = "PUT"
GWSERVICE_RESOURCE = "gateway-service"
TRANSPORTNODE_RESOURCE = "transport-node"
LOG = log.getLogger(__name__)
def create_l2_gw_service(cluster, tenant_id, display_name, devices):
"""Create a NSX Layer-2 Network Gateway Service.
:param cluster: The target NSX cluster
    :param tenant_id: Identifier of the OpenStack tenant for which
        the gateway service is created.
:param display_name: Descriptive name of this gateway service
:param devices: List of transport node uuids (and network
interfaces on them) to use for the network gateway service
:raise NsxApiException: if there is a problem while communicating
with the NSX controller
"""
    # NOTE(salvatore-orlando): This is a little confusing, but device_id in
    # NSX is actually the identifier of a physical interface on the gateway
    # device, which in the Neutron API is referred to as interface_name
gateways = [{"transport_node_uuid": device['id'],
"device_id": device['interface_name'],
"type": "L2Gateway"} for device in devices]
gwservice_obj = {
"display_name": utils.check_and_truncate(display_name),
"tags": utils.get_tags(os_tid=tenant_id),
"gateways": gateways,
"type": "L2GatewayServiceConfig"
}
return do_request(
HTTP_POST, _build_uri_path(GWSERVICE_RESOURCE),
json.dumps(gwservice_obj), cluster=cluster)
def plug_l2_gw_service(cluster, lswitch_id, lport_id,
gateway_id, vlan_id=None):
"""Plug a Layer-2 Gateway Attachment object in a logical port."""
att_obj = {'type': 'L2GatewayAttachment',
'l2_gateway_service_uuid': gateway_id}
if vlan_id:
att_obj['vlan_id'] = vlan_id
return switch.plug_interface(cluster, lswitch_id, lport_id, att_obj)
def get_l2_gw_service(cluster, gateway_id):
return do_request(
HTTP_GET, _build_uri_path(GWSERVICE_RESOURCE,
resource_id=gateway_id),
cluster=cluster)
def get_l2_gw_services(cluster, tenant_id=None,
fields=None, filters=None):
actual_filters = dict(filters or {})
if tenant_id:
actual_filters['tag'] = tenant_id
actual_filters['tag_scope'] = 'os_tid'
return get_all_query_pages(
_build_uri_path(GWSERVICE_RESOURCE,
filters=actual_filters),
cluster)
def update_l2_gw_service(cluster, gateway_id, display_name):
# TODO(salvatore-orlando): Allow updates for gateways too
gwservice_obj = get_l2_gw_service(cluster, gateway_id)
if not display_name:
# Nothing to update
return gwservice_obj
gwservice_obj["display_name"] = utils.check_and_truncate(display_name)
return do_request(HTTP_PUT, _build_uri_path(GWSERVICE_RESOURCE,
resource_id=gateway_id),
json.dumps(gwservice_obj), cluster=cluster)
def delete_l2_gw_service(cluster, gateway_id):
do_request(HTTP_DELETE, _build_uri_path(GWSERVICE_RESOURCE,
resource_id=gateway_id),
cluster=cluster)
def _build_gateway_device_body(tenant_id, display_name, neutron_id,
connector_type, connector_ip,
client_certificate, tz_uuid):
connector_type_mappings = {
utils.NetworkTypes.STT: "STTConnector",
utils.NetworkTypes.GRE: "GREConnector",
utils.NetworkTypes.BRIDGE: "BridgeConnector",
'ipsec%s' % utils.NetworkTypes.STT: "IPsecSTT",
'ipsec%s' % utils.NetworkTypes.GRE: "IPsecGRE"}
nsx_connector_type = connector_type_mappings.get(connector_type)
body = {"display_name": utils.check_and_truncate(display_name),
"tags": utils.get_tags(os_tid=tenant_id,
q_gw_dev_id=neutron_id),
"admin_status_enabled": True}
if connector_ip and nsx_connector_type:
body["transport_connectors"] = [
{"transport_zone_uuid": tz_uuid,
"ip_address": connector_ip,
"type": nsx_connector_type}]
if client_certificate:
body["credential"] = {"client_certificate":
{"pem_encoded": client_certificate},
"type": "SecurityCertificateCredential"}
return body
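# For illustration, with connector_type=utils.NetworkTypes.STT and a client
# certificate, the helper above yields a body shaped roughly like this
# (values hypothetical; the tag layout is whatever utils.get_tags produces):
#
#   {'display_name': 'gw-dev-1',
#    'tags': <tags for os_tid and q_gw_dev_id>,
#    'admin_status_enabled': True,
#    'transport_connectors': [{'transport_zone_uuid': 'tz-uuid',
#                              'ip_address': '10.0.0.2',
#                              'type': 'STTConnector'}],
#    'credential': {'client_certificate': {'pem_encoded': '...'},
#                   'type': 'SecurityCertificateCredential'}}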
def create_gateway_device(cluster, tenant_id, display_name, neutron_id,
tz_uuid, connector_type, connector_ip,
client_certificate):
body = _build_gateway_device_body(tenant_id, display_name, neutron_id,
connector_type, connector_ip,
client_certificate, tz_uuid)
try:
return do_request(
HTTP_POST, _build_uri_path(TRANSPORTNODE_RESOURCE),
json.dumps(body), cluster=cluster)
except api_exc.InvalidSecurityCertificate:
raise nsx_exc.InvalidSecurityCertificate()
def update_gateway_device(cluster, gateway_id, tenant_id,
display_name, neutron_id,
tz_uuid, connector_type, connector_ip,
client_certificate):
body = _build_gateway_device_body(tenant_id, display_name, neutron_id,
connector_type, connector_ip,
client_certificate, tz_uuid)
try:
return do_request(
HTTP_PUT,
_build_uri_path(TRANSPORTNODE_RESOURCE, resource_id=gateway_id),
json.dumps(body), cluster=cluster)
except api_exc.InvalidSecurityCertificate:
raise nsx_exc.InvalidSecurityCertificate()
def delete_gateway_device(cluster, device_uuid):
return do_request(HTTP_DELETE,
_build_uri_path(TRANSPORTNODE_RESOURCE,
device_uuid),
cluster=cluster)
def get_gateway_device_status(cluster, device_uuid):
status_res = do_request(HTTP_GET,
_build_uri_path(TRANSPORTNODE_RESOURCE,
device_uuid,
extra_action='status'),
cluster=cluster)
# Returns the connection status
return status_res['connection']['connected']
def get_gateway_devices_status(cluster, tenant_id=None):
if tenant_id:
gw_device_query_path = _build_uri_path(
TRANSPORTNODE_RESOURCE,
fields="uuid,tags",
relations="TransportNodeStatus",
filters={'tag': tenant_id,
'tag_scope': 'os_tid'})
else:
gw_device_query_path = _build_uri_path(
TRANSPORTNODE_RESOURCE,
fields="uuid,tags",
relations="TransportNodeStatus")
response = get_all_query_pages(gw_device_query_path, cluster)
results = {}
for item in response:
results[item['uuid']] = (item['_relations']['TransportNodeStatus']
['connection']['connected'])
return results
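# The function above returns a mapping from transport-node uuid to its
# connection state, e.g. (hypothetical uuids):
#
#   {'tnode-uuid-1': True, 'tnode-uuid-2': False}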
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Basic Task classes."""
import functools
import itertools
from absl import logging
from flax.deprecated import nn
from flax.training import common_utils
import jax
import jax.numpy as jnp
import scipy
from gift.data import all_datasets
from gift.tasks import all_metrics
from gift.tasks import domain_mapping_utils
from gift.tasks import metrics
class Task(object):
"""Base Task class.
Task objects contain all the information about the objective of the
training, evaluation metrics, and the dataset.
"""
def __init__(self, task_params, num_shards, regularisers=None):
"""Init task objects.
Args:
task_params: ConfigDict; hyperparameters of the task.
num_shards: int; Number of shards used for data parallelization (should
normally be set to `jax.device_count()`).
      regularisers: list of functions; List of auxiliary losses that get module
        parameters as input (L2 loss is handled separately).
"""
self.task_params = task_params
self.dataset_name = task_params.get('dataset_name')
self.regularisers = regularisers
self.load_dataset(self.dataset_name, num_shards)
self.task_params.output_dim = self.dataset.meta_data['num_classes']
def load_dataset(self, dataset_name, num_shards):
"""Loads the dataset for the task.
Args:
dataset_name: str; Name of the dataset.
num_shards: int; Number of shards used for data parallelization (should
normally be set to `jax.device_count()`).
"""
self.dataset = all_datasets.get_dataset(dataset_name)(
batch_size=self.task_params.local_batch_size,
eval_batch_size=self.task_params.eval_local_batch_size,
resolution=self.task_params.get('resolution', None),
data_augmentations=self.task_params.get('data_augmentations', None),
teacher_data_augmentations=self.task_params.get(
'teacher_data_augmentations', None),
num_shards=num_shards)
def loss_function(self, logits, batch, model_params=None, step=None):
raise NotImplementedError
def metrics_fn(self, logits, batch):
raise NotImplementedError
def get_l2_rate(self, step):
del step
return self.task_params.get('l2_decay_factor')
class ClassificationTask(Task):
"""Classification Task."""
def __init__(self, task_params, num_shards):
"""Initializing Classification based Tasks.
Args:
task_params: configdict; Hyperparameters of the task.
      num_shards: int; Number of devices that we shard the batch over.
"""
super().__init__(task_params, num_shards)
loss_fn_name = self.task_params.get('main_loss', None)
if loss_fn_name is None:
if self.dataset.meta_data['num_classes'] == 1:
# Use the loss function for binary classification.
loss_fn_name = 'sigmoid_cross_entropy'
else:
loss_fn_name = 'categorical_cross_entropy'
self.main_loss_fn = functools.partial(metrics.weighted_loss,
all_metrics.ALL_LOSSES[loss_fn_name])
_METRICS = all_metrics.CLASSIFICATION_METRICS
def metrics_fn(self, logits, batch):
"""Calculates metrics for the classification task.
Args:
logits: float array; Output of the model->[batch, length, num_classes].
batch: dict; Batch of data that has 'label' and optionally 'weights'.
Returns:
a dict of metrics.
"""
target_is_onehot = logits.shape == batch['label'].shape
if target_is_onehot:
one_hot_targets = batch['label']
else:
one_hot_targets = common_utils.onehot(batch['label'], logits.shape[-1])
if self.dataset.meta_data['num_classes'] == 1:
# If this is a binary classification task, make sure the shape of labels
# is (bs, 1) and is the same as the shape of logits.
one_hot_targets = jnp.reshape(one_hot_targets, logits.shape)
if self.task_params.get('class_indices'):
possible_labels_indices = self.task_params.get('class_indices')
one_hot_targets = one_hot_targets[:, possible_labels_indices]
logits = logits[:, possible_labels_indices]
weights = batch.get('weights') # weights might not be defined
metrics_dic = {}
for key in self._METRICS:
metric_val, metric_normalizer = self._METRICS[key](logits,
one_hot_targets,
weights)
metrics_dic[key] = (jax.lax.psum(metric_val, 'batch'),
jax.lax.psum(metric_normalizer, 'batch'))
# Store dataset related factors.
for key in batch:
if 'factor' in key:
factors = batch[key]
if weights is not None:
val = jnp.sum(metrics.apply_weights(factors, weights))
norm = jnp.sum(weights)
else:
val = jnp.sum(factors)
norm = len(factors)
metrics_dic[key] = (jax.lax.psum(val,
'batch'), jax.lax.psum(norm, 'batch'))
return metrics_dic
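  # Note on the structure of the returned metrics: every entry is a
  # (value, normalizer) pair already summed across devices with
  # jax.lax.psum, so the reported metric is recovered later as
  # value / normalizer. A hypothetical result (the actual keys come from
  # all_metrics.CLASSIFICATION_METRICS):
  #
  #   {'accuracy': (num_correct_sum, weight_sum), ...}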
def loss_function(self, logits, batch, model_params=None, step=None):
"""Return cross entropy loss with an L2 penalty on the weights."""
weights = batch.get('weights')
if self.dataset.meta_data['num_classes'] == 1:
# If this is a binary classification task, make sure the shape of labels
# is (bs, 1) and is the same as the shape of logits.
targets = jnp.reshape(batch['label'], logits.shape)
elif batch['label'].shape[-1] == self.dataset.meta_data['num_classes']:
      # If the labels already have shape (bs, num_classes), use them as is.
targets = batch['label']
else:
# Otherwise convert the labels to onehot labels.
targets = common_utils.onehot(batch['label'], logits.shape[-1])
loss_value, loss_normalizer = self.main_loss_fn(
logits,
targets,
weights,
label_smoothing=self.task_params.get('label_smoothing'))
total_loss = loss_value / loss_normalizer
if model_params:
l2_decay_factor = self.get_l2_rate(step)
if l2_decay_factor is not None:
l2_loss = metrics.l2_regularization(
model_params,
include_bias_terms=self.task_params.get('l2_for_bias', False))
total_loss = total_loss + 0.5 * l2_decay_factor * l2_loss
if self.regularisers:
for reg_fn in self.regularisers:
total_loss += reg_fn(model_params)
return total_loss
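  # Schematically, and ignoring the extra regularisers, the loss computed
  # above is
  #
  #   loss = main_loss(logits, targets) / normalizer
  #          + 0.5 * l2_decay_factor * ||params||^2
  #
  # with the L2 term restricted to non-bias parameters unless l2_for_bias
  # is set.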
class MultiEnvClassificationTask(ClassificationTask):
"""Multi environment classification Task."""
_METRICS = all_metrics.MULTI_ENV_CLASSIFICATION_METRICS
def load_dataset(self, dataset_name, num_shards):
"""Loads the dataset for the task.
Args:
dataset_name: str; Name of the dataset.
num_shards: int; Number of shards used for data parallelization (should
normally be set to `jax.device_count()`).
"""
self.dataset = all_datasets.get_dataset(dataset_name)(
batch_size=self.task_params.local_batch_size,
eval_batch_size=self.task_params.eval_local_batch_size,
num_shards=num_shards,
resolution=self.task_params.get('resolution', None),
data_augmentations=self.task_params.get('data_augmentations', None),
teacher_data_augmentations=self.task_params.get(
'teacher_data_augmentations', None),
train_environments=self.task_params.train_environments,
eval_environments=self.task_params.eval_environments)
def aggregate_envs_losses(self, env_losses):
"""Aggregate losses of all environments.
Args:
env_losses: list(float); list of losses of the environments.
Returns:
Average of the env losses.
"""
return jnp.mean(jnp.array(env_losses))
def environments_penalties(self, env_logits, env_batches):
"""Computes a penalty term based on inconsistencies between different env.
Args:
env_logits: list(dict); List of logits for examples from different
environment (env_logits[0] is the logits for examples from env 0 which
is a float array of shape `[batch, length, num_classes]`).
env_batches: list(dict); List of batches of examples from different
environment (env_batches[0] is a batch dict for examples from env 0 that
has 'label' and optionally 'weights'.).
Returns:
Environments penalty term for the loss.
"""
del env_logits
del env_batches
return 0
def penalty_weight(self, step):
"""Return the weight of the environments penalty term in the loss.
Args:
step: int; Number of training steps passed so far.
Returns:
float; Weight of the environment penalty term.
"""
del step
return 0
def metrics_fn(self, env_logits, env_batches, env_ids, params):
"""Calculates metrics for the classification task.
Args:
env_logits: list(dict); List of logits for examples from different
environment (env_logits[0] is the logits for examples from env 0 which
is a float array of shape `[batch, length, num_classes]`).
env_batches: list(dict); List of batches of examples from different
environment (env_batches[0] is a batch dict for examples from env 0 that
has 'label' and optionally 'weights'.).
env_ids: list(int); List of environment codes.
params: pytree; parameters of the model.
Returns:
a dict of metrics.
"""
metrics_dic = {}
envs_metrics_dic = {}
    # Add all the keys to envs_metrics_dic; each key will point to a list of
    # values from the corresponding metric for each environment.
# Task related metrics
for key in self._METRICS:
envs_metrics_dic[key] = []
# Dataset related metrics (e.g., perturbation factors)
for key in env_batches[0]:
if 'factor' in key:
envs_metrics_dic[key] = []
for i in range(len(env_logits)):
logits = env_logits[i]
batch = env_batches[i]
env_name = self.dataset.get_full_env_name(self.dataset.id2env(env_ids[i]))
env_metric_dic = super().metrics_fn(logits, batch)
for key in env_metric_dic:
metrics_dic[env_name + '/' + key] = env_metric_dic[key]
envs_metrics_dic[key].append(env_metric_dic[key])
    # Add overall metric values over all environments.
    for key in self._METRICS:
      env_vals = jnp.array(envs_metrics_dic[key])
      metrics_dic[key] = (jnp.sum(env_vals[:, 0]), jnp.sum(env_vals[:, 1]))
if params:
metrics_dic['l2'] = metrics.l2_regularization(
params, include_bias_terms=self.task_params.get('l2_for_bias', False))
return metrics_dic
def get_env_losses(self, env_logits, env_batches):
"""Computes and return the loss on each environment.
Args:
env_logits: list(dict); List of logits for examples from different
environment (env_logits[0] is the logits for examples from env 0).
env_batches: list(dict); List of batches of examples from different
environment (env_batches[0] is a batch dict for examples from env 0).
Returns:
List of loss values in all environments.
"""
env_losses = []
for i in range(len(env_logits)):
logits = env_logits[i]
batch = env_batches[i]
ce_loss = super().loss_function(logits, batch)
env_losses.append(ce_loss)
return env_losses
def loss_function(self, env_logits, env_batches, model_params=None, step=0):
"""Returns loss with an L2 penalty on the weights.
Args:
env_logits: list(dict); List of logits for examples from different
environment (env_logits[0] is the logits for examples from env 0).
env_batches: list(dict); List of batches of examples from different
environment (env_batches[0] is a batch dict for examples from env 0).
      model_params: dict; Parameters of the model (used to compute l2).
step: int; Global training step.
Returns:
Total loss.
"""
env_losses = self.get_env_losses(env_logits, env_batches)
total_loss = self.aggregate_envs_losses(env_losses)
p_weight = self.penalty_weight(step)
total_loss += p_weight * self.environments_penalties(
env_logits, env_batches)
if model_params:
l2_decay_rate = self.get_l2_rate(step)
if l2_decay_rate is not None:
l2_loss = metrics.l2_regularization(
model_params,
include_bias_terms=self.task_params.get('l2_for_bias', False))
total_loss = total_loss + 0.5 * l2_decay_rate * l2_loss
if self.regularisers:
for reg_fn in self.regularisers:
reg_value = reg_fn(model_params)
total_loss += reg_value
    # If p_weight > 1, rescale the entire loss to keep gradients in a
    # reasonable range.
total_loss /= jnp.maximum(p_weight, 1)
return total_loss
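  # Schematically, the multi-environment loss above is
  #
  #   loss = (mean_e(ce_e) + p(step) * penalty + l2_terms) / max(p(step), 1)
  #
  # so once the penalty weight p(step) grows past 1 the whole objective is
  # rescaled to keep gradient magnitudes in a reasonable range.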
class MultiEnvIRMClassificationTask(MultiEnvClassificationTask):
"""Multi environment task with IRM loss.
Reference:[Invariant Risk Minimization](https://arxiv.org/abs/1907.02893)
"""
def penalty_weight(self, step):
"""Return the weight of the environments penalty term in the loss.
This is a step-function (assuming step is an integer).
Here is what we want to do in this method:
def f(step):
if step < self.task_params.penalty_anneal_iters:
return 1
else:
return self.task_params.penalty_weight
Because this method is called within a jax pmapped function, we cannot
use if-statements that depend on the input arguments. Luckily, the
functionality we need can be implemented as the sum of two step functions
which we can implement with min max operations, conditioned on the step
argument and task_params.penalty_anneal_iters being integers.
standard step function: 1 if x > b else 0 --> max(0, min(1, x-b))
Args:
step: int; Number of training steps passed so far.
Returns:
float; Weight of the environment penalty term.
"""
if step is None:
step = 0
# make sure penalty_anneal_iters is an integer.
assert self.task_params.penalty_anneal_iters == int(
self.task_params.penalty_anneal_iters), ('The penalty_anneal_iters '
'param is not an integer.')
b = self.task_params.penalty_anneal_iters
w1 = jnp.maximum(0.0, jnp.minimum(1, b - step))
w2 = jnp.maximum(0.0, jnp.minimum(
1, step - b)) * self.task_params.penalty_weight
return w1 + w2
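  # A quick sanity check of the min/max construction above, assuming
  # penalty_anneal_iters=100 and penalty_weight=1000:
  #
  #   step=99  -> w1 = max(0, min(1, 1)) = 1,  w2 = 0             -> 1.0
  #   step=100 -> w1 = 0,                      w2 = 0             -> 0.0
  #   step=101 -> w1 = 0,  w2 = max(0, min(1, 1)) * 1000          -> 1000.0
  #
  # i.e. the weight is 1 before the annealing point and penalty_weight after
  # it (and, as a quirk of this construction, 0 exactly at step == b).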
def environments_penalties(self, env_logits, env_batches):
"""Computes the penalty part of the IRM loss.
Args:
env_logits: list(dict); List of logits for examples from different
environment (env_logits[0] is the logits for examples from env 0).
env_batches: list(dict); List of batches of examples from different
environment (env_batches[0] is a batch dict for examples from env 0).
Returns:
Average of environment penalties (IRM penalty).
"""
penalties = []
for i in range(len(env_logits)):
logits = env_logits[i]
batch = env_batches[i]
weights = batch.get('weights')
if self.dataset.meta_data['num_classes'] == 1:
# If this is a binary classification task, make sure the shape of labels
# is (bs, 1) and is the same as the shape of logits.
targets = jnp.reshape(batch['label'], logits.shape)
elif batch['label'].shape[-1] == self.dataset.meta_data['num_classes']:
# If the labels are already the shape of (bs, num_classes) use them as
# they are.
targets = batch['label']
else:
# Otherwise convert the labels to onehot labels.
targets = common_utils.onehot(batch['label'], logits.shape[-1])
penalties.append(
metrics.irm_env_penalty(
logits=logits,
targets=targets,
weights=weights,
loss_fn=self.main_loss_fn)[0])
return jnp.mean(jnp.array(penalties))
class MultiEnvVRexClassificationTask(MultiEnvClassificationTask):
"""Multi environment task with V-Rex loss.
Reference:
[Out-of-Distribution Generalization via Risk Extrapolation]
(https://arxiv.org/pdf/2003.00688.pdf)
"""
def environments_penalties(self, env_losses):
"""Computes the penalty part of the V-Rex loss.
Equation 9 in
[Out-of-Distribution Generalization via Risk Extrapolation]
(https://arxiv.org/pdf/2003.00688.pdf)
Args:
env_losses: list(float): Loss value for all the environments.
Returns:
V-Rex penalty (which is the variance of all losess).
"""
return jnp.var(jnp.array(env_losses))
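  # For example, env_losses = [0.5, 1.5] gives jnp.var(...) == 0.25, while
  # equal per-environment losses give 0, so this term only penalizes unequal
  # risk across environments.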
def penalty_weight(self, step):
"""Return the weight of the environments penalty term in the loss.
This is a step-function (assuming step is an integer).
Here is what we want to do in this method:
def f(step):
if step < self.task_params.penalty_anneal_iters:
return 1
else:
return self.task_params.penalty_weight
Because this method is called within a jax pmapped function, we cannot
use if-statements that depend on the input arguments. Luckily, the
functionality we need can be implemented as the sum of two step functions
which we can implement with min max operations, conditioned on the step
argument and task_params.penalty_anneal_iters being integers.
standard step function: 1 if x > b else 0 --> max(0, min(1, x-b))
Args:
step: int; Number of training steps passed so far.
Returns:
float; Weight of the environment penalty term.
"""
if step is None:
step = 0
# make sure penalty_anneal_iters is an integer.
assert self.task_params.penalty_anneal_iters == int(
self.task_params.penalty_anneal_iters), ('The penalty_anneal_iters '
'param is not an integer.')
b = self.task_params.penalty_anneal_iters
w1 = jnp.maximum(0.0, jnp.minimum(1, b - step))
w2 = jnp.maximum(0.0, jnp.minimum(
1, step - b)) * self.task_params.penalty_weight
return w1 + w2
def loss_function(self, env_logits, env_batches, model_params=None, step=0):
"""Returns loss with an L2 penalty on the weights.
Args:
env_logits: list(dict); List of logits for examples from different
environment (env_logits[0] is the logits for examples from env 0).
env_batches: list(dict); List of batches of examples from different
environment (env_batches[0] is a batch dict for examples from env 0).
      model_params: dict; Parameters of the model (used to compute l2).
step: int; Global training step.
Returns:
Total loss.
"""
env_losses = self.get_env_losses(env_logits, env_batches)
total_loss = self.aggregate_envs_losses(env_losses)
p_weight = self.penalty_weight(step)
total_loss += self.penalty_weight(step) * self.environments_penalties(
env_losses)
l2_decay_rate = self.get_l2_rate(step)
if l2_decay_rate is not None:
l2_loss = metrics.l2_regularization(
model_params,
include_bias_terms=self.task_params.get('l2_for_bias', False))
total_loss += 0.5 * l2_decay_rate * l2_loss
    # If p_weight > 1, rescale the entire loss to keep gradients in a
    # reasonable range.
total_loss /= jnp.maximum(p_weight, 1)
return total_loss
class MultiEnvLinearDomainMappingClassification(MultiEnvClassificationTask):
"""Multi environment task with Domain Mapping.
Domain mapping adds an auxiliary loss that encourages
the model to have equivariant representations with respect to the environment.
"""
def get_transformer_module(self, hidden_reps_dim):
class Linear(nn.Module):
def apply(self, x):
x = nn.Dense(x, hidden_reps_dim, name='l1', bias=True)
return x
return Linear
def setup_transformers(self, hidden_reps_dim):
"""Sets up linear transformers for the auxiliary loss.
Args:
      hidden_reps_dim: int; Dimensionality of the representational space (size
        of the representations used for computing the domain mapping loss).
"""
transformer_class = self.get_transformer_module(hidden_reps_dim)
self.state_transformers = {}
env_keys = list(map(int, self.dataset.splits.train.keys()))
# Get list of all possible environment pairs (this includes
# different permutations).
env_pairs = list(itertools.permutations(env_keys, 2))
rng = nn.make_rng()
for env_pair in env_pairs:
rng, params_rng = jax.random.split(rng)
_, init_params = transformer_class.init_by_shape(
params_rng, [((1, hidden_reps_dim), jnp.float32)])
self.state_transformers[env_pair] = nn.Model(transformer_class,
init_params)
def loss_function(self,
env_logits,
env_reps,
env_batches,
env_ids,
model_params=None,
step=0,
env_aligned_pairs_idx=None):
"""Returns loss with an L2 penalty on the weights.
Args:
env_logits: list(dict); List of logits for examples from different
environment (env_logits[0] is the logits for examples from env 0).
env_reps: list; hidden reps for different environments (similar to
env_logits).
env_batches: list(dict); List of batches of examples from different
environment (env_batches[0] is a batch dict for examples from env 0).
env_ids: list(int): List of environment codes.
model_params: dict; Parameters of the model (used to compute l2).
step: int; Global training step.
env_aligned_pairs_idx: dict; Environment pair --> alignment (if None the
alignment is computed).
Returns:
Total loss.
"""
total_loss = super().loss_function(
env_logits=env_logits,
env_batches=env_batches,
model_params=model_params,
step=step)
if env_ids:
# If env_ids is None, we do not compute domain_mapping_loss:
total_loss += self.domain_mapping_loss(env_reps, env_batches, env_ids,
env_aligned_pairs_idx)
return total_loss
def get_env_aligned_pairs_idx(self, env_reps, env_batches, env_ids):
"""Computes and returns aligned pairs.
Args:
env_reps: list; List of different envs representations.
env_batches: list; List of different envs batches.
env_ids: list(int): List of environment codes.
Returns:
Aligned pairs indices (aligned rows, aligned columns).
"""
env_pairs = list(itertools.permutations(env_ids, 2))
env_aligned_pairs_idx = {}
for pair in env_pairs:
e1, e2 = pair
# We only have state_transformer for training envs.
if pair not in self.state_transformers:
logging.warn('Pair %s is not in the training pairs set.', str(pair))
else:
e1_index = env_ids.index(e1)
e2_index = env_ids.index(e2)
e1_labels = env_batches[e1_index]['label']
e2_labels = env_batches[e2_index]['label']
# Get representations for env1.
e1_reps = env_reps[e1_index]
        # Get representations for env2.
e2_reps = env_reps[e2_index]
# Transform representations from env1.
transformed_e1 = self.state_transformers[pair](e1_reps)
env_aligned_pairs_idx[pair] = self.align_batches(
transformed_e1, e2_reps, e1_labels, e2_labels)
return env_aligned_pairs_idx
def get_bipartite_env_aligned_pairs_idx(self, env_reps, env_batches, env_ids,
env_reps2, env_batches2, env_ids2):
"""Computes and returns aligned pairs between two sets of environments.
Args:
env_reps: list; List of different envs representations.
env_batches: list; List of different envs batches.
env_ids: list(int): List of environment codes.
env_reps2: list; List of different envs representations.
env_batches2: list; List of different envs batches.
env_ids2: list(int): List of environment codes.
Returns:
Aligned pairs indices (aligned rows, aligned columns).
"""
env_pairs = list(itertools.product(env_ids, env_ids2))
env_aligned_pairs_idx = {}
for pair in env_pairs:
e1, e2 = pair
# We only have state_transformer for training envs.
if pair not in self.state_transformers:
logging.warn('Pair %s is not in the training pairs set.', str(pair))
else:
e1_index = env_ids.index(e1)
e2_index = env_ids2.index(e2)
e1_labels = env_batches[e1_index]['label']
e2_labels = env_batches2[e2_index]['label']
# Get representations for env1.
e1_reps = env_reps[e1_index]
        # Get representations for env2.
e2_reps = env_reps2[e2_index]
# Transform representations from env1.
transformed_e1 = self.state_transformers[pair](e1_reps)
env_aligned_pairs_idx[pair] = self.align_batches(
transformed_e1, e2_reps, e1_labels, e2_labels)
# Convert alignments which is the array of aligned indices to match mat.
alignments = jnp.asarray(env_aligned_pairs_idx[pair])
batch_size = alignments.shape[1]
matching_matrix = jnp.zeros(
shape=(batch_size, batch_size), dtype=jnp.float32)
env_aligned_pairs_idx[pair] = matching_matrix.at[alignments[0],
alignments[1]].set(1.0)
return env_aligned_pairs_idx
def domain_mapping_loss(self,
env_reps,
env_batches,
env_ids,
env_aligned_pairs_idx=None):
"""Compute Linear Transformation Constraint loss.
Args:
env_reps: list; List of different envs representations.
env_batches: list; List of different envs batches.
env_ids: list(int): List of environment codes.
env_aligned_pairs_idx: dict; Environment pair --> alignment. (if None the
alignment is computed).
Returns:
Domain mapping loss (float).
"""
mask_loss_diff_labels = self.task_params.get('mask_loss_diff_labels')
# Get all possible environment pairs
env_pairs = list(itertools.permutations(env_ids, 2))
aux_losses = []
l2s = []
for pair in env_pairs:
e1, e2 = pair
# We only have state_transformer for training envs.
if pair not in self.state_transformers:
logging.warn('Pair %s is not in the training pairs set.', str(pair))
else:
e1_index = env_ids.index(e1)
e2_index = env_ids.index(e2)
e1_labels = env_batches[e1_index]['label']
e2_labels = env_batches[e2_index]['label']
# Get representations for env1.
e1_reps = env_reps[e1_index]
        # Get representations for env2.
e2_reps = env_reps[e2_index]
# Transform representations from env1.
transformed_e1 = self.state_transformers[pair](e1_reps)
if env_aligned_pairs_idx is None:
aligned_pairs_idx = self.align_batches(transformed_e1, e2_reps,
e1_labels, e2_labels)
else:
aligned_pairs_idx = env_aligned_pairs_idx[pair]
if mask_loss_diff_labels:
# Assign zero/one weights to each example pair based on the alignment
# of their labels.
pair_weights = jnp.float32(e1_labels[aligned_pairs_idx[0]] ==
e2_labels[aligned_pairs_idx[1]])
else:
pair_weights = jnp.ones_like(e1_labels, dtype='float32')
# Compute domain mapping loss for the environment pair:
# Get representations for env1.
transformed_e1 = transformed_e1[aligned_pairs_idx[0]]
# Get corresponding representations for env2.
e2_reps = env_reps[e2_index][aligned_pairs_idx[1]]
# Minimize the distance between transformed reps from env1 and reps
# from env2.
aux_losses.append(
jnp.mean(
jnp.linalg.norm(transformed_e1 - e2_reps, axis=-1) *
pair_weights))
        # Add an l2 loss for the transformer weights (to keep the
        # transformation as minimal as possible).
l2s.append(
metrics.l2_regularization(
self.state_transformers[pair].params,
include_bias_terms=self.task_params.get('l2_for_bias', False)))
if not aux_losses:
aux_losses = [0]
l2s = [0]
alpha = self.task_params.get('aux_weight', .0)
beta = self.task_params.get('aux_l2', .0)
# Average and return the final weighted value of the loss.
return alpha * jnp.mean(jnp.array(aux_losses)) + beta * jnp.mean(
jnp.array(l2s))
def align_batches(self, x, y, x_labels, y_labels, supervised=True):
"""Computes alignment between two mini batches.
    In the MultiEnvLinearDomainMappingClassification, this calls the random
    (label-based) alignment function.
Args:
x: jnp array; Batch of representations with shape '[bs, feature_size]'.
y: jnp array; Batch of representations with shape '[bs, feature_size]'.
x_labels: jnp array; labels of x with shape '[bs, 1]'.
y_labels: jnp array; labels of y with shape '[bs, 1]'.
      supervised: bool; If False, y_labels cannot be used and the alignment
        falls back to random; otherwise alignment is label based (it tries to
        align examples that have similar labels).
Returns:
aligned indexes of x, aligned indexes of y.
"""
del y
# Get aligned example pairs.
if supervised:
rng = nn.make_rng()
new_rngs = jax.random.split(rng, len(x_labels))
aligned_pairs_idx = domain_mapping_utils.align_examples(
new_rngs, x_labels, jnp.arange(len(x_labels)), y_labels)
else:
number_of_examples = len(x)
rng = nn.make_rng()
matching_matrix = jnp.eye(number_of_examples)
matching_matrix = jax.random.permutation(rng, matching_matrix)
aligned_pairs_idx = jnp.arange(len(x)), jnp.argmax(
matching_matrix, axis=-1)
return aligned_pairs_idx
class MultiEnvNonLinearDomainMappingClassification(
MultiEnvLinearDomainMappingClassification):
"""Non linear Domain Mapping."""
def get_transformer_module(self, hidden_reps_dim):
class NonLinear(nn.Module):
def apply(self, x):
x = nn.Dense(x, hidden_reps_dim, bias=True, name='l1')
x = nn.relu(x)
x = nn.Dense(x, hidden_reps_dim, bias=True, name='l2')
return x
return NonLinear
class MultiEnvHungarianDomainMappingClassification(
MultiEnvLinearDomainMappingClassification):
"""Non linear Domain Mapping."""
def align_batches(self, x, y, x_labels, y_labels):
"""Computes alignment between two mini batches.
    In the MultiEnvHungarianDomainMappingClassification, this calls the
    Hungarian matching function.
Args:
x: jnp array; Batch of representations with shape '[bs, feature_size]'.
y: jnp array; Batch of representations with shape '[bs, feature_size]'.
x_labels: jnp array; labels of x with shape '[bs, 1]'.
y_labels: jnp array; labels of y with shape '[bs, 1]'.
Returns:
aligned indexes of x, aligned indexes of y.
"""
label_cost = self.task_params.get('ot_label_cost', 0.)
cost = domain_mapping_utils.pairwise_l2(x, y)
# Adjust cost such that representations with different labels
# get assigned a very high cost.
same_labels = domain_mapping_utils.pairwise_equality_1d(x_labels, y_labels)
adjusted_cost = cost + (1 - same_labels) * label_cost
# `linear_sum_assignment` computes cheapest hard alignment.
x_ind, y_ind = scipy.optimize.linear_sum_assignment(adjusted_cost)
return x_ind, y_ind
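  # A self-contained sketch of the matching step used above (hypothetical
  # 2x2 cost matrix):
  #
  #   import numpy as np
  #   from scipy.optimize import linear_sum_assignment
  #   cost = np.array([[1.0, 0.1],
  #                    [0.2, 1.0]])
  #   x_ind, y_ind = linear_sum_assignment(cost)
  #   # x_ind == [0, 1], y_ind == [1, 0]: the cheapest hard assignment,
  #   # with total cost 0.1 + 0.2 = 0.3.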
class MultiEnvIdentityDomainMappingClassification(
MultiEnvLinearDomainMappingClassification):
"""Multi environment task with Indentity Domain Mapping.
Domain mapping adds an auxiliary loss that encourages
the model to have equivariant representations with respect to the environment.
Using domain mapping with identity mapping simply means that the domain
mapping loss is the L2 distance between examples from different domains.
"""
def get_transformer_module(self, hidden_reps_dim):
"""Return the domain mapping module."""
# TODO(samiraabnar): Find a way to avoid defining these naive mapping
# models.
    class Identity(nn.Module):
"""Does nothing but returns the input itself."""
def apply(self, x):
return x
    return Identity
class MultiEnvSinkhornDomainMappingClassification(
MultiEnvIdentityDomainMappingClassification):
"""Multi env CLS with Sinkhorn-based matching."""
def get_bipartite_env_aligned_pairs_idx(self, env_reps, env_batches, env_ids,
env_reps2, env_batches2, env_ids2):
"""Computes and returns aligned pairs between two sets of environments.
Args:
env_reps: list; List of different envs representations.
env_batches: list; List of different envs batches.
env_ids: list(int): List of environment codes.
env_reps2: list; List of different envs representations.
env_batches2: list; List of different envs batches.
env_ids2: list(int): List of environment codes.
Returns:
Aligned pairs indices (aligned rows, aligned columns).
"""
env_pairs = list(itertools.product(env_ids, env_ids2))
env_aligned_pairs_idx = {}
for pair in env_pairs:
e1, e2 = pair
# We only have state_transformer for training envs.
if pair not in self.state_transformers:
logging.warn('Pair %s is not in the training pairs set.', str(pair))
else:
e1_index = env_ids.index(e1)
e2_index = env_ids2.index(e2)
e1_labels = env_batches[e1_index]['label']
e2_labels = env_batches2[e2_index]['label']
# Get representations for env1.
e1_reps = env_reps[e1_index]
# Get representations for env2.
e2_reps = env_reps2[e2_index]
# Transform representations from env1.
transformed_e1 = self.state_transformers[pair](e1_reps)
env_aligned_pairs_idx[pair] = self.align_batches(
transformed_e1, e2_reps, e1_labels, e2_labels)
return env_aligned_pairs_idx
def align_batches(self, x, y, x_labels, y_labels):
"""Computes optimal transport between two batches with Sinkhorn algorithm.
    This calls a sinkhorn solver in dual (log) space with a finite number
    of iterations and rounds the resulting coupling to a matching.
Args:
x: jnp array; Batch of representations with shape '[bs, feature_size]'.
y: jnp array; Batch of representations with shape '[bs, feature_size]'.
x_labels: jnp array; labels of x with shape '[bs, 1]'.
y_labels: jnp array; labels of y with shape '[bs, 1]'.
Returns:
      matching: jnp array; coupling between x and y (a hard permutation
        when interpolation_mode is 'hard').
"""
epsilon = self.task_params.get('sinkhorn_eps', 0.1)
num_iters = self.task_params.get('sinkhorn_iters', 50)
label_weight = self.task_params.get('ot_label_cost', 0.)
l2_weight = self.task_params.get('ot_l2_cost', 0.)
noise_weight = self.task_params.get('ot_noise_cost', 1.0)
# Solve sinkhorn in log space.
num_x = x.shape[0]
num_y = y.shape[0]
x = x.reshape((num_x, -1))
y = y.reshape((num_y, -1))
# Marginal of rows (a) and columns (b)
a = jnp.ones(shape=(num_x,), dtype=x.dtype)
b = jnp.ones(shape=(num_y,), dtype=y.dtype)
# TODO(samiraabnar): Check range of l2 cost?
cost = domain_mapping_utils.pairwise_l2(x, y)
# Adjust cost such that representations with different labels
# get assigned a very high cost.
same_labels = domain_mapping_utils.pairwise_equality_1d(x_labels, y_labels)
adjusted_cost = (1 - same_labels) * label_weight + l2_weight * cost
# Add noise to the cost.
adjusted_cost += noise_weight * jax.random.uniform(
nn.make_rng(), minval=0, maxval=1.0)
_, matching, _ = domain_mapping_utils.sinkhorn_dual_solver(
a, b, adjusted_cost, epsilon, num_iters)
matching = domain_mapping_utils.round_coupling(matching, a, b)
if self.task_params.get('interpolation_mode', 'hard') == 'hard':
matching = domain_mapping_utils.sample_best_permutation(
nn.make_rng(), coupling=matching, cost=adjusted_cost)
return matching
def ot_loss(self, x, y, x_labels, y_labels):
"""Computes optimal transport between two batches with Sinkhorn algorithm.
This calls a sinkhorn solver in dual (log) space with a finite number
of iterations and uses the dual unregularized transport cost as the OT cost.
Args:
x: jnp array; Batch of representations with shape '[bs, feature_size]'.
y: jnp array; Batch of representations with shape '[bs, feature_size]'.
x_labels: jnp array; labels of x with shape '[bs, 1]'.
y_labels: jnp array; labels of y with shape '[bs, 1]'.
Returns:
ot_cost: scalar optimal transport loss.
"""
epsilon = self.task_params.get('sinkhorn_eps', 0.1)
num_iters = self.task_params.get('sinkhorn_iters', 100)
label_cost = self.task_params.get('ot_label_cost', 0.)
# Solve sinkhorn in log space.
num_x = x.shape[0]
num_y = y.shape[0]
x = x.reshape((num_x, -1))
y = y.reshape((num_y, -1))
# Marginal of rows (a) and columns (b)
a = jnp.ones(shape=(num_x,), dtype=x.dtype) / float(num_x)
b = jnp.ones(shape=(num_y,), dtype=y.dtype) / float(num_y)
cost = domain_mapping_utils.pairwise_l2(x, y)
# Adjust cost such that representations with different labels
# get assigned a very high cost.
same_labels = domain_mapping_utils.pairwise_equality_1d(x_labels, y_labels)
# adjusted_cost = same_labels * cost + (1 - same_labels) * (
# label_cost * jnp.max(cost))
adjusted_cost = cost + (1 - same_labels) * label_cost
ot_cost, _, _ = domain_mapping_utils.sinkhorn_dual_solver(
a, b, adjusted_cost, epsilon, num_iters)
return ot_cost
def domain_mapping_loss(self,
env_reps,
env_batches,
env_ids,
env_aligned_pairs_idx=None):
"""Compute Linear Transformation Constraint loss.
Args:
env_reps: list; List of different envs representations.
env_batches: list; List of different envs batches.
env_ids: list(int): List of environment codes.
env_aligned_pairs_idx: Ignored. Is only here to ensure compatibility with
the method "loss_function" which is defined in the parent class.
Returns:
domain mapping scalar loss (averaged over all environments).
"""
del env_aligned_pairs_idx
# Get all possible environment pairs
env_pairs = list(itertools.permutations(env_ids, 2))
aux_losses = []
l2s = []
for pair in env_pairs:
e1, e2 = pair
# We only have state_transformer for training envs.
if pair not in self.state_transformers:
logging.warn('Pair %s is not in the training pairs set.', str(pair))
else:
e1_index = env_ids.index(e1)
e2_index = env_ids.index(e2)
e1_labels = env_batches[e1_index]['label']
e2_labels = env_batches[e2_index]['label']
# Get representations for env1.
e1_reps = env_reps[e1_index]
# Get representations for env1.
e2_reps = env_reps[e2_index]
# Transform representations from env1.
transformed_e1 = self.state_transformers[pair](e1_reps)
ot_cost = self.ot_loss(transformed_e1, e2_reps, e1_labels, e2_labels)
aux_losses.append(ot_cost)
        # Add an l2 loss for the transformer weights (to keep the
        # transformation as minimal as possible).
l2s.append(
metrics.l2_regularization(
self.state_transformers[pair].params,
include_bias_terms=self.task_params.get('l2_for_bias', False)))
if not aux_losses:
aux_losses = [0]
l2s = [0]
alpha = self.task_params.get('aux_weight', .0)
beta = self.task_params.get('aux_l2', .0)
# Average and return the final weighted value of the loss.
return alpha * jnp.mean(jnp.array(aux_losses)) + beta * jnp.mean(
jnp.array(l2s))
class MultiEnvDannClassification(MultiEnvVRexClassificationTask):
"""Task class for Domain Adverserial NNs."""
def dann_loss(self, env_logits, env_labels, env_batches):
"""Compute DANN loss.
Reference: https://jmlr.org/papers/volume17/15-239/15-239.pdf
Args:
env_logits: list; Domain logits for all labeled environments. This is the
output of the domain discriminator module.
env_labels: list; Domain Labels.
env_batches: list(dict); List of batches of examples of all labeled
environments.
Returns:
Dann loss.
"""
# Domain CLS loss function:
loss_fn = functools.partial(
metrics.weighted_loss,
all_metrics.ALL_LOSSES['categorical_cross_entropy'])
    # Aggregate domain discriminator loss for all environments:
env_dann_losses = []
for i in range(len(env_logits)):
batch = env_batches[i]
loss_value, loss_normalizer = loss_fn(env_logits[i], env_labels[i],
batch.get('weights'))
loss = loss_value / loss_normalizer
loss = jax.lax.cond(
loss_normalizer > 0, lambda l: l, lambda _: 0.0, operand=loss)
env_dann_losses.append(loss)
total_dann_loss = jnp.mean(jnp.asarray(env_dann_losses))
return total_dann_loss
def loss_function(self,
env_logits,
env_batches,
all_env_logits,
all_env_labels,
all_env_batches,
dann_factor,
model_params=None,
step=0):
"""Returns loss with an L2 penalty on the weights.
Args:
env_logits: list(dict); List of logits for examples from different
environment (env_logits[0] is the logits for examples from env_ids[0]).
env_batches: list(dict); List of batches of examples from different
environment (env_batches[0] is a batch dict for examples from
env_ids[0]).
all_env_logits: list; Domain logits for all environments.
all_env_labels: list(dict); Domain labels for all environments.
all_env_batches: list(int): List of all environment batches.
dann_factor: float; DANN factor.
model_params: dict; Parameters of the model (used to compute l2).
step: int; Global training step.
Returns:
Total loss.
"""
total_loss = super().loss_function(
env_logits=env_logits,
env_batches=env_batches,
model_params=model_params,
step=step)
total_loss += self.dann_loss(all_env_logits, all_env_labels,
all_env_batches) * dann_factor
return total_loss
|
|
#
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
import json
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from django.utils import six
from django.utils.text import capfirst
from rest_framework import serializers
from rest_framework.validators import UniqueTogetherValidator
from pdc.apps.contact.models import Contact, ContactRole
from pdc.apps.contact.serializers import RoleContactSerializer
from pdc.apps.common.serializers import DynamicFieldsSerializerMixin, LabelSerializer, StrictSerializerMixin
from pdc.apps.common.fields import ChoiceSlugField
from pdc.apps.release.models import Release
from pdc.apps.common.hacks import convert_str_to_int
from .models import (GlobalComponent,
RoleContact,
ReleaseComponent,
Upstream,
BugzillaComponent,
ReleaseComponentGroup,
GroupType,
ReleaseComponentType,
ReleaseComponentRelationshipType,
ReleaseComponentRelationship)
from . import signals
__all__ = (
'GlobalComponentSerializer',
'ReleaseComponentSerializer',
'HackedContactSerializer',
'UpstreamSerializer',
'BugzillaComponentSerializer',
'GroupSerializer',
'GroupTypeSerializer'
)
def reverse_url(request, view_name, **kwargs):
return request.build_absolute_uri(reverse(viewname=view_name,
kwargs=kwargs))
class HackedContactSerializer(RoleContactSerializer):
"""
    Can be used as a view-level serializer to encode/decode the contact data,
    or as a field in the global/release component.
    Automatically replaces the url with /[global|release]-components/<instance_pk>/contacts/<pk>.
    Automatically sets inherited = True when serializing a release component.
"""
def __init__(self, *args, **kwargs):
self.inherited = kwargs.pop('inherited', False)
self.view_name = kwargs.pop('view_name', 'globalcomponentcontact-detail')
context = kwargs.get('context', None)
self.instance_pk = None
self.view = None
        # Set view/instance_pk when the class is used as a serializer.
if context:
self.view = context.get('view', None)
extra_kwargs = context.get('extra_kwargs', None)
if extra_kwargs:
self.instance_pk = extra_kwargs.get('instance_pk', None)
super(HackedContactSerializer, self).__init__(*args, **kwargs)
def to_representation(self, obj):
ret = super(HackedContactSerializer, self).to_representation(obj)
request = self.context.get('request', None)
url_kwargs = self.context.get('extra_kwargs', {})
# NOTE(xchu): The `instance_pk` is needed for building a valid url,
# so if not provided, we should raise `KeyError`.
instance_pk = url_kwargs['instance_pk']
ret['url'] = reverse_url(request, self.view_name, **{
'instance_pk': instance_pk,
'pk': obj.pk
})
if self.inherited and self.view_name == 'globalcomponentcontact-detail':
ret['inherited'] = True
return ret
def to_internal_value(self, data):
        # Run StrictSerializerMixin's to_internal_value() to check whether extra fields exist.
super(HackedContactSerializer, self).to_internal_value(data)
request = self.context.get('request', None)
serializer = RoleContactSerializer(data=data,
many=not isinstance(data, dict),
context={'request': request})
kwargs = {}
kwargs['contact_role'] = data.get('contact_role')
kwargs.update(data.get('contact'))
try:
contact = RoleContact.specific_objects.get(**kwargs)
except (RoleContact.DoesNotExist, Contact.DoesNotExist, ContactRole.DoesNotExist):
            # If we can't get the RoleContact from the database, validate the
            # input data and create it.
if serializer.is_valid(raise_exception=True):
contact = RoleContact.specific_objects.create(**kwargs)
if request and request.changeset:
model_name = ContentType.objects.get_for_model(contact).model
request.changeset.add(model_name,
contact.id,
'null',
json.dumps(contact.export()))
component_class = self.view.model
if component_class.objects.get(pk=self.instance_pk).contacts.filter(pk=contact.pk).exists():
model_name = six.text_type(capfirst(component_class._meta.verbose_name))
raise serializers.ValidationError({"detail": "%s contact with this %s and Contact already exists."
% (model_name, model_name)})
else:
return contact
def save(self, **kwargs):
"""
Save the deserialized object and return it.
"""
instance_pk = self.context['extra_kwargs']['instance_pk']
component_class = self.context['view'].model
component = component_class.objects.get(pk=instance_pk)
existed_contacts = component.contacts.all()
if isinstance(self.validated_data, list):
contacts = [self.get_object_from_db(item) for item in self.validated_data if item not in existed_contacts]
component.contacts.add(*contacts)
if self.validated_data['_deleted']:
[self.delete_object(item) for item in self.validated_data['_deleted']]
else:
contacts = self.get_object_from_db(self.validated_data)
component.contacts.add(contacts)
return contacts
def get_object_from_db(self, item):
contact = RoleContact.objects.get(**{
'contact_role_id': item.contact_role_id,
'contact_id': item.contact_id
})
return contact
class Meta:
model = RoleContact
fields = ('url', 'contact_role', 'contact')
# In order not to run parent's validators, set validators to []
validators = []
class HackedContactField(serializers.Field):
"""
    HackedContactField is used in GlobalComponentSerializer/ReleaseComponentSerializer instead of HackedContactSerializer.
    It has the ability to get_attribute() from GlobalComponentSerializer/ReleaseComponentSerializer.
"""
def __init__(self, view_name, *args, **kwargs):
self.view_name = view_name
super(HackedContactField, self).__init__(*args, **kwargs)
def to_representation(self, value):
serializer = HackedContactSerializer(value, many=True, context=self.context, view_name=self.view_name)
return serializer.data
def get_attribute(self, obj):
"""
Get attribute from the serializer which uses this field.
@param obj: The model object related to the serializer.
"""
# NOTE(xchu): The `instance_pk` is needed for building a valid url,
# it's not provided when used as a field, so we should inject one.
if 'extra_kwargs' not in self.context or 'instance_pk' not in self.context['extra_kwargs']:
self.context['extra_kwargs'] = {'instance_pk': obj.pk}
return obj.contacts.all()
class UpstreamSerializer(StrictSerializerMixin, serializers.ModelSerializer):
class Meta:
model = Upstream
fields = ('homepage', 'scm_type', 'scm_url')
class UpstreamRelatedField(serializers.RelatedField):
def to_representation(self, value):
serializer = UpstreamSerializer(value)
return serializer.data
def to_internal_value(self, value):
request = self.context.get('request', None)
if isinstance(value, dict):
try:
upstream = Upstream.objects.get(**value)
except Upstream.DoesNotExist:
serializer = UpstreamSerializer(data=value, many=False, context={'request': request})
if serializer.is_valid(raise_exception=True):
upstream = serializer.save()
model_name = ContentType.objects.get_for_model(upstream).model
if request and request.changeset:
request.changeset.add(model_name,
upstream.id,
'null',
json.dumps(upstream.export()))
return upstream
else:
self._errors = serializer._errors
except Exception as err:
raise serializers.ValidationError("Can not get or create Upstream with the input(%s): %s." % (value, err))
else:
return upstream
else:
raise serializers.ValidationError("Unsupported upstream input.")
class GlobalComponentSerializer(DynamicFieldsSerializerMixin,
StrictSerializerMixin,
serializers.HyperlinkedModelSerializer):
contacts = HackedContactField(required=False, read_only=False, view_name='globalcomponentcontact-detail')
name = serializers.CharField(required=True,
max_length=100)
dist_git_path = serializers.CharField(required=False,
max_length=200,
allow_blank=True)
dist_git_web_url = serializers.URLField(required=False,
max_length=200)
labels = LabelSerializer(many=True, required=False, read_only=True)
upstream = UpstreamRelatedField(read_only=False, required=False, queryset=Upstream.objects.all())
class Meta:
model = GlobalComponent
fields = ('id', 'name', 'dist_git_path', 'dist_git_web_url', 'contacts', 'labels', 'upstream')
class TreeForeignKeyField(serializers.Field):
def to_representation(self, value):
request = self.context.get("request", None)
serializer = BugzillaComponentSerializer(value, context={'request': request, 'top_level': False})
return serializer.data
def to_internal_value(self, data):
if data.strip() == "":
raise serializers.ValidationError({'bugzilla_component': 'This field is required.'})
else:
components = data.strip("/").split("/")
len_components = len(components)
bc = None
            # If only the Bugzilla component name is given, the parent
            # component is considered to be None.
if len_components == 1:
try:
bc = BugzillaComponent.objects.get(name=components[0], parent_component=None)
                except BugzillaComponent.DoesNotExist:
raise serializers.ValidationError({'bugzilla_component': ("Bugzilla component with name %s does not exist."
% data)})
            # The path contains not only the Bugzilla component but also its
            # ancestors.
if len_components > 1:
z = zip(components, components[1:])
root_bc_name, bc_name = z[0]
qs = BugzillaComponent.objects.filter(name=bc_name, parent_component__name=root_bc_name)
for _, bc_name in z[1:]:
qs = BugzillaComponent.objects.filter(name=bc_name, parent_component__in=qs)
if not qs:
raise serializers.ValidationError({'bugzilla_component': ("Bugzilla component with name %s does not exist."
% data)})
if len(qs) > 1:
raise serializers.ValidationError({'bugzilla_component': ("Duplicate Bugzilla component with name %s exists."
% data)})
if qs:
bc = qs[0]
return bc
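    # For illustration (hypothetical component names): the input
    # "gcc/libstdc++" is split into ['gcc', 'libstdc++'] and resolved to the
    # BugzillaComponent named 'libstdc++' whose parent is named 'gcc', while
    # a bare "gcc" resolves to the component with parent_component=None.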
class BugzillaComponentSerializer(DynamicFieldsSerializerMixin,
StrictSerializerMixin,
serializers.HyperlinkedModelSerializer):
"""
Bugzilla Component serializer.
"""
parent_component = serializers.CharField(required=False, max_length=200)
subcomponents = serializers.SerializerMethodField()
extra_fields = ['parent_pk']
def get_subcomponents(self, obj):
"""[string]"""
return obj.get_subcomponents()
class Meta:
model = BugzillaComponent
fields = ('id', 'name', 'parent_component', 'subcomponents')
class ReleaseField(serializers.SlugRelatedField):
def __init__(self, **kwargs):
super(ReleaseField, self).__init__(slug_field='release_id',
queryset=Release.objects.all(),
**kwargs)
def to_representation(self, value):
return {
'release_id': value.release_id,
'active': value.active
}
class ReleaseComponentTypeSerializer(StrictSerializerMixin, serializers.ModelSerializer):
class Meta:
model = ReleaseComponentType
fields = ('name',)
class ReleaseComponentSerializer(DynamicFieldsSerializerMixin,
StrictSerializerMixin,
serializers.HyperlinkedModelSerializer):
"""
ReleaseComponent Serializer
"""
release = ReleaseField(read_only=False)
global_component = serializers.SlugRelatedField(slug_field='name', read_only=False, queryset=GlobalComponent.objects.all())
contacts = HackedContactField(required=False, read_only=False, view_name='releasecomponentcontact-detail')
dist_git_branch = serializers.CharField(source='inherited_dist_git_branch', required=False)
dist_git_web_url = serializers.URLField(required=False, max_length=200, read_only=True)
bugzilla_component = TreeForeignKeyField(read_only=False, required=False, allow_null=True)
brew_package = serializers.CharField(required=False)
active = serializers.BooleanField(required=False, default=True)
type = ChoiceSlugField(slug_field='name', queryset=ReleaseComponentType.objects.all(), required=False,
allow_null=True)
def update(self, instance, validated_data):
signals.releasecomponent_serializer_extract_data.send(sender=self, validated_data=validated_data)
instance = super(ReleaseComponentSerializer, self).update(instance, validated_data)
signals.releasecomponent_serializer_post_update.send(sender=self, release_component=instance)
if hasattr(instance, 'pk'):
# reload to make sure changes in mapping are reflected
instance = ReleaseComponent.objects.get(pk=instance.pk)
        # From the view's doc for ReleaseComponent: PUT and PATCH updates
        # work the same way, except that `name` is required for PUT, so
        # there is no setattr here.
return instance
def create(self, validated_data):
signals.releasecomponent_serializer_extract_data.send(sender=self, validated_data=validated_data)
instance = super(ReleaseComponentSerializer, self).create(validated_data)
signals.releasecomponent_serializer_post_create.send(sender=self, release_component=instance)
return instance
def to_representation(self, instance):
ret = super(ReleaseComponentSerializer, self).to_representation(instance)
request = self.context.get("request", None)
# Include global component contacts - PDC-184
gcs = GlobalComponentSerializer(
instance=instance.global_component,
context={'request': request})
# Exclude global component contacts whose contact_role are already in release component contacts
gcc = gcs.data.get('contacts', [])
contacts = ret.get('contacts', [])
contact_role_lists = [contact['contact_role'] for contact in contacts]
for contact in gcc:
if contact['contact_role'] in contact_role_lists:
continue
contact['inherited'] = True
contacts.append(contact)
return ret
def to_internal_value(self, data):
        # Raise an error explicitly when release and global_component are given.
if self.instance:
allowed_keys = self.get_allowed_keys() - set(['release', 'global_component'])
extra_fields = set(data.keys()) - allowed_keys
self.maybe_raise_error(extra_fields)
data['release'] = self.instance.release
data['global_component'] = self.instance.global_component
return super(ReleaseComponentSerializer, self).to_internal_value(data)
def validate_release(self, value):
if not isinstance(value, Release):
if isinstance(value, dict):
release_id = value['release_id']
else:
release_id = value
if release_id is None or release_id.strip() == "":
self._errors = {'release': 'This field is required.'}
return
release = get_object_or_404(Release, release_id=release_id)
if not release.is_active():
self._errors = {'release': 'Can not create a release component with an inactive release.'}
return
value = release
return value
def validate_global_component(self, value):
if not isinstance(value, GlobalComponent):
global_component_name = value
if global_component_name is None or global_component_name.strip() == "":
self._errors = {'global_component': 'This field is required.'}
return
gc = get_object_or_404(GlobalComponent, name=global_component_name)
value = gc
return value
def validate_name(self, value):
if value.strip() == "":
self._errors = {'name': 'This field is required.'}
return value
def validate_type(self, value):
if not isinstance(value, ReleaseComponentType):
if value is not None and value.strip() != "":
value = get_object_or_404(ReleaseComponentType, name=value.strip())
else:
raise serializers.ValidationError("This field can't be set to null.")
return value
class Meta:
model = ReleaseComponent
fields = ('id', 'release', 'bugzilla_component', 'brew_package', 'global_component',
'name', 'dist_git_branch', 'dist_git_web_url', 'active',
'contacts', 'type')
validators = [UniqueTogetherValidator(
queryset=ReleaseComponent.objects.all(),
fields=('name', 'release', 'global_component')
)]
class GroupTypeSerializer(StrictSerializerMixin, serializers.ModelSerializer):
description = serializers.CharField(required=False)
class Meta:
model = GroupType
fields = ('id', 'name', 'description')
class ReleaseComponentRelatedField(serializers.RelatedField):
doc_format = '{"id": "int", "name": "string"}'
def to_representation(self, value):
result = dict()
if value:
result['id'] = value.id
result['name'] = value.name
return result
def to_internal_value(self, data):
if not isinstance(data, dict):
raise serializers.ValidationError({'detail': "Input [%s] for ReleaseComponent must be a dict." % data})
if set(data.keys()) not in [set(['id']), set(['release', 'global_component', 'name'])]:
raise serializers.ValidationError(
{'detail': "Only accept ['id'] or ['release', 'global_component', 'name']"})
kwargs = dict()
if 'id' in data:
kwargs['id'] = convert_str_to_int(data.get('id'))
else:
kwargs['release__release_id'] = data.get('release')
kwargs['global_component__name'] = data.get('global_component')
kwargs['name'] = data.get('name')
try:
rc = ReleaseComponent.objects.get(**kwargs)
except ReleaseComponent.DoesNotExist:
raise serializers.ValidationError({'detail': "ReleaseComponent [%s] doesn't exist" % data})
return rc
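    # Accepted inputs, for illustration (values hypothetical):
    #
    #   {"id": 1}
    #   {"release": "release-1.0", "global_component": "comp", "name": "comp"}
    #
    # Anything else raises a ValidationError.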
class GroupSerializer(StrictSerializerMixin, serializers.ModelSerializer):
group_type = serializers.SlugRelatedField(
queryset=GroupType.objects.all(),
slug_field='name',
required=True
)
release = serializers.SlugRelatedField(
queryset=Release.objects.all(),
slug_field='release_id',
required=True
)
description = serializers.CharField(required=True)
components = ReleaseComponentRelatedField(
required=False,
many=True,
queryset=ReleaseComponent.objects.all()
)
def validate(self, value):
        # POST
if not self.instance:
components = value.get('components', [])
release = value.get('release')
# PUT or PATCH
else:
components = value.get('components', self.instance.components.all())
release = value.get('release', self.instance.release)
for component in components:
if component.release != release:
raise serializers.ValidationError({
                    'detail': 'Not allowed to group release_component[%s] <release[%s]> with a different release[%s].'
                    % (component.name, component.release.release_id, release.release_id)})
return value
class Meta:
model = ReleaseComponentGroup
fields = ('id', 'group_type', 'description', 'release', 'components')
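# A hedged example payload for creating a group; validate() above rejects any
# component whose release differs from the group's release. All values are
# illustrative.
EXAMPLE_GROUP_PAYLOAD = {
    'group_type': 'build-groups',
    'release': 'product-1.0',
    'description': 'Build group for product-1.0',
    'components': [
        {'id': 1},
        {'release': 'product-1.0', 'global_component': 'bash', 'name': 'bash'},
    ],
}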
class RCRelationshipTypeSerializer(StrictSerializerMixin, serializers.ModelSerializer):
class Meta:
model = ReleaseComponentRelationshipType
fields = ('name',)
class RCForRelationshipRelatedField(ReleaseComponentRelatedField):
doc_format = '{"id": "int", "name": "string", "release": "Release.release_id"}'
def to_representation(self, value):
result = dict()
if value:
result['id'] = value.id
result['name'] = value.name
result['release'] = value.release.release_id
return result
class ReleaseComponentRelationshipSerializer(StrictSerializerMixin, serializers.ModelSerializer):
type = ChoiceSlugField(
queryset=ReleaseComponentRelationshipType.objects.all(),
slug_field='name',
required=True,
source='relation_type'
)
from_component = RCForRelationshipRelatedField(
required=True,
queryset=ReleaseComponent.objects.all()
)
to_component = RCForRelationshipRelatedField(
required=True,
queryset=ReleaseComponent.objects.all()
)
class Meta:
model = ReleaseComponentRelationship
fields = ('id', 'type', 'from_component', 'to_component')
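# A hedged example payload for creating a relationship; the relationship type
# name and the component references are illustrative assumptions.
EXAMPLE_RELATIONSHIP_PAYLOAD = {
    'type': 'executes',
    'from_component': {'id': 1},
    'to_component': {'release': 'product-1.0',
                     'global_component': 'bash',
                     'name': 'bash'},
}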
# Author: Denis A. Engemann <denis.engemann@gmail.com>
# License: BSD (3-clause)
import numpy as np
import mne
from mne.io import set_bipolar_reference
from mne.io.bti.bti import (
_convert_coil_trans, _coil_trans_to_loc, _get_bti_dev_t,
_loc_to_coil_trans)
from mne.transforms import Transform
from mne.utils import logger
from .io import read_info
from .io.read import _hcp_pick_info
from .io.read import _data_labels
def set_eog_ecg_channels(raw):
"""Set the HCP ECG and EOG channels
.. note::
Operates in place.
Parameters
----------
raw : instance of Raw
        The HCP raw data.
"""
for kind in ['ECG', 'VEOG', 'HEOG']:
set_bipolar_reference(
raw, anode=kind + '-', cathode=kind + '+', ch_name=kind,
copy=False)
raw.set_channel_types({'ECG': 'ecg', 'VEOG': 'eog', 'HEOG': 'eog'})
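# Hedged usage sketch; the hcp.read_raw call and the subject/path values are
# assumptions for illustration:
# >>> raw = hcp.read_raw(subject='100307', data_type='rest',
# ...                    hcp_path='/data/HCP')
# >>> raw.load_data()
# >>> set_eog_ecg_channels(raw)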
def apply_ica_hcp(raw, ica_mat, exclude):
"""Apply the HCP ICA.
.. note::
Operates in place and data must be loaded.
Parameters
----------
raw : instance of Raw
        The HCP raw data.
    ica_mat : numpy structured array
        The HCP ICA solution.
    exclude : array-like
        The components to be excluded.
"""
if not raw.preload:
raise RuntimeError('raw data must be loaded, use raw.load_data()')
    # topolabel is a MATLAB cell array of channel names; the double tolist()
    # unpacks the numpy object array into a plain list of str
    ch_names = ica_mat['topolabel'].tolist().tolist()
picks = mne.pick_channels(raw.info['ch_names'], include=ch_names)
assert ch_names == [raw.ch_names[p] for p in picks]
unmixing_matrix = np.array(ica_mat['unmixing'].tolist())
n_components, n_channels = unmixing_matrix.shape
mixing = np.array(ica_mat['topo'].tolist())
proj_mat = (np.eye(n_channels) - np.dot(
mixing[:, exclude], unmixing_matrix[exclude]))
    # work in fT instead of T to avoid float precision issues, then scale back
    raw._data *= 1e15
raw._data[picks] = np.dot(proj_mat, raw._data[picks])
raw._data /= 1e15
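# Hedged usage sketch; hcp.read_ica and hcp.read_annot are assumed readers
# from this package, and the subject/path values are placeholders:
# >>> ica_mat = hcp.read_ica(subject='100307', data_type='rest',
# ...                        hcp_path='/data/HCP')
# >>> annots = hcp.read_annot(subject='100307', data_type='rest',
# ...                         hcp_path='/data/HCP')
# >>> apply_ica_hcp(raw, ica_mat=ica_mat,
# ...               exclude=annots['ica']['ecg_eog_ic'])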
def apply_ref_correction(raw, decim_fit=100):
"""Regress out MEG ref channels
Computes linear models from MEG reference channels
on each sensors, predicts the MEG data and subtracts
and computes the residual by subtracting the predictions.
.. note::
Operates in place.
.. note::
Can be memory demanding. To alleviate this problem the model can be fit
on decimated data. This is legitimate because the linear model does
not have any representation of time, only the distributions
matter.
Parameters
----------
raw : instance of Raw
The BTi/4D raw data.
decim_fit : int
The decimation factor used for fitting the model.
Defaults to 100.
"""
from sklearn.linear_model import LinearRegression
meg_picks = mne.pick_types(raw.info, ref_meg=False, meg=True)
ref_picks = mne.pick_types(raw.info, ref_meg=True, meg=False)
if len(ref_picks) == 0:
raise ValueError('Could not find meg ref channels.')
    # scikit-learn removed the ``normalize`` keyword in 1.2; for unregularized
    # OLS it only rescaled the coefficients, so predictions are unchanged.
    estimator = LinearRegression()  # ref MAG + GRAD
Y_pred = estimator.fit(
raw[ref_picks][0][:, ::decim_fit].T,
raw[meg_picks][0][:, ::decim_fit].T).predict(
raw[ref_picks][0].T)
raw._data[meg_picks] -= Y_pred.T
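# A minimal self-contained sketch of the same regression idea on synthetic
# data; it is an illustration, not part of the pipeline. It also shows why
# fitting on decimated samples is legitimate: the model has no notion of
# time, only of the joint distribution of reference and MEG channels.
def _demo_ref_regression():
    rng = np.random.RandomState(42)
    ref = rng.randn(3, 10000)                  # 3 reference channels
    leak = rng.randn(5, 3)                     # leakage of refs into 5 sensors
    meg = np.dot(leak, ref) + 0.1 * rng.randn(5, 10000)
    from sklearn.linear_model import LinearRegression
    model = LinearRegression().fit(ref[:, ::100].T, meg[:, ::100].T)
    return meg - model.predict(ref.T).T        # reference noise regressed out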
def map_ch_coords_to_mne(inst):
"""Transform sensors to MNE coordinates
.. note::
        Operates in place.
.. warning::
For several reasons we do not use the MNE coordinates for the inverse
modeling. This however won't always play nicely with visualization.
Parameters
----------
inst : MNE data containers
Raw, Epochs, Evoked.
"""
bti_dev_t = Transform('ctf_meg', 'meg', _get_bti_dev_t())
dev_ctf_t = inst.info['dev_ctf_t']
for ch in inst.info['chs']:
loc = ch['loc'][:]
if loc is not None:
logger.debug('converting %s' % ch['ch_name'])
t = _loc_to_coil_trans(loc)
t = _convert_coil_trans(t, dev_ctf_t, bti_dev_t)
loc = _coil_trans_to_loc(t)
ch['loc'] = loc
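# Hedged usage sketch: call this on a copy before plotting so the original
# coordinates used for inverse modeling stay untouched:
# >>> evoked_viz = evoked.copy()
# >>> map_ch_coords_to_mne(evoked_viz)
# >>> evoked_viz.plot_topomap()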
def interpolate_missing(inst, subject, data_type, hcp_path,
run_index=0, mode='fast'):
"""Interpolate all MEG channels that are missing
.. warning::
This function may require some memory.
Parameters
----------
inst : MNE data containers
Raw, Epochs, Evoked.
subject : str, file_map
The subject
data_type : str
The kind of data to read. The following options are supported:
'rest'
'task_motor'
'task_story_math'
'task_working_memory'
'noise_empty_room'
'noise_subject'
run_index : int
The run index. For the first run, use 0, for the second, use 1.
Also see HCP documentation for the number of runs for a given data
type.
hcp_path : str
The HCP directory, defaults to op.curdir.
mode : str
Either `'accurate'` or `'fast'`, determines the quality of the
Legendre polynomial expansion used for interpolation of MEG
channels.
Returns
-------
out : MNE data containers
Raw, Epochs, Evoked but with missing channels interpolated.
"""
    try:
        info = read_info(
            subject=subject, data_type=data_type, hcp_path=hcp_path,
            run_index=run_index)
    except (ValueError, IOError):
        raise ValueError(
            'Could not find the config to complete the measurement info '
            '(channel positions and transforms).')
# full BTI MEG channels
bti_meg_channel_names = ['A%i' % ii for ii in range(1, 249, 1)]
# figure out which channels are missing
bti_meg_channel_missing_names = [
ch for ch in bti_meg_channel_names if ch not in inst.ch_names]
# get meg picks
picks_meg = mne.pick_types(inst.info, meg=True, ref_meg=False)
    # the MEG picks can form a non-contiguous block, so invert them to find
    # the non-MEG channels
picks_other = [ii for ii in range(len(inst.ch_names)) if ii not in
picks_meg]
other_chans = [inst.ch_names[po] for po in picks_other]
# compute new n channels
n_channels = (len(picks_meg) +
len(bti_meg_channel_missing_names) +
len(other_chans))
    # restrict info to the final channel selection
    # ! the info read from the config file is not sorted like inst.info,
    # ! so the picking order matters, but we do not know it;
    # ! for now we rely on the consistent layout of the raw files
final_names = [ch for ch in _data_labels if ch in bti_meg_channel_names or
ch in other_chans]
info = _hcp_pick_info(info, final_names)
assert len(info['ch_names']) == n_channels
existing_channels_index = [ii for ii, ch in enumerate(info['ch_names']) if
ch in inst.ch_names]
info['sfreq'] = inst.info['sfreq']
# compute shape of data to be added
is_raw = isinstance(inst, (mne.io.Raw,
mne.io.RawArray,
mne.io.bti.bti.RawBTi))
is_epochs = isinstance(inst, mne.BaseEpochs)
is_evoked = isinstance(inst, (mne.Evoked, mne.EvokedArray))
if is_raw:
shape = (n_channels,
(inst.last_samp - inst.first_samp) + 1)
data = inst._data
elif is_epochs:
shape = (n_channels, len(inst.events), len(inst.times))
data = np.transpose(inst.get_data(), (1, 0, 2))
elif is_evoked:
shape = (n_channels, len(inst.times))
data = inst.data
else:
raise ValueError('instance must be Raw, Epochs '
'or Evoked')
out_data = np.empty(shape, dtype=data.dtype)
out_data[existing_channels_index] = data
if is_raw:
out = mne.io.RawArray(out_data, info)
        if inst.annotations is not None:
            # direct assignment to .annotations was removed in later MNE
            # releases; set_annotations() is the supported API
            out.set_annotations(inst.annotations)
elif is_epochs:
out = mne.EpochsArray(data=np.transpose(out_data, (1, 0, 2)),
info=info, events=inst.events,
tmin=inst.times.min(), event_id=inst.event_id)
elif is_evoked:
out = mne.EvokedArray(
data=out_data, info=info, tmin=inst.times.min(),
comment=inst.comment, nave=inst.nave, kind=inst.kind)
else:
raise ValueError('instance must be Raw, Epochs '
'or Evoked')
# set "bad" channels and interpolate.
out.info['bads'] = bti_meg_channel_missing_names
out.interpolate_bads(mode=mode)
return out
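# Hedged usage sketch with placeholder subject and path; run_index=0 selects
# the first run of the given data type:
# >>> evoked_full = interpolate_missing(
# ...     evoked, subject='100307', data_type='task_motor',
# ...     hcp_path='/data/HCP', run_index=0, mode='fast')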